Python numpy module: stack() code examples

We have collected the following 50 code examples from open-source Python projects to illustrate how to use numpy.stack(). Each snippet is an excerpt, so surrounding imports (e.g. import numpy as np) are omitted.
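As a quick orientation before the project excerpts: numpy.stack() joins a sequence of equal-shaped arrays along a new axis, unlike numpy.concatenate(), which joins them along an existing axis. A minimal, self-contained sketch:

import numpy as np

a = np.array([1, 2, 3])
b = np.array([4, 5, 6])

np.stack([a, b]).shape           # (2, 3): new axis in front (axis=0)
np.stack([a, b], axis=1).shape   # (3, 2): new axis at position 1
np.stack([a, b], axis=-1).shape  # (3, 2): new axis appended at the end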

Project: wurst    Author: IndEcol    | project source | file source
def get_comma_separated_data(raw):
    # Convert to long string
    header, data = "".join(raw).strip().split(" = ")

    # Remove trailing comma
    assert data[-1] == ';'
    data = data[:-1]

    # Remove newline characters and convert to list
    data = eval(data.replace("\n", ''))

    shape = tuple(eval(header[header.index("["):header.index("]") + 1]))
    step_size = functools.reduce(operator.mul, shape) + 1
    years = np.array(data[::step_size], dtype=int)

    data = np.stack([
        np.array(data[1 + index * step_size:(index + 1) * step_size]).reshape(shape)
        for index in range(len(years))
    ], axis=-1)

    return header, years, data
Project: kaggle_dsb2017    Author: astoc    | project source | file source
def get_3d_data_slices(slices):  # get data in Hounsfield units
    slices.sort(key = lambda x: float(x.ImagePositionPatient[2]))  # from v 9

    image = np.stack([s.pixel_array for s in slices])
    image = image.astype(np.int16)  # ensure int16 (may be uint16 for some images)
    image[image == -2000] = 0   # correct cylindrical bound entries to 0

    # Convert to Hounsfield units (HU)
    # The intercept is usually -1024
    for slice_number in range(len(slices)):  # from v 8
        intercept = slices[slice_number].RescaleIntercept
        slope = slices[slice_number].RescaleSlope

        if slope != 1:  # added 16 Jan 2016, evening
            image[slice_number] = slope * image[slice_number].astype(np.float64)
            image[slice_number] = image[slice_number].astype(np.int16)

        image[slice_number] += np.int16(intercept)

    return np.array(image, dtype=np.int16)
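The per-slice loop above implements the standard DICOM linear rescale, HU = RescaleSlope * stored_value + RescaleIntercept. A minimal sketch of the same conversion with a hypothetical to_hu helper, assuming a single slope and intercept for the whole volume (which the function above deliberately does not assume):

import numpy as np

def to_hu(raw, slope=1.0, intercept=-1024.0):
    # hypothetical helper: HU = slope * raw + intercept, vectorized over the volume
    return (slope * raw.astype(np.float64) + intercept).astype(np.int16)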
Project: kaggle_dsb2017    Author: astoc    | project source | file source
def get_pixels_hu(slices):
    image = np.stack([s.pixel_array for s in slices])
    image = image.astype(np.int16)

    # Set outside-of-scan pixels to 0
    # The intercept is usually -1024, so air is approximately 0
    image[image == -2000] = 0

    # Convert to Hounsfield units (HU)
    ### slope can differ per slice -- so do it individually (case in point black_tset, slices 95 vs 96)
    ### Changes/correction - 31.01.2017
    for slice_number in range(len(slices)):

        intercept = slices[slice_number].RescaleIntercept
        slope = slices[slice_number].RescaleSlope

        if slope != 1:
            image[slice_number] = slope * image[slice_number].astype(np.float64)
            image[slice_number] = image[slice_number].astype(np.int16)

        image[slice_number] += np.int16(intercept)    

    return np.array(image, dtype=np.int16)
Project: kaggle_dsb2017    Author: astoc    | project source | file source
def get_3d_data_slices(slices):  # get data in Hounsfield units
    #slices = [dicom.read_file(path + '/' + s) for s in os.listdir(path)]
    #slices.sort(key=lambda x: int(x.InstanceNumber))  # was x.InstanceNumber
    slices.sort(key = lambda x: int(x.ImagePositionPatient[2]))  # from v 8

    image = np.stack([s.pixel_array for s in slices])
    image = image.astype(np.int16)  # ensure int16 (may be uint16 for some images)
    image[image == -2000] = 0   # correct cylindrical bound entries to 0

    # Convert to Hounsfield units (HU)
    # The intercept is usually -1024
    for slice_number in range(len(slices)):  # from v 8
        intercept = slices[slice_number].RescaleIntercept
        slope = slices[slice_number].RescaleSlope

        if slope != 1:  # added 16 Jan 2016, evening
            image[slice_number] = slope * image[slice_number].astype(np.float64)
            image[slice_number] = image[slice_number].astype(np.int16)

        image[slice_number] += np.int16(intercept)

    return np.array(image, dtype=np.int16)
Project: kaggle_dsb2017    Author: astoc    | project source | file source
def get_3d_data_hu(path):  # get data in Hounsfield units
    slices = [dicom.read_file(path + '/' + s) for s in os.listdir(path)]
    #slices.sort(key=lambda x: int(x.InstanceNumber))  # was x.InstanceNumber
    #slices.sort(key = lambda x: int(x.ImagePositionPatient[2]))  # from v8 - BUGGY 
    slices.sort(key = lambda x: float(x.ImagePositionPatient[2]))  # from 22.02

    image = np.stack([s.pixel_array for s in slices])
    image = image.astype(np.int16)  # ensure int16 (may be uint16 for some images)
    image[image == -2000] = 0   # correct cylindrical bound entries to 0

    # Convert to Hounsfield units (HU)
    # The intercept is usually -1024
    for slice_number in range(len(slices)):  # from v 8
        intercept = slices[slice_number].RescaleIntercept
        slope = slices[slice_number].RescaleSlope

        if slope != 1:  # added 16 Jan 2016, evening
            image[slice_number] = slope * image[slice_number].astype(np.float64)
            image[slice_number] = image[slice_number].astype(np.int16)

        image[slice_number] += np.int16(intercept)

    return np.array(image, dtype=np.int16)
Project: npstreams    Author: LaurentRDC    | project source | file source
def test_against_numpy_nanstd(self):
        source = [np.random.random((16, 12, 5)) for _ in range(10)]
        for arr in source:
            arr[randint(0, 15), randint(0, 11), randint(0, 4)] = np.nan
        stack = np.stack(source, axis = -1)

        for axis in (0, 1, 2, None):
            for ddof in range(4):
                with self.subTest('axis = {}, ddof = {}'.format(axis, ddof)):
                    from_numpy = np.nanstd(stack, axis = axis, ddof = ddof)
                    from_ivar = last(istd(source, axis = axis, ddof = ddof, ignore_nan = True))
                    self.assertSequenceEqual(from_numpy.shape, from_ivar.shape)
                    self.assertTrue(np.allclose(from_ivar, from_numpy))
Project: npstreams    Author: LaurentRDC    | project source | file source
def test_against_numpy(self):
        """ Test iall against numpy.all """
        stream = [np.zeros((8, 16, 2)) for _ in range(11)]
        stream[3][3,0,1] = 1    # so that np.all(axis = None) evaluates to False
        stack = np.stack(stream, axis = -1)

        with self.subTest('axis = None'):
            from_numpy = np.all(stack, axis = None)
            from_stream = last(iall(stream, axis = None))
            self.assertEqual(from_numpy, from_stream)

        for axis in range(stack.ndim):
            with self.subTest('axis = {}'.format(axis)):
                from_numpy = np.all(stack, axis = axis)
                from_stream = last(iall(stream, axis = axis))
                self.assertTrue(np.allclose(from_numpy, from_stream))
Project: xarray-simlab    Author: benbovy    | project source | file source
def snapshot_to_xarray_variable(self, key, clock=None):
        """Convert snapshots taken for a specific model variable to an
        xarray.Variable object.
        """
        proc_name, var_name = key
        variable = self.model._processes[proc_name]._variables[var_name]

        array_list = self.snapshot_values[key]
        first_array = array_list[0]

        if len(array_list) == 1:
            data = first_array
        else:
            data = np.stack(array_list)

        dims = _get_dims_from_variable(first_array, variable)
        if clock is not None and len(array_list) > 1:
            dims = (clock,) + dims

        attrs = variable.attrs.copy()
        attrs['description'] = variable.description

        return xr.Variable(dims, data, attrs=attrs)
Project: CausalGAN    Author: mkocaoglu    | project source | file source
def get_interv_table(model,intrv=True):

    n_batches=25
    table_outputs=[]
    d_vals=np.linspace(TINY,0.6,n_batches)
    for name in model.cc.node_names:
        outputs=[]
        for d_val in d_vals:
            do_dict={model.cc.node_dict[name].label_logit : d_val*np.ones((model.batch_size,1))}
            outputs.append(model.sess.run(model.fake_labels,do_dict))

        out=np.vstack(outputs)
        table_outputs.append(out)

    table=np.stack(table_outputs,axis=2)

    np.mean(np.round(table), axis=0)  # NOTE: result is unused here

    return table

#dT=pd.DataFrame(index=p_names, data=T, columns=do_names)
#T=np.mean(np.round(table),axis=0)
#table=get_interv_table(model)
Project: Deep-Learning-with-Keras    Author: PacktPublishing    | project source | file source
def preprocess_images(images):
    if images.shape[0] < 4:
        # single image
        x_t = images[0]
        x_t = imresize(x_t, (80, 80))
        x_t = x_t.astype("float")
        x_t /= 255.0
        s_t = np.stack((x_t, x_t, x_t, x_t), axis=2)
    else:
        # 4 images
        xt_list = []
        for i in range(images.shape[0]):
            x_t = imresize(images[i], (80, 80))
            x_t = x_t.astype("float")
            x_t /= 255.0
            xt_list.append(x_t)
        s_t = np.stack((xt_list[0], xt_list[1], xt_list[2], xt_list[3]), 
                       axis=2)
    s_t = np.expand_dims(s_t, axis=0)
    return s_t
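The stack above is the usual Atari/DQN-style state construction: four 80x80 frames become the channels of a single state tensor. The shape bookkeeping, with random arrays standing in for frames:

import numpy as np

frames = [np.random.rand(80, 80) for _ in range(4)]
s_t = np.stack(frames, axis=2)     # (80, 80, 4): frames become channels
s_t = np.expand_dims(s_t, axis=0)  # (1, 80, 80, 4): prepend the batch axis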

############################# main ###############################
Project: Deep-Learning-with-Keras    Author: PacktPublishing    | project source | file source
def preprocess_images(images):
    if images.shape[0] < 4:
        # single image
        x_t = images[0]
        x_t = imresize(x_t, (80, 80))
        x_t = x_t.astype("float")
        x_t /= 255.0
        s_t = np.stack((x_t, x_t, x_t, x_t), axis=2)
    else:
        # 4 images
        xt_list = []
        for i in range(images.shape[0]):
            x_t = imresize(images[i], (80, 80))
            x_t = x_t.astype("float")
            x_t /= 255.0
            xt_list.append(x_t)
        s_t = np.stack((xt_list[0], xt_list[1], xt_list[2], xt_list[3]), 
                       axis=2)
    s_t = np.expand_dims(s_t, axis=0)
    return s_t
Project: mimic3-benchmarks    Author: YerevaNN    | project source | file source
def calc_metrics(self, data_gen, history, dataset, logs):
        y_true = []
        predictions = []
        for i in range(data_gen.steps):
            if self.verbose == 1:
                print("\r\tdone {}/{}".format(i, data_gen.steps), end=' ')
            (x,y) = next(data_gen)
            pred = self.model.predict(x, batch_size=self.batch_size)
            if isinstance(x, list) and len(x) == 2: # deep supervision
                for m, t, p in zip(x[1].flatten(), y.flatten(), pred.flatten()):
                    if np.equal(m, 1):
                        y_true.append(t)
                        predictions.append(p)
            else:
                y_true += list(y.flatten())
                predictions += list(pred.flatten())
        print("\n")
        predictions = np.array(predictions)
        predictions = np.stack([1-predictions, predictions], axis=1)
        ret = metrics.print_metrics_binary(y_true, predictions)
        for k, v in ret.items():
            logs[dataset + '_' + k] = v
        history.append(ret)
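np.stack([1 - predictions, predictions], axis=1) converts a vector of positive-class probabilities into the (n, 2) two-column layout that binary-metrics code typically expects. In isolation:

import numpy as np

p = np.array([0.1, 0.8, 0.5])
np.stack([1 - p, p], axis=1)
# [[0.9 0.1]
#  [0.2 0.8]
#  [0.5 0.5]]   columns: P(class 0), P(class 1)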
Project: mimic3-benchmarks    Author: YerevaNN    | project source | file source
def calc_metrics(self, data, history, dataset, logs):
        y_true = []
        predictions = []
        B = self.batch_size
        for i in range(0, len(data[0]), B):
            if self.verbose == 1:
                print("\r\tdone {}/{}".format(i, len(data[0])), end=' ')
            (x,y) = (data[0][i:i+B], data[1][i:i+B])
            outputs = self.model.predict(x, batch_size=B)

            if isinstance(y[0], list): # target replication
                y_true += list(y[0].flatten())
                predictions += list(outputs[0].flatten())
            else:
                y_true += list(np.array(y).flatten())
                predictions += list(outputs.flatten())
        print("\n")
        predictions = np.array(predictions)
        predictions = np.stack([1-predictions, predictions], axis=1)
        ret = metrics.print_metrics_binary(y_true, predictions)
        for k, v in ret.items():
            logs[dataset + '_' + k] = v
        history.append(ret)
Project: deeppavlov    Author: deepmipt    | project source | file source
def get_document_batch(self, doc_id):
        """builds batch of all mention pairs in one document

        Args:
            doc_id: id of document

        Returns:
            feature representation of mentions and labels
        """
        mentions = self.dl.get_all_mentions_from_doc(doc_id)
        if len(mentions) == 0:
            return None, None
        A, B = [], []
        for a in mentions:
            for b in mentions:
                A.append(a)
                B.append(b)
        A_f = [self._mention_to_features(m) for m in A]
        B_f = [self._mention_to_features(m) for m in B]
        AB_f = self._pair_features(A, B)
        A = [self.dl.mention_features[m] for m in A]
        B = [self.dl.mention_features[m] for m in B]
        return np.vstack(A), np.stack(A_f), np.vstack(B), np.stack(B_f), np.stack(AB_f)
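Note the mix in the return statement: np.vstack() concatenates arrays that already have a row dimension, while np.stack() adds a new leading axis to 1-D feature vectors. A toy comparison:

import numpy as np

rows = [np.ones((1, 4)), np.zeros((1, 4))]
vecs = [np.ones(4), np.zeros(4)]

np.vstack(rows).shape  # (2, 4): joins along the existing first axis
np.stack(vecs).shape   # (2, 4): inserts a new first axis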
Project: wurst    Author: IndEcol    | project source | file source
def get_space_separated_data(raw):
    assert raw[0].strip().endswith("= [")
    assert raw[-1].strip().endswith("];")

    header = raw[0].replace("= [", "").strip()
    shape = tuple(eval(header[header.index("["):header.index("]") + 1]))
    data = [eval(line.strip().replace("  ", ",")) for line in raw[1:-1]]

    if len(shape) == 1:
        step_size = 1
    else:
        step_size = functools.reduce(operator.mul, shape[:-1])

    years = np.array(data[::step_size + 1], dtype=int)

    subarrays = [
        np.array(data[index * (step_size + 1) + 1:(index + 1) * (step_size + 1)]).reshape(shape)
        for index in range(len(years))
    ]
    return header, years, np.stack(subarrays, axis=-1)
Project: dsb3    Author: EliasVansteenkiste    | project source | file source
def generate(self):

        for pid in self.id2candidates_path:
            patient_path = self.id2patient_path[pid]
            print(pid, patient_path)
            img, pixel_spacing = utils_lung.read_dicom_scan(patient_path)

            print(self.id2candidates_path[pid])
            candidates = utils.load_pkl(self.id2candidates_path[pid])
            print(candidates.shape)
            for candidate in candidates:
                y_batch = np.array(candidate, dtype='float32')
                patch_center = candidate[:3]
                batch = []
                for i in range(self.tta):
                    batch.append(np.float32(self.data_prep_fun(data=img,
                                                        patch_center=patch_center,
                                                        pixel_spacing=pixel_spacing)))
                x_batch = np.stack(batch)
                print(x_batch.shape)

                yield x_batch, y_batch, [pid]
Project: ISLES2017    Author: MiguelMonteiro    | project source | file source
def adjust_prediction(self, probability, image):
        crf = dcrf.DenseCRF(np.prod(probability.shape), 2)
        # crf = dcrf.DenseCRF(np.prod(probability.shape), 1)

        binary_prob = np.stack((1 - probability, probability), axis=0)
        unary = unary_from_softmax(binary_prob)
        # unary = unary_from_softmax(np.expand_dims(probability, axis=0))
        crf.setUnaryEnergy(unary)

        # per dimension scale factors
        sdims = [self.sdims] * 3
        smooth = create_pairwise_gaussian(sdims=sdims, shape=probability.shape)
        crf.addPairwiseEnergy(smooth, compat=2)

        if self.schan:
            # per channel scale factors
            schan = [self.schan] * 6
            appearance = create_pairwise_bilateral(sdims=sdims, schan=schan, img=image, chdim=3)
            crf.addPairwiseEnergy(appearance, compat=2)

        result = crf.inference(self.iter)
        crf_prediction = np.argmax(result, axis=0).reshape(probability.shape).astype(np.float32)

        return crf_prediction
Project: DistanceGAN    Author: sagiebenaim    | project source | file source
def read_images( filenames, domain=None, image_size=64):

    images = []

    for fn in filenames:
        image = cv2.imread(fn)
        if image is None:
            continue

        if domain == 'A':
            kernel = np.ones((3,3), np.uint8)
            image = image[:, :256, :]
            image = 255. - image
            image = cv2.dilate( image, kernel, iterations=1 )
            image = 255. - image
        elif domain == 'B':
            image = image[:, 256:, :]

        image = cv2.resize(image, (image_size,image_size))
        image = image.astype(np.float32) / 255.
        image = image.transpose(2,0,1)
        images.append( image )

    images = np.stack( images )
    return images
Project: DistanceGAN    Author: sagiebenaim    | project source | file source
def plot_current_errors(self, epoch, counter_ratio, opt, errors):
        if not hasattr(self, 'plot_data'):
            self.plot_data = {'X':[],'Y':[], 'legend':list(errors.keys())}
        self.plot_data['X'].append(epoch + counter_ratio)
        self.plot_data['Y'].append([errors[k] for k in self.plot_data['legend']])
        self.vis.line(
            X=np.stack([np.array(self.plot_data['X'])]*len(self.plot_data['legend']),1),
            Y=np.array(self.plot_data['Y']),
            opts={
                'title': self.name + ' loss over time',
                'legend': self.plot_data['legend'],
                'xlabel': 'epoch',
                'ylabel': 'loss'},
            win=self.display_id)

    # errors: same format as |errors| of plotCurrentErrors
Project: droppy    Author: BV-DR    | project source | file source
def mapFunction( x , y , func , ax = None, arrayInput = False, n = 10, title = None, **kwargs ) :
   """
      Plot function on a regular grid
        x : 1d array
        y : 1d array
        func : function to map
        arrayInput : False if func(x,y) , True if func( [x,y] )
   """

   if ax is None :
      fig , ax = plt.subplots()

   X , Y = np.meshgrid( x , y )

   if not arrayInput :
      Z = func( X.flatten() , Y.flatten() ).reshape(X.shape)
   else :
      Z = func( np.stack( [ X.flatten() , Y.flatten() ]) )

   ax.contourf( X , Y , Z , n , **kwargs)

   if title is not None : ax.set_title(title)

   return ax
Project: rlflow    Author: tpbarron    | project source | file source
def process_observation(self, obs):
        """
        Take in the current observation, do any necessary processing and return
        the processed observation.

        A return value of None indicates that there is no observation yet. A
        random action will be taken.
        """
        self.current_sequence.append(obs)
        if len(self.current_sequence) < self.observations:
            return None

        if len(self.current_sequence) > self.observations:
            self.current_sequence.pop(0)

        # convert current sequence to input
        # stacking essentially adds a single axis, want it to be after
        obs_seq = np.stack(self.current_sequence, axis=len(obs.shape))
        return obs_seq
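As the final comment says, stacking adds exactly one axis; axis=len(obs.shape) places it after all existing axes, which is the same as axis=-1:

import numpy as np

obs = np.zeros((84, 84))
seq = [obs] * 4
np.stack(seq, axis=len(obs.shape)).shape  # (84, 84, 4)
np.stack(seq, axis=-1).shape              # (84, 84, 4), identical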
Project: prysm    Author: brandondube    | project source | file source
def wavelength_to_XYZ(wavelength, observer='1931_2deg'):
    ''' Uses tristimulus color matching functions to map a wavelength to XYZ
        coordinates.

    Args:
        wavelength (`float`): wavelength in nm.

        observer (`str`): CIE observer name, must be 1931_2deg.

    Returns:
        `numpy.ndarray`: array with last dimension corresponding to X, Y, Z.

    '''
    wavelength = np.asarray(wavelength, dtype=config.precision)

    cmf = get_cmf(observer)
    wvl, X, Y, Z = cmf['wvl'], cmf['X'], cmf['Y'], cmf['Z']

    ia = {'bounds_error': False, 'fill_value': 0, 'assume_sorted': True}
    f_X, f_Y, f_Z = interp1d(wvl, X, **ia), interp1d(wvl, Y, **ia), interp1d(wvl, Z, **ia)
    x, y, z = f_X(wavelength), f_Y(wavelength), f_Z(wavelength)

    shape = wavelength.shape
    return np.stack((x, y, z), axis=len(shape))
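np.stack((x, y, z), axis=len(shape)) appends X, Y, Z as a trailing dimension, so a scalar wavelength yields shape (3,) and an (n,) array yields (n, 3):

import numpy as np

x = y = z = np.zeros(5)
np.stack((x, y, z), axis=len(x.shape)).shape  # (5, 3): channels last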
Project: prysm    Author: brandondube    | project source | file source
def XYZ_to_uvprime(XYZ):
    ''' Converts XYZ points to u'v' points.

    Args:
        XYZ (`numpy.ndarray`): ndarray with last dimension corresponding to
            X, Y, Z.

    Returns:
        `numpy.ndarray`: array with last dimension corresponding to u', v'.

    '''
    XYZ = np.asarray(XYZ)
    X, Y, Z = XYZ[..., 0], XYZ[..., 1], XYZ[..., 2]
    u = (4 * X) / (X + 15 * Y + 3 * Z)
    v = (9 * Y) / (X + 15 * Y + 3 * Z)

    shape = u.shape
    return np.stack((u, v), axis=len(shape))
Project: prysm    Author: brandondube    | project source | file source
def Luv_to_chroma_hue(luv):
    ''' Converts L*u*v* coordinates to a chroma and hue.

    Args:
        luv (`numpy.ndarray`): array with last dimension L*, u*, v*.

    Returns:
        `numpy.ndarray` with last dimension corresponding to C* and h.

    '''
    luv = np.asarray(luv)
    u, v = luv[..., 1], luv[..., 2]
    C = sqrt(u**2 + v**2)
    h = atan2(v, u)

    shape = luv.shape
    return np.stack((C, h), axis=len(shape))
Project: prysm    Author: brandondube    | project source | file source
def uvprime_to_xy(uv):
    ''' Converts u'v' points to xy chromaticity coordinates (the x, y of xyY).

    Args:
        uv (`numpy.ndarray`): ndarray with last dimension corresponding to
            u', v'.

    Returns:
        `numpy.ndarray`: array with last dimension corresponding to x, y.

    '''
    uv = np.asarray(uv)
    u, v = uv[..., 0], uv[..., 1]
    x = (9 * u) / (6 * u - 16 * v + 12)
    y = (4 * v) / (6 * u - 16 * v + 12)

    shape = x.shape
    return np.stack((x, y), axis=len(shape))
Project: untwist    Author: IoSR-Surrey    | project source | file source
def __call__(self, process_func):

        def wrapper(*args):
            data_obj = args[1]
            if (len(data_obj.shape) <= self.input_dim
                or data_obj.shape[-1] == 1):
                return process_func(*args)
            else:
                pool = mp.Pool(mp.cpu_count())# TODO: make configurable
                arglist = [
                    (args[0],) +
                    (data_obj[...,i],) +
                    args[2:]
                    for i in range(data_obj.shape[-1])
                ]
                result = pool.map(self.worker, arglist)
                if self.output_dim > self.input_dim: # expanding
                    return np.stack(result, -1)
                else: # contracting
                    return np.concatenate(result, -1)
        return wrapper
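The decorator chooses np.stack() when the processed output gains a dimension and np.concatenate() when it does not; the two differ exactly in whether a new axis is created:

import numpy as np

chunks = [np.zeros((8, 16)) for _ in range(3)]
np.stack(chunks, -1).shape        # (8, 16, 3): new trailing axis
np.concatenate(chunks, -1).shape  # (8, 48): existing last axis grows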
Project: mixedvines    Author: asnelt    | project source | file source
def rvs(self, size=1):
        '''
        Generates random variates from the copula.

        Parameters
        ----------
        size : integer, optional
            The number of samples to generate.  (Default: 1)

        Returns
        -------
        samples : array_like
            n-by-2 matrix of samples where n is the number of samples.
        '''
        samples = np.stack((uniform.rvs(size=size), uniform.rvs(size=size)),
                           axis=1)
        samples[:, 0] = self.ppcf(samples)
        return samples
Project: DeblurGAN    Author: KupynOrest    | project source | file source
def plot_current_errors(self, epoch, counter_ratio, opt, errors):
        if not hasattr(self, 'plot_data'):
            self.plot_data = {'X':[],'Y':[], 'legend':list(errors.keys())}
        self.plot_data['X'].append(epoch + counter_ratio)
        self.plot_data['Y'].append([errors[k] for k in self.plot_data['legend']])
        self.vis.line(
            X=np.stack([np.array(self.plot_data['X'])]*len(self.plot_data['legend']),1),
            Y=np.array(self.plot_data['Y']),
            opts={
                'title': self.name + ' loss over time',
                'legend': self.plot_data['legend'],
                'xlabel': 'epoch',
                'ylabel': 'loss'},
            win=self.display_id)

    # errors: same format as |errors| of plotCurrentErrors
Project: kaggle-dstl-satellite-imagery-feature-detection    Author: u1234x1234    | project source | file source
def colorize_raster(masks):
    ''' (H, W, 10) -> (H, W, 3)
    '''
    assert masks.shape[2] == 10
    palette = np.array([(180, 180, 180), (100, 100, 100),  # Buildings, Misc.
                        (6, 88, 179), (125, 194, 223),  # Road, Track
                        (55, 120, 27), (160, 219, 166),  # Trees, Crops
                        (209, 173, 116), (180, 117, 69),  # Waterway, Standing
                        (67, 109, 244), (39, 48, 215)], dtype=np.uint8)  # Car

    r = []
    for obj_type in range(10):
        c = palette[obj_type]
        result = np.stack([masks[:, :, obj_type]] * 3, axis=2)
        r.append(result * c)
    r = np.stack(r)
    r = np.max(r, axis=0)
    return r
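np.stack([mask] * 3, axis=2) is the usual idiom for promoting a single-channel mask to three channels so it can be multiplied by an RGB color; the outer np.stack plus np.max then merges the ten colored layers. The promotion step alone:

import numpy as np

mask = np.random.randint(0, 2, size=(4, 4), dtype=np.uint8)
color = np.array([180, 180, 180], dtype=np.uint8)
rgb = np.stack([mask] * 3, axis=2) * color  # (4, 4, 3)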
Project: fold    Author: tensorflow    | project source | file source
def load_embeddings(filename):
  """Loads embeddings, returns weight matrix and dict from words to indices."""
  weight_vectors = []
  word_idx = {}
  with codecs.open(filename, encoding='utf-8') as f:
    for line in f:
      word, vec = line.split(u' ', 1)
      word_idx[word] = len(weight_vectors)
      weight_vectors.append(np.array(vec.split(), dtype=np.float32))
  # Annoying implementation detail; '(' and ')' are replaced by '-LRB-' and
  # '-RRB-' respectively in the parse-trees.
  word_idx[u'-LRB-'] = word_idx.pop(u'(')
  word_idx[u'-RRB-'] = word_idx.pop(u')')
  # Random embedding vector for unknown words.
  weight_vectors.append(np.random.uniform(
      -0.05, 0.05, weight_vectors[0].shape).astype(np.float32))
  return np.stack(weight_vectors), word_idx
Project: Pytorch-Sketch-RNN    Author: alexis-jacq    | project source | file source
def make_batch(batch_size):
    batch_idx = np.random.choice(len(data),batch_size)
    batch_sequences = [data[idx] for idx in batch_idx]
    strokes = []
    lengths = []
    indice = 0
    for seq in batch_sequences:
        len_seq = len(seq[:,0])
        new_seq = np.zeros((Nmax,5))
        new_seq[:len_seq,:2] = seq[:,:2]
        new_seq[:len_seq-1,2] = 1-seq[:-1,2]
        new_seq[:len_seq,3] = seq[:,2]
        new_seq[(len_seq-1):,4] = 1
        new_seq[len_seq-1,2:4] = 0
        lengths.append(len(seq[:,0]))
        strokes.append(new_seq)
        indice += 1

    if use_cuda:
        batch = Variable(torch.from_numpy(np.stack(strokes,1)).cuda().float())
    else:
        batch = Variable(torch.from_numpy(np.stack(strokes,1)).float())
    return batch, lengths

################################ adaptive lr
Project: Pytorch-Sketch-RNN    Author: alexis-jacq    | project source | file source
def make_target(self, batch, lengths):
        if use_cuda:
            eos = Variable(torch.stack([torch.Tensor([0,0,0,0,1])]\
                *batch.size()[1]).cuda()).unsqueeze(0)
        else:
            eos = Variable(torch.stack([torch.Tensor([0,0,0,0,1])]\
                *batch.size()[1])).unsqueeze(0)
        batch = torch.cat([batch, eos], 0)
        mask = torch.zeros(Nmax+1, batch.size()[1])
        for indice,length in enumerate(lengths):
            mask[:length,indice] = 1
        if use_cuda:
            mask = Variable(mask.cuda()).detach()
        else:
            mask = Variable(mask).detach()
        dx = torch.stack([Variable(batch.data[:,:,0])]*hp.M,2).detach()
        dy = torch.stack([Variable(batch.data[:,:,1])]*hp.M,2).detach()
        p1 = Variable(batch.data[:,:,2]).detach()
        p2 = Variable(batch.data[:,:,3]).detach()
        p3 = Variable(batch.data[:,:,4]).detach()
        p = torch.stack([p1,p2,p3],2)
        return mask,dx,dy,p
Project: audio    Author: pytorch    | project source | file source
def __call__(self, tensor):
        """

        Args:
            tensor (Tensor): Tensor of audio of size (samples x channels)

        Returns:
            tensor (Tensor): n_mels x hops x channels (BxLxC), where n_mels is
                the number of mel bins, hops is the number of hops, and channels
                is unchanged.

        """
        if librosa is None:
            print("librosa not installed, cannot create spectrograms")
            return tensor
        L = []
        for i in range(tensor.size(1)):
            nparr = tensor[:, i].numpy()  # (samples, )
            sgram = librosa.feature.melspectrogram(
                nparr, **self.kwargs)  # (n_mels, hops)
            L.append(sgram)
        L = np.stack(L, 2)  # (n_mels, hops, channels)
        tensor = torch.from_numpy(L).type_as(tensor)

        return tensor
Project: radar    Author: amoose136    | project source | file source
def test_shapes(self):
        dims = [
            ((1, 1), (2, 1, 1)),     # broadcast first argument
            ((2, 1, 1), (1, 1)),     # broadcast second argument
            ((2, 1, 1), (2, 1, 1)),  # matrix stack sizes match
            ]

        for dt, (dm1, dm2) in itertools.product(self.types, dims):
            a = np.ones(dm1, dtype=dt)
            b = np.ones(dm2, dtype=dt)
            res = self.matmul(a, b)
            assert_(res.shape == (2, 1, 1))

        # vector vector returns scalars.
        for dt in self.types:
            a = np.ones((2,), dtype=dt)
            b = np.ones((2,), dtype=dt)
            c = self.matmul(a, b)
            assert_(np.array(c).shape == ())
Project: DRLModule    Author: halleanwoo    | project source | file source
def train_priority(self , state , reward , action , state_next , done, batch_ISweight):
        q , q_target = self.sess.run([self.q_value , self.q_target] ,
                                     feed_dict={self.inputs_q : state , self.inputs_target : state_next } )
        # DoubleDQN
        if self.double:
            q_next = self.sess.run(self.q_value , feed_dict={self.inputs_q : state_next})
            action_best = np.argmax(q_next , axis = 1)
            q_target_best = self.sess.run(self.q_target_action , feed_dict={self.action_best : action_best,
                                                                            self.q_target : q_target})
        else:
            q_target_best = np.max(q_target , axis = 1)   # dqn

        q_target_best_mask = ( 1.0 - done) * q_target_best
        target = reward + self.gamma * q_target_best_mask
        batch_ISweight = np.stack([batch_ISweight , batch_ISweight] , axis = -1 )
        loss, td_error, _ = self.sess.run([self.loss , self.td_error, self.train_op] ,
                                 feed_dict={self.inputs_q: state , self.target:target , self.action:action, self.ISweight : batch_ISweight ,} ) 
        return td_error
        # self.loss_his.append(loss)


# ===============================================================
#                             A3C Agent
# ===============================================================
Project: jack    Author: uclmr    | project source | file source
def stack_and_pad(values: List[Union[np.ndarray, int, float]], pad=0) -> np.ndarray:
    """Pads a list of numpy arrays so that they have equal dimensions, then stacks them."""
    if isinstance(values[0], int) or isinstance(values[0], float):
        return np.array(values)

    dims = len(values[0].shape)
    max_shape = [max(sizes) for sizes in zip(*[v.shape for v in values])]

    padded_values = []

    for value in values:
        pad_width = [(0, max_shape[i] - value.shape[i])
                     for i in range(dims)]
        padded_value = np.lib.pad(value, pad_width, mode='constant',
                                  constant_values=pad)
        padded_values.append(padded_value)

    return np.stack(padded_values)
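A quick usage sketch of stack_and_pad(): arrays with different shapes are zero-padded up to the elementwise maximum shape, then stacked along a new first axis.

import numpy as np

batch = [np.ones((2, 3)), np.ones((4, 1))]
stack_and_pad(batch).shape  # (2, 4, 3): both padded to (4, 3), then stacked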
Project: pytorch-skipthoughts    Author: kaniblu    | project source | file source
def join_embeddings(src_we, target_we):
    """joins and filters words not in common and produces two tensors"""
    src_w = set(src_we.keys())
    target_w = set(target_we.keys())
    common_w = src_w & target_w

    src_tensor = []
    target_tensor = []
    for w in common_w:
        src_tensor.append(src_we[w])
        target_tensor.append(target_we[w])

    src_tensor = torch.Tensor(np.stack(src_tensor))
    target_tensor = torch.Tensor(np.stack(target_tensor))

    return src_tensor, target_tensor
Project: blitznet    Author: dvornikita    | project source | file source
def decode_bboxes(tcoords, anchors):
    var_x, var_y, var_w, var_h = config['prior_variance']
    t_x = tcoords[:, 0]*var_x
    t_y = tcoords[:, 1]*var_y
    t_w = tcoords[:, 2]*var_w
    t_h = tcoords[:, 3]*var_h
    a_w = anchors[:, 2]
    a_h = anchors[:, 3]
    a_x = anchors[:, 0]+a_w/2
    a_y = anchors[:, 1]+a_h/2
    x = t_x*a_w + a_x
    y = t_y*a_h + a_y
    w = tf.exp(t_w)*a_w
    h = tf.exp(t_h)*a_h

    x1 = tf.maximum(0., x - w/2)
    y1 = tf.maximum(0., y - h/2)
    x2 = tf.minimum(1., w + x1)
    y2 = tf.minimum(1., h + y1)
    return tf.stack([y1, x1, y2, x2], axis=1)
Project: blitznet    Author: dvornikita    | project source | file source
def encode_bboxes(proposals, gt):
    prop_x = proposals[:, 0]
    prop_y = proposals[:, 1]
    prop_w = proposals[:, 2]
    prop_h = proposals[:, 3]

    gt_x = gt[:, 0]
    gt_y = gt[:, 1]
    gt_w = gt[:, 2]
    gt_h = gt[:, 3]

    diff_x = (gt_x + 0.5*gt_w - prop_x - 0.5*prop_w)/prop_w
    diff_y = (gt_y + 0.5*gt_h - prop_y - 0.5*prop_h)/prop_h
    if len(gt) > 0 and (np.min(gt_w/prop_w) < 1e-6 or np.min(gt_h/prop_h) < 1e-6):
        print(np.min(gt_w), np.min(gt_h), np.min(gt_w/prop_w), np.max(gt_h/prop_h))
    diff_w = np.log(gt_w/prop_w)
    diff_h = np.log(gt_h/prop_h)

    var_x, var_y, var_w, var_h = config['prior_variance']
    x = np.stack([diff_x/var_x, diff_y/var_y, diff_w/var_w, diff_h/var_h],
                 axis=1)
    return x
Project: magenta    Author: tensorflow    | project source | file source
def batch(states, batch_size=None):
  """Combines a collection of state structures into a batch, padding if needed.

  Args:
    states: A collection of individual nested state structures.
    batch_size: The desired final batch size. If the nested state structure
        that results from combining the states is smaller than this, it will be
        padded with zeros.
  Returns:
    A single state structure that results from stacking the structures in
    `states`, with padding if needed.

  Raises:
    ValueError: If the number of input states is larger than `batch_size`.
  """
  if batch_size and len(states) > batch_size:
    raise ValueError('Combined state is larger than the requested batch size')

  def stack_and_pad(*states):
    stacked = np.stack(states)
    if batch_size:
      stacked.resize([batch_size] + list(stacked.shape)[1:])
    return stacked
  return tf_nest.map_structure(stack_and_pad, *states)
Project: caltech-machine-learning    Author: zhiyanfoo    | project source | file source
def second_order_nic(x):
    """
    transform             
    x1 x2  --->   1 x1 x2 x1x2 x1**2 x2**2

    nic : no initial constant
    """
    ones = np.ones(len(x))
    x1 = x[:, 0]
    x2 = x[:, 1]
    x1_sqr = x1**2
    x2_sqr = x2**2
    x1x2 = x1 * x2
    return np.stack([ones, x1, x2, x1x2, x1_sqr, x2_sqr], axis=1)
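Because the columns are stacked with axis=1, each row of the result is the full six-dimensional feature vector for one input point:

import numpy as np

x = np.array([[1.0, 2.0], [3.0, 4.0]])
second_order_nic(x).shape  # (2, 6): one row per sample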

# STOCHASTIC GRADIENT DESCENT
Project: caltech-machine-learning    Author: zhiyanfoo    | project source | file source
def generate_quadratic_parameters(trials):
    """ax^2 + b"""
    def transform(x):
        """
        transform             
        x1  --->   1 x1**2
        """
        ones = np.ones(len(x))
        x1 = x[:, 0]
        x1_sqr = x1 ** 2 
        return np.stack([ones, x1_sqr], axis=1)

    new_trials = [DataML((training_set.z, training_set.y), transform)
                  for training_set in trials]
    weights = [linear_percepton(training_set.z, training_set.y)
               for training_set in new_trials]
    return np.array(weights)
Project: luminoth    Author: tryolabs    | project source | file source
def recalculate_objects(pred_dict, image):
    proposals = pred_dict['rpn_prediction']['proposals']
    proposals_prob = pred_dict['classification_prediction']['rcnn']['cls_prob']
    proposals_target = proposals_prob.argmax(axis=1) - 1
    bbox_offsets = pred_dict[
        'classification_prediction']['rcnn']['bbox_offsets']

    bbox_offsets = bbox_offsets[proposals_target >= 0]
    proposals = proposals[proposals_target >= 0]
    proposals_target = proposals_target[proposals_target >= 0]

    bbox_offsets_idx_pairs = np.stack(
        np.array([
            proposals_target * 4, proposals_target * 4 + 1,
            proposals_target * 4 + 2, proposals_target * 4 + 3]), axis=1)
    bbox_offsets = np.take(bbox_offsets, bbox_offsets_idx_pairs.astype(np.int))

    bboxes = decode(proposals, bbox_offsets)

    return bboxes, proposals_target
Project: PixelDCN    Author: HongyangGao    | project source | file source
def next_batch(self, batch_size):
        batches_ids = set()
        while len(batches_ids) < batch_size:
            h = random.randint(0, self.t_h-self.h)
            w = random.randint(0, self.t_w-self.w)
            d = random.randint(0, self.t_d-self.d)
            batches_ids.add((h, w, d))
        image_batches = []
        label_batches = []
        for h, w, d in batches_ids:
            image_batches.append(
                self.images[h:h+self.h, w:w+self.w, d:d+self.d])
            label_batches.append(
                self.labels[h:h+self.h, w:w+self.w, d:d+self.d])
        images = np.expand_dims(np.stack(image_batches, axis=0), axis=-1)
        images = np.transpose(images, (0, 3, 1, 2, 4))
        labels = np.stack(label_batches, axis=0)
        labels = np.transpose(labels, (0, 3, 1, 2))
        return images, labels
Project: DocumentSegmentation    Author: SeguinBe    | project source | file source
def process(input_dir, output_dir, model_dir, resizing_size, gpu):
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.3, visible_device_list=gpu)
    with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)).as_default():
        m = loader.LoadedModel(model_dir)

    os.makedirs(output_dir, exist_ok=True)

    input_filenames = glob(os.path.join(input_dir, '*.jpg')) + \
                      glob(os.path.join(input_dir, '*.png')) + \
                      glob(os.path.join(input_dir, '*.tif')) + \
                      glob(os.path.join(input_dir, '*.jp2'))

    for path in tqdm(input_filenames):
        img = Image.open(path).resize(resizing_size)
        mat = np.asarray(img)
        if len(mat.shape) == 2:
            mat = np.stack([mat, mat, mat], axis=2)
        predictions = m.predict(mat[None], prediction_key='labels')[0]
        plt.imsave(os.path.join(output_dir, os.path.relpath(path, input_dir)), predictions)
Project: quadpy    Author: nschloe    | project source | file source
def test_hexahedron():
    val = quadpy.hexahedron.integrate(
            lambda x: numpy.exp(x[0]),
            quadpy.hexahedron.cube_points([0.0, 1.0], [0.0, 1.0], [0.0, 1.0]),
            quadpy.hexahedron.Product(quadpy.line_segment.NewtonCotesClosed(3))
            )

    val = quadpy.hexahedron.integrate(
            lambda x: [numpy.exp(x[0]), numpy.exp(x[1])],
            numpy.stack([
                quadpy.hexahedron.cube_points([0, 1], [0, 1], [0, 1]),
                quadpy.hexahedron.cube_points([0, 1], [0, 1], [0, 1]),
                quadpy.hexahedron.cube_points([0, 1], [0, 1], [0, 1]),
                quadpy.hexahedron.cube_points([0, 1], [0, 1], [0, 1]),
                quadpy.hexahedron.cube_points([0, 1], [0, 1], [0, 1]),
                ], axis=-2),
            quadpy.hexahedron.Product(quadpy.line_segment.NewtonCotesClosed(3))
            )
    assert val.shape == (2, 5)
    return
Project: quadpy    Author: nschloe    | project source | file source
def test_quadrilateral():
    quadpy.quadrilateral.integrate(
            lambda x: numpy.exp(x[0]),
            quadpy.quadrilateral.rectangle_points([0.0, 1.0], [0.0, 1.0]),
            quadpy.quadrilateral.Stroud('C2 5-4')
            )

    val = quadpy.quadrilateral.integrate(
            lambda x: [numpy.exp(x[0]), numpy.exp(x[1])],
            numpy.stack([
                quadpy.quadrilateral.rectangle_points([0.0, 1.0], [0.0, 1.0]),
                quadpy.quadrilateral.rectangle_points([0.0, 1.0], [0.0, 1.0]),
                quadpy.quadrilateral.rectangle_points([0.0, 1.0], [0.0, 1.0]),
                quadpy.quadrilateral.rectangle_points([0.0, 1.0], [0.0, 1.0]),
                quadpy.quadrilateral.rectangle_points([0.0, 1.0], [0.0, 1.0]),
                ], axis=-2),
            quadpy.quadrilateral.Stroud('C2 3-1')
            )
    assert val.shape == (2, 5)
    return
Project: quadpy    Author: nschloe    | project source | file source
def test_tetrahedron():
    quadpy.tetrahedron.integrate(
            lambda x: numpy.exp(x[0]),
            numpy.array([
                [0, 0, 0], [1, 0, 0], [0, 1, 0], [0, 0, 1]
                ], dtype=float),
            quadpy.tetrahedron.ShunnHam(3)
            )

    quadpy.tetrahedron.integrate(
            lambda x: [numpy.exp(x[0]), numpy.exp(x[1])],
            numpy.stack([
                [[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0], [0.0, 0, 1]],
                [[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0], [0.0, 0, 1]],
                [[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0], [0.0, 0, 1]],
                [[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0], [0.0, 0, 1]],
                ], axis=-2),
            quadpy.tetrahedron.ShunnHam(3)
            )
    return
Project: quadpy    Author: nschloe    | project source | file source
def test_triangle():
    quadpy.triangle.integrate(
            lambda x: numpy.exp(x[0]),
            numpy.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0]]),
            quadpy.triangle.Cubtri()
            )

    val = quadpy.triangle.integrate(
            lambda x: [numpy.exp(x[0]), numpy.exp(x[1])],
            numpy.stack([
                [[0.0, 0.0], [1.0, 0.0], [0.0, 1.0]],
                [[0.0, 0.0], [1.0, 0.0], [0.0, 1.0]],
                [[0.0, 0.0], [1.0, 0.0], [0.0, 1.0]],
                [[0.0, 0.0], [1.0, 0.0], [0.0, 1.0]],
                [[0.0, 0.0], [1.0, 0.0], [0.0, 1.0]],
                ], axis=-2),
            quadpy.triangle.Cubtri()
            )
    assert val.shape == (2, 5)
    return
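Throughout these quadpy tests, numpy.stack(..., axis=-2) packs several copies of a domain into one array so that a vector-valued integrand can be integrated over all domains at once; for triangles the packed shape is (corners, domains, coordinates):

import numpy

tri = [[0.0, 0.0], [1.0, 0.0], [0.0, 1.0]]
numpy.stack([tri] * 5, axis=-2).shape  # (3, 5, 2)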
Project: quadpy    Author: nschloe    | project source | file source
def transform(xi, cube):
    '''Transform the points `xi` from the reference cube to `cube`.
    '''
    # For d==2, the result used to be computed with
    #
    # out = (
    #     + outer(0.25*(1.0-xi[0])*(1.0-xi[1]), cube[0, 0])
    #     + outer(0.25*(1.0+xi[0])*(1.0-xi[1]), cube[1, 0])
    #     + outer(0.25*(1.0-xi[0])*(1.0+xi[1]), cube[0, 1])
    #     + outer(0.25*(1.0+xi[0])*(1.0+xi[1]), cube[1, 1])
    #     )
    #
    # This array of multiplications and additions is reminiscent of dot(), and
    # indeed tensordot() can handle the situation. We just need to compute the
    # `1+-xi` products and align them with `cube`.
    one_mp_xi = numpy.stack([
        0.5 * (1.0 - xi),
        0.5 * (1.0 + xi),
        ], axis=1)
    a = helpers.n_outer(one_mp_xi)

    # TODO kahan tensordot
    # <https://stackoverflow.com/q/45372098/353337>
    d = xi.shape[0]
    return numpy.tensordot(a, cube, axes=(range(d), range(d)))