Python astropy.io.fits module: writeto() example source code

The following code examples, extracted from open-source Python projects, illustrate how to use astropy.io.fits.writeto().
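
For reference, here is a minimal, self-contained sketch of the call pattern shared by the examples below; the array contents and file name are placeholders:

import numpy as np
from astropy.io import fits

# Placeholder image data and a minimal header.
data = np.zeros((64, 64), dtype=np.float32)
header = fits.Header()
header['OBJECT'] = 'example'

# Write a single-HDU FITS file, replacing any existing file of that name.
fits.writeto('example.fits', data, header, overwrite=True)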

Project: integration-prototype    Author: SKA-ScienceDataProcessor
def save_image(imager, grid_data, grid_norm, output_file):
    """Makes an image from gridded visibilities and saves it to a FITS file.

    Args:
        imager (oskar.Imager):          Handle to configured imager.
        grid_data (numpy.ndarray):      Final visibility grid.
        grid_norm (float):              Grid normalisation to apply.
        output_file (str):              Name of output FITS file to write.
    """
    # Make the image (take the FFT, normalise, and apply grid correction).
    imager.finalise_plane(grid_data, grid_norm)
    grid_data = numpy.real(grid_data)

    # Trim the image if required.
    border = (imager.plane_size - imager.image_size) // 2
    if border > 0:
        end = border + imager.image_size
        grid_data = grid_data[border:end, border:end]

    # Write the FITS file.
    hdr = fits.header.Header()
    fits.writeto(output_file, grid_data, hdr, clobber=True)
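
Note: clobber=True was deprecated in favour of overwrite=True in astropy 1.3 and has since been removed, so with a current astropy the final call of this snippet would instead look like the following sketch (placeholder array and file name):

import numpy
from astropy.io import fits

grid_data = numpy.zeros((128, 128))   # stands in for the trimmed image plane
fits.writeto('output.fits', grid_data, fits.Header(), overwrite=True)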
Project: algorithm-reference-library    Author: SKA-ScienceDataProcessor
def export_image_to_fits(im: Image, fitsfile: str = 'imaging.fits'):
    """ Write an image to fits

    :param im: Image
    :param fitsfile: Name of output fits file
    """
    assert isinstance(im, Image), im
    return fits.writeto(filename=fitsfile, data=im.data, header=im.wcs.to_header(), overwrite=True)
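
The ARL Image class is not required to reproduce this pattern; the sketch below (hypothetical data and WCS) writes an array together with a header generated from an astropy.wcs.WCS object, which is what im.wcs.to_header() supplies above:

import numpy as np
from astropy.io import fits
from astropy.wcs import WCS

# Hypothetical 2-D image and a simple celestial WCS.
data = np.zeros((128, 128), dtype=np.float32)
wcs = WCS(naxis=2)
wcs.wcs.ctype = ['RA---SIN', 'DEC--SIN']
wcs.wcs.crval = [0.0, -30.0]
wcs.wcs.cdelt = [-0.001, 0.001]
wcs.wcs.crpix = [64.0, 64.0]

fits.writeto('imaging.fits', data, header=wcs.to_header(), overwrite=True)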
Project: ZOGY    Author: pmvreeswijk
def ds9_arrays(**kwargs):

    cmd = ['ds9', '-zscale', '-zoom', '4', '-cmap', 'heat']
    for name, array in kwargs.items():
        # write array to fits
        fitsfile = 'ds9_'+name+'.fits'
        fits.writeto(fitsfile, np.array(array).astype(np.float32), clobber=True)            
        # append to command
        cmd.append(fitsfile)

    #print 'cmd', cmd
    result = subprocess.call(cmd)

################################################################################
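
Because writeto() only puts the array on disk, a quick round trip with fits.getdata() is a convenient way to confirm what was written, for example the float32 conversion performed above (file name is a placeholder):

import numpy as np
from astropy.io import fits

# Write a small float32 array and read it straight back.
arr = np.arange(16, dtype=np.float32).reshape(4, 4)
fits.writeto('ds9_example.fits', arr, overwrite=True)

data, header = fits.getdata('ds9_example.fits', header=True)
assert data.shape == (4, 4) and data.dtype.name == 'float32'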
Project: decode    Author: deshima-dev
def savefits(cube, fitsname, **kwargs):
        logger = getLogger('decode.io.savefits')

        ### pick up kwargs
        dropdeg = kwargs.pop('dropdeg', False)
        ndim    = len(cube.dims)

        ### load yaml
        FITSINFO = get_data('decode', 'data/fitsinfo.yaml')
        hdrdata = yaml.load(FITSINFO)

        ### default header
        if ndim == 2:
            header = fits.Header(hdrdata['dcube_2d'])
            data   = cube.values.T
        elif ndim == 3:
            if dropdeg:
                header = fits.Header(hdrdata['dcube_2d'])
                data   = cube.values[:, :, 0].T
            else:
                header = fits.Header(hdrdata['dcube_3d'])
                data   = cube.values.T
        else:
            raise TypeError(ndim)

        ### update Header
        if cube.coordsys == 'AZEL':
            header.update({'CTYPE1': 'dAZ', 'CTYPE2': 'dEL'})
        elif cube.coordsys == 'RADEC':
            header.update({'OBSRA': float(cube.xref), 'OBSDEC': float(cube.yref)})
        else:
            pass
        header.update({'CRVAL1': float(cube.x[0]),
                       'CDELT1': float(cube.x[1] - cube.x[0]),
                       'CRVAL2': float(cube.y[0]),
                       'CDELT2': float(cube.y[1] - cube.y[0]),
                       'DATE': datetime.now(timezone('UTC')).isoformat()})
        if (ndim == 3) and (not dropdeg):
            header.update({'CRVAL3': float(cube.kidid[0])})

        fits.writeto(fitsname, data, header, **kwargs)
        logger.info('{} has been created.'.format(fitsname))
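
The header above is driven by decode's bundled fitsinfo.yaml; an equivalent header can also be assembled directly from a dict of cards, as in this sketch with made-up axis values:

import numpy as np
from astropy.io import fits

data = np.zeros((32, 32))
header = fits.Header()
header.update({'CTYPE1': 'dAZ', 'CTYPE2': 'dEL',
               'CRVAL1': 0.0, 'CDELT1': 0.002,
               'CRVAL2': 0.0, 'CDELT2': 0.002})
fits.writeto('dcube_2d.fits', data, header, overwrite=True)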
Project: CAAPR    Author: Stargrazer82301
def saveto(self, filepath, tiff16bit=False):
        filepath = os.path.expanduser(filepath)
        lowfile = filepath.lower()

        # 16-bit TIFF file
        if tiff16bit and lowfile.endswith((".tif",".tiff")):
            # tiff header as uint16 words
            lsNX,msNX = split16(self.shape[0])
            lsNY,msNY = split16(self.shape[1])
            lsBYTES,msBYTES = split16(self.shape[0]*self.shape[1]*6)
            header = ( \
                0x4949, 42, 8, 0,                  #   0: TIFF header (little endian)
                12,                                #   8: number of directory entries
                                                   #  (directory entry: tag,type,count,0,value/offset x 2)
                256, 4, 1, 0, lsNX, msNX,          #  10: ImageWidth, 1 LONG
                257, 4, 1, 0, lsNY, msNY,          #  22: ImageLength, 1 LONG
                258, 3, 3, 0, 158, 0,              #  34: BitsPerSample, 3 SHORT (-> offset!)
                259, 3, 1, 0, 1, 0,                #  46: Compression, 1 SHORT
                262, 3, 1, 0, 2, 0,                #  58: PhotometricInterpretation, 1 SHORT
                273, 4, 1, 0, 180, 0,              #  70: StripOffsets, 1 LONG
                277, 3, 1, 0, 3, 0,                #  82: SamplesPerPixel, 1 SHORT
                278, 4, 1, 0, lsNY, msNY,          #  94: RowsPerStrip, 1 LONG
                279, 4, 1, 0, lsBYTES, msBYTES,    # 106: StripByteCounts, 1 LONG
                282, 5, 1, 0, 164, 0,              # 118: XResolution, 1 RATIONAL (-> offset!)
                283, 5, 1, 0, 172, 0,              # 130: YResolution, 1 RATIONAL (-> offset!)
                296, 3, 1, 0, 2, 0,                # 142: ResolutionUnit, 1 SHORT
                0, 0,                              # 154: IFD list terminator
                16, 16, 16,                        # 158: BitsPerSample value
                72, 0, 1, 0,                       # 164: XResolution value
                72, 0, 1, 0 )                      # 172: YResolution value
                                                   # 180: Image data
            out = open(filepath, 'w')
            out.write(np.array(header,dtype=np.uint16).tostring())
            data = self.scaledpixelarray(0,65535.99)
            out.write(np.flipud(np.rollaxis(data,1)).astype(np.uint16).tostring())
            out.close()

        # standard 8-bit image file
        elif lowfile.endswith((".bmp",".gif",".jpg",".jpeg",".png",".tif",".tiff",".pdf")):
            self.ensurepil(invalidate=False)
            if lowfile.endswith((".jpg",".jpeg")): self.dpil.save(filepath, "JPEG", quality=80)
            elif lowfile.endswith((".png")): self.dpil.save(filepath, "PNG")
            elif lowfile.endswith((".tif",".tiff")): self.dpil.save(filepath, "TIFF")
            elif lowfile.endswith((".pdf")): self.dpil.save(filepath, "PDF")

        # FITS file
        elif lowfile.endswith(".fits"):
            self.ensurearr(invalidate=False)
            data = np.dstack(( self.darr[:,:,2],self.darr[:,:,1],self.darr[:,:,0] ))
            if os.path.exists(filepath): os.remove(filepath)  # avoid message produced by fits library
            pyfits.writeto(filepath, data.T, clobber=True)

        # unsupported type
        else:
            raise ValueError("Filepath argument has unsupported filename extension")

    ## This function plots the image to the current axes in the current matplotlib figure. The image is
    # resampled to fit the axes. If \em fill is True, the image is stretched to fit the axes in both directions
    # changing the image aspect ratio if needed. If \em fill is False (the default), the axes aspect ratio
    # is adjusted so that the image aspect ratio is preserved.
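
Only the FITS branch of the method above calls pyfits.writeto; stripped of the class machinery it reduces to the following sketch (placeholder RGB array; with a current astropy, overwrite=True replaces the remove-then-clobber dance):

import numpy as np
from astropy.io import fits

# Hypothetical height x width x 3 RGB array; stack the planes in
# B, G, R order and transpose, as saveto() does above.
rgb = np.random.rand(100, 200, 3)
data = np.dstack((rgb[:, :, 2], rgb[:, :, 1], rgb[:, :, 0]))
fits.writeto('image.fits', data.T, overwrite=True)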
Project: pyphot    Author: mfouesneau
def _fits_writeto(filename, data, header=None, output_verify='exception',
                  clobber=False, checksum=False):
    """
    Create a new FITS file using the supplied data/header.
    Patched version of pyfits to correctly include provided header

    Parameters
    ----------
    filename : file path, file object, or file like object
        File to write to.  If opened, must be opened in a writeable binary
        mode such as 'wb' or 'ab+'.

    data : array, record array, or groups data object
        data to write to the new file

    header : `Header` object, optional
        the header associated with ``data``. If `None`, a header
        of the appropriate type is created for the supplied data. This
        argument is optional.

    output_verify : str
        Output verification option.  Must be one of ``"fix"``, ``"silentfix"``,
        ``"ignore"``, ``"warn"``, or ``"exception"``.  May also be any
        combination of ``"fix"`` or ``"silentfix"`` with ``"+ignore"``,
        ``"+warn"``, or ``"+exception"`` (e.g. ``"fix+warn"``).  See :ref:`verify`
        for more info.

    clobber : bool, optional
        If `True`, and if filename already exists, it will overwrite
        the file.  Default is `False`.

    checksum : bool, optional
        If `True`, adds both ``DATASUM`` and ``CHECKSUM`` cards to the
        headers of all HDU's written to the file
    """

    hdu = pyfits.convenience._makehdu(data, header)
    hdu.header.update(header.cards)
    if hdu.is_image and not isinstance(hdu, pyfits.PrimaryHDU):
        hdu = pyfits.PrimaryHDU(data, header=header)
    hdu.writeto(filename, clobber=clobber, output_verify=output_verify,
                checksum=checksum)
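
With a current astropy the same effect, ensuring that the supplied header cards actually end up in the primary HDU, can be obtained directly; a minimal sketch:

import numpy as np
from astropy.io import fits

data = np.zeros((10, 10))
header = fits.Header()
header['MYKEY'] = ('some value', 'card carried over into the output file')

# Building the PrimaryHDU explicitly guarantees the header is attached.
hdu = fits.PrimaryHDU(data=data, header=header)
hdu.writeto('patched.fits', overwrite=True, output_verify='exception')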
Project: pyphot    Author: mfouesneau
def write(self, fname, **kwargs):
        """ write table into file

        Parameters
        ----------
        fname: str
            filename to export the table into

        .. note::
            additional keywords are forwarded to the corresponding library call:
            :func:`pyfits.writeto`, :func:`pyfits.append`, or :func:`np.savetxt`
        """
        extension = kwargs.pop('extension', None)
        if extension is None:
            extension = fname.split('.')[-1]
        if (extension == 'csv'):
            comments = kwargs.pop('comments', '#')
            delimiter = kwargs.pop('delimiter', ',')
            commentedHeader = kwargs.pop('commentedHeader', False)
            hdr = _ascii_generate_header(self, comments=comments, delimiter=delimiter,
                                         commentedHeader=commentedHeader)
            header = kwargs.pop('header', hdr)
            np.savetxt(fname, self.data, delimiter=delimiter, header=header,
                       comments='', **kwargs)
        elif (extension in ['txt', 'dat']):
            comments = kwargs.pop('comments', '#')
            delimiter = kwargs.pop('delimiter', ' ')
            commentedHeader = kwargs.pop('commentedHeader', True)
            hdr = _ascii_generate_header(self, comments=comments, delimiter=delimiter,
                                         commentedHeader=commentedHeader)
            header = kwargs.pop('header', hdr)
            np.savetxt(fname, self.data, delimiter=delimiter, header=header,
                       comments='', **kwargs)
        elif (extension == 'fits'):
            hdr0 = kwargs.pop('header', None)
            append = kwargs.pop('append', False)
            hdr = _fits_generate_header(self)
            if hdr0 is not None:
                hdr.update(**hdr0)
            if append:
                _fits_append(fname, self.data, hdr, **kwargs)
            else:
                # patched version to correctly include the header
                _fits_writeto(fname, self.data, hdr, **kwargs)
        elif (extension in ('hdf', 'hdf5', 'hd5')):
            _hdf5_write_data(fname, self.data, header=self.header,
                             units=self._units, comments=self._desc,
                             aliases=self._aliases, **kwargs)
        else:
            raise Exception('Format {0:s} not handled'.format(extension))
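
For the FITS branch it is worth noting that fits.writeto also accepts a numpy structured (record) array, in which case a binary table extension is created automatically; a minimal sketch with hypothetical columns:

import numpy as np
from astropy.io import fits

# A small structured array behaves like a FITS binary table.
tab = np.array([(1, 2.5), (2, 3.5)],
               dtype=[('ID', 'i4'), ('FLUX', 'f8')])
fits.writeto('table.fits', tab, overwrite=True)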
Project: goodman    Author: soar-telescope
def remove_conflictive_keywords(path, file_list):
    """Removes problematic keywords

    The blue camera has a set of keywords whose comments contain non-ascii
    characters, in particular the degree symbol. Those keywords are not
    needed in any stage of the data reduction and are therefore removed.
    The data will be overwritten with the keywords removed. The user will
    need to have backups of raw data.

    Notes:
        This function addresses a problem present in old data; headers in newer
        data are already compliant.

    Args:
        path (str): Path to the folder containing the files
        file_list (list): List of files to remove keywords

    """
    log_ccd.debug('Removing conflictive keywords in Blue Camera Headers')
    log_ccd.warning('Files will be overwritten')
    for blue_file in file_list:
        full_path = os.path.join(path, blue_file)
        log_ccd.debug('Processing file {:s}'.format(blue_file))
        try:
            data, header = fits.getdata(full_path,
                                        header=True,
                                        ignore_missing_end=True)

            keys_to_remove = ['PARAM0',
                              'PARAM61',
                              'PARAM62',
                              'PARAM63',
                              'NAXIS3']

            if data.ndim == 3:
                header['NAXIS'] = 2
                data = data[0]

                log_ccd.debug('Modified file to be 2D instead of 3D '
                              '(problematic)')

            for keyword in keys_to_remove:
                header.remove(keyword)

                log_ccd.debug('Removed conflictive keyword '
                              '{:s}'.format(keyword))

            log_ccd.debug('Updated headers')

            fits.writeto(full_path,
                         data,
                         header,
                         clobber=True)  # overwrite=True in current astropy

        except KeyError as error:
            log_ccd.debug(error)


# spectroscopy specific functions
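
The read-modify-overwrite pattern used above can be reproduced in a few lines with a current astropy (placeholder file name and keyword; overwrite=True replaces clobber=True):

from astropy.io import fits

# Read data and header together, drop an unwanted card,
# then overwrite the file in place.
data, header = fits.getdata('blue_frame.fits', header=True,
                            ignore_missing_end=True)
header.remove('PARAM0', ignore_missing=True)
fits.writeto('blue_frame.fits', data, header, overwrite=True)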
Project: ZOGY    Author: pmvreeswijk
def fits2ldac (header4ext2, data4ext3, fits_ldac_out, doSort=True):

    """This function converts the binary FITS table from Astrometry.net to
    a binary FITS_LDAC table that can be read by PSFex. [header4ext2]
    is what will be recorded as a single long string in the data part
    of the 2nd extension of the output table [fits_ldac_out], and
    [data4ext3] is the data part of an HDU that will define both the
    header and data parts of extension 3 of [fits_ldac_out].

    """

    # convert header to single (very) long string
    ext2_str = header4ext2.tostring(endcard=False, padding=False)

    # if the following line is not added, the very end of the data
    # part of extension 2 is written to a fits table such that PSFex
    # runs into a segmentation fault when attempting to read it (took
    # me ages to find out!).
    ext2_str += 'END                                                                          END'

    # read into string array
    ext2_data = np.array([ext2_str])

    # determine format string for header of extension 2
    formatstr = str(len(ext2_str))+'A'
    # create table 1
    col1 = fits.Column(name='Field Header Card', array=ext2_data, format=formatstr)
    cols = fits.ColDefs([col1])
    ext2 = fits.BinTableHDU.from_columns(cols)
    # make sure these keywords are in the header
    ext2.header['EXTNAME'] = 'LDAC_IMHEAD'
    ext2.header['TDIM1'] = '(80, {0})'.format(len(ext2_str)/80)

    # simply create extension 3 from [data4ext3]
    ext3 = fits.BinTableHDU(data4ext3)
    # extname needs to be as follows
    ext3.header['EXTNAME'] = 'LDAC_OBJECTS'

    # sort output table by number column if needed
    if doSort:
        index_sort = np.argsort(ext3.data['NUMBER'])
        ext3.data = ext3.data[index_sort]

    # create primary HDU
    prihdr = fits.Header()
    prihdu = fits.PrimaryHDU(header=prihdr)
    prihdu.header['EXPTIME'] = header4ext2['EXPTIME']
    prihdu.header['FILTNAME'] = header4ext2['FILTNAME']
    # prihdu.header['SEEING'] = header4ext2['SEEING']  # need to calculate and add
    prihdu.header['BKGSIG'] = header4ext2['SEXBKDEV']


    # write hdulist to output LDAC fits table
    hdulist = fits.HDUList([prihdu, ext2, ext3])
    hdulist.writeto(fits_ldac_out, clobber=True)
    hdulist.close()

################################################################################
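
As the last example shows, fits.writeto() is a convenience wrapper for writing a single HDU; when a multi-extension file such as an LDAC table is needed, the usual route is to build the HDUList explicitly, for instance:

import numpy as np
from astropy.io import fits

primary = fits.PrimaryHDU()
image = fits.ImageHDU(data=np.zeros((8, 8)), name='SCI')
table = fits.BinTableHDU.from_columns(
    [fits.Column(name='NUMBER', format='J', array=np.arange(5))])
table.name = 'CAT'

fits.HDUList([primary, image, table]).writeto('multi_ext.fits', overwrite=True)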