Python osgeo.gdal module, Open() example source code

We extracted the following 49 code examples from open-source Python projects to illustrate how to use osgeo.gdal.Open().
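As a primer before the project excerpts, the short sketch below shows the basic open-read-close pattern that these examples share; the file name is a placeholder and error handling is kept minimal.

from osgeo import gdal, gdalconst

# 'example.tif' is a placeholder path, not a file from any of the projects below.
ds = gdal.Open('example.tif', gdalconst.GA_ReadOnly)
if ds is None:
    raise IOError('GDAL could not open example.tif')

print(ds.RasterXSize, ds.RasterYSize, ds.RasterCount)  # raster dimensions and band count
band = ds.GetRasterBand(1)           # GDAL bands are 1-indexed
array = band.ReadAsArray()           # read the first band into a NumPy array
geotransform = ds.GetGeoTransform()  # (originX, pixelW, 0, originY, 0, pixelH)
projection = ds.GetProjection()      # projection as a WKT string
ds = None                            # dereference to close the dataset and flush caches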

Project: utilities    Author: SpaceNetChallenge    | Project source | File source
def convert_pixgwktList_to_wgs84wktList(inputRaster, wktPolygonPixList):
    ## returns # [[GeoWKT, PixWKT], ...]
    wgs84WKTList=[]
    if os.path.isfile(inputRaster):
        srcRaster = gdal.Open(inputRaster)
        targetSR = osr.SpatialReference()
        targetSR.ImportFromWkt(srcRaster.GetProjectionRef())
        geomTransform = srcRaster.GetGeoTransform()

    for wktPolygonPix in wktPolygonPixList:
        geom_wkt_list = pixelWKTToGeoWKT(wktPolygonPix, inputRaster, targetSR='',
                                         geomTransform=geomTransform,
                                         breakMultiPolygonPix=False)

        wgs84WKTList.extend(geom_wkt_list)

    # [[GeoWKT, PixWKT], ...]
    return wgs84WKTList
Project: DRIP-SLIP    Author: NASA-DEVELOP    | Project source | File source
def reprojectRaster(src,match_ds,dst_filename):
    src_proj = src.GetProjection()
    src_geotrans = src.GetGeoTransform()

    match_proj = match_ds.GetProjection()
    match_geotrans = match_ds.GetGeoTransform()
    wide = match_ds.RasterXSize
    high = match_ds.RasterYSize

    dst = gdal.GetDriverByName('GTiff').Create(dst_filename, wide, high, 1, gdalconst.GDT_Int16)
    dst.SetGeoTransform( match_geotrans )
    dst.SetProjection( match_proj)

    gdal.ReprojectImage(src, dst, src_proj, match_proj, gdalconst.GRA_Bilinear)

    del dst # Flush
    return(gdal.Open(dst_filename,gdalconst.GA_ReadOnly))


#finds the intersecting extent of a series of scenes (left,right,bottom,top are arrays containing the respective lat/lon of the left,right,bottom,top of each image)
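The excerpt above ends before that function's body; a minimal sketch of how such an intersection could be computed, assuming left, right, bottom and top are per-scene coordinate arrays as the comment states (the function name and return layout are hypothetical), is:

def intersecting_extent(left, right, bottom, top):
    # The shared extent is bounded by the innermost edge on each side:
    # the largest left/bottom and the smallest right/top across all scenes.
    inter_left = max(left)
    inter_right = min(right)
    inter_bottom = max(bottom)
    inter_top = min(top)
    if inter_left >= inter_right or inter_bottom >= inter_top:
        return None  # the scenes do not overlap
    return (inter_left, inter_right, inter_bottom, inter_top)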
Project: demcoreg    Author: dshean    | Project source | File source
def get_lulc_source(ds):
    """For a given input dataset extent, select the appropriate mask source (NLCD vs. global bareground)
    """
    ds_geom = geolib.ds_geom(ds)
    lulc_fn = get_nlcd_fn()
    lulc_ds = gdal.Open(lulc_fn)
    lulc_geom = geolib.ds_geom(lulc_ds)
    #If the dem geom is within CONUS (nlcd extent), use it
    geolib.geom_transform(ds_geom, t_srs=lulc_geom.GetSpatialReference())

    if lulc_geom.Contains(ds_geom):
        print("Using NLCD 30m data for rockmask")
        lulc_source = 'nlcd'
    else:
        print("Using global 30m bare ground data for rockmask")
        #Otherwise for Global, use 30 m Bare Earth percentage 
        lulc_source = 'bareground'
        #lulc_fn = get_bareground_fn()
        #lulc_ds = gdal.Open(lulc_fn)
    return lulc_source
Project: DeepOSM    Author: trailbehind    | Project source | File source
def read_naip(file_path, bands_to_use):
    """
    Read in a NAIP, based on www.machinalis.com/blog/python-for-geospatial-data-processing.

    Bands_to_use is an array like [0,0,0,1], designating whether to use each band (R, G, B, IR).
    """
    raster_dataset = gdal.Open(file_path, gdal.GA_ReadOnly)

    bands_data = []
    index = 0
    for b in range(1, raster_dataset.RasterCount + 1):
        band = raster_dataset.GetRasterBand(b)
        if bands_to_use[index] == 1:
            bands_data.append(band.ReadAsArray())
        index += 1
    bands_data = numpy.dstack(bands_data)

    return raster_dataset, bands_data
Project: DeepOSM    Author: trailbehind    | Project source | File source
def tag_with_locations(test_images, predictions, tile_size, state_abbrev):
    """Combine image data with label data, so info can be rendered in a map and list UI.

    Add location data for convenience too.
    """
    combined_data = []
    for idx, img_loc_tuple in enumerate(test_images):
        raster_filename = img_loc_tuple[2]
        raster_dataset = gdal.Open(os.path.join(NAIP_DATA_DIR, raster_filename), gdal.GA_ReadOnly)
        raster_tile_x = img_loc_tuple[1][0]
        raster_tile_y = img_loc_tuple[1][1]
        ne_lat, ne_lon = pixel_to_lat_lon_web_mercator(raster_dataset, raster_tile_x +
                                                       tile_size, raster_tile_y)
        sw_lat, sw_lon = pixel_to_lat_lon_web_mercator(raster_dataset, raster_tile_x,
                                                       raster_tile_y + tile_size)
        certainty = predictions[idx][0]
        formatted_info = {'certainty': certainty, 'ne_lat': ne_lat, 'ne_lon': ne_lon,
                          'sw_lat': sw_lat, 'sw_lon': sw_lon, 'raster_tile_x': raster_tile_x,
                          'raster_tile_y': raster_tile_y, 'raster_filename': raster_filename,
                          'state_abbrev': state_abbrev, 'country_abbrev': 'USA'
                          }
        combined_data.append(formatted_info)
    return combined_data
Project: utilities    Author: SpaceNetChallenge    | Project source | File source
def import_summary_geojson(geojsonfilename, removeNoBuildings=True):
    # driver = ogr.GetDriverByName('geojson')
    datasource = ogr.Open(geojsonfilename, 0)

    layer = datasource.GetLayer()
    print(layer.GetFeatureCount())

    buildingList = []
    for idx, feature in enumerate(layer):

        poly = feature.GetGeometryRef()

        if poly:
            if removeNoBuildings:
                if feature.GetField('BuildingId') != -1:
                    buildingList.append({'ImageId': feature.GetField('ImageId'), 'BuildingId': feature.GetField('BuildingId'),
                                  'poly': feature.GetGeometryRef().Clone()})
            else:

                buildingList.append({'ImageId': feature.GetField('ImageId'), 'BuildingId': feature.GetField('BuildingId'),
                              'poly': feature.GetGeometryRef().Clone()})

    return buildingList
Project: utilities    Author: SpaceNetChallenge    | Project source | File source
def import_chip_geojson(geojsonfilename, ImageId=''):
    # driver = ogr.GetDriverByName('geojson')
    datasource = ogr.Open(geojsonfilename, 0)
    if ImageId=='':
        ImageId = geojsonfilename
    layer = datasource.GetLayer()
    print(layer.GetFeatureCount())

    polys = []
    BuildingId = 0
    for idx, feature in enumerate(layer):

        poly = feature.GetGeometryRef()

        if poly:
            BuildingId = BuildingId + 1
            polys.append({'ImageId': ImageId, 'BuildingId': BuildingId,
                          'poly': feature.GetGeometryRef().Clone()})

    return polys
Project: utilities    Author: SpaceNetChallenge    | Project source | File source
def mergePolyList(geojsonfilename):

    multipolygon = ogr.Geometry(ogr.wkbMultiPolygon)
    datasource = ogr.Open(geojsonfilename, 0)

    layer = datasource.GetLayer()
    print(layer.GetFeatureCount())


    for idx, feature in enumerate(layer):

        poly = feature.GetGeometryRef()

        if poly:

            multipolygon.AddGeometry(feature.GetGeometryRef().Clone())

    return multipolygon
Project: rastercube    Author: terrai    | Project source | File source
def import_glcf_tile(glcf_header, cell_num, tilefile):
    glcfgrid = grids.GLCFGrid()
    glcf_data = np.zeros((glcfgrid.cell_height, glcfgrid.cell_width, 1),
                         dtype=np.uint8)

    with tempfile.NamedTemporaryFile() as f:
        # uncompress
        with gzip.open(tilefile, 'rb') as f_in, open(f.name, 'wb') as f_out:
            shutil.copyfileobj(f_in, f_out)
        gdal_ds = gdal.Open(f.name)
        assert gdal_ds is not None, "Failed to open GDAL dataset"
        band = gdal_ds.GetRasterBand(1)
        nodata_val = band.GetNoDataValue()
        print("NoData : ", nodata_val)
        glcf_data[:, :, 0] = band.ReadAsArray()

    glcf_header.write_frac((cell_num, 0), glcf_data)

    print('Finished %d' % cell_num)

    return True
Project: rastercube    Author: terrai    | Project source | File source
def reproject_tiff_from_model(old_name, new_name, model, unit):
    """
    Reprojects a tiff onto a tiff model. Can be used to warp a tiff.
    Keyword arguments:
    old_name -- the name of the old tiff file
    new_name -- the name of the output tiff file
    model -- the gdal dataset which will be used to warp the tiff
    unit -- the gdal data type (e.g. gdal.GDT_Byte) used to create the output
    """
    mem_drv = gdal.GetDriverByName("MEM")

    old = gdal.Open(old_name)

    new = mem_drv.Create(new_name, model.RasterXSize, model.RasterYSize, 1,
                         unit)

    new.SetGeoTransform(model.GetGeoTransform())
    new.SetProjection(model.GetProjection())

    res = gdal.ReprojectImage(old, new, old.GetProjection(),
                              model.GetProjection(), gdal.GRA_NearestNeighbour)

    assert res == 0, 'Error in ReprojectImage'
    arr = new.ReadAsArray()
    new = None
    return arr
Project: python_scripting_for_spatial_data_processing    Author: upsdeepak    | Project source | File source
def readImageMetaData(inputFile, name):
    # Open the dataset in read only mode
    dataset = gdal.Open(inputFile, gdal.GA_ReadOnly)
    # Check that the image has been opened
    if not dataset is None:
        # Get the meta value specified
        metaData = dataset.GetMetadataItem(name)
        # Check that it is present
        if metaData == None:
            # If not present, print error.
            print("Could not find \'", name, "\' item.")
        else:
            # Else print out the metaData value.
            print(name, "=\'", metaData, "\'")
    else:
        # Print an error message if the file
        # could not be opened.
        print("Could not open the input image file: ", inputFile)

# A function to read a meta-data item
# from an image band
Project: python_scripting_for_spatial_data_processing    Author: upsdeepak    | Project source | File source
def listImageMetaData(inputFile):
    # Open the dataset in Read Only mode
    dataset = gdal.Open(inputFile, gdal.GA_ReadOnly)
    # Check that the image has been opened.
    if not dataset is None:
        # Get the meta-data dictionary
        metaData = dataset.GetMetadata_Dict()
        # Check it has entries.
        if len(metaData) == 0:
            # if it has no entries return
            # error message.
            print("There is no image meta-data.")
        else:
            # Otherwise, print out the
            # meta-data.
            # Loop through each entry.
            for metaItem in metaData:
                print(metaItem)
    else:
        # Print an error message if the file
        # could not be opened.
        print("Could not open the input image file", inputFile)

# This is the first part of the script to
# be executed.
Project: python_scripting_for_spatial_data_processing    Author: upsdeepak    | Project source | File source
def setBandName(inputFile, band, name):
    # Open the image file, in update mode
    # so that the image can be edited.
    dataset = gdal.Open(inputFile, gdal.GA_Update)
    # Check that the image has opened
    if not dataset is None:
        # Get the image Band
        imgBand = dataset.GetRasterBand(band)
        # Check the image band was available
        if not imgBand is None:
            # Set the image band name
            imgBand.SetDescription(name)
        else:
            # Print out an error message
            print("Could not open the image band: ", band)

    else:
        # Print the error message if the file
        # could not be opened
        print("Could not open the input image file: ", inputFile)

# This is the first part of the script
# to be executed
Project: chorospy    Author: spyrostheodoridis    | Project source | File source
def raster2array(rasterfn):
    raster = gdal.Open(rasterfn)
    band = raster.GetRasterBand(1)
    nodata = band.GetNoDataValue()
    array = band.ReadAsArray()

    proj = raster.GetProjection()
    inproj = osr.SpatialReference()
    inproj.ImportFromWkt(proj)

    geoTransform = raster.GetGeoTransform()
    minx = geoTransform[0]
    maxy = geoTransform[3]
    maxx = minx + geoTransform[1]*raster.RasterXSize
    miny = maxy + geoTransform[5]*raster.RasterYSize
    extent =  [minx, maxx, miny, maxy]
    pixelSizeXY = [geoTransform[1], geoTransform[5]]
    del raster, band
    return [array, nodata, extent, inproj, pixelSizeXY]

#clip a raster by vector
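The comment above introduces a clipping helper whose body is not part of this excerpt; a minimal, hedged sketch of clipping a raster with a vector cutline via gdal.Warp (file names are placeholders, and the nodata value is an assumption) could look like this:

from osgeo import gdal

def clip_raster_by_vector(raster_fn, vector_fn, out_fn, nodata=0):
    # gdal.Warp with a cutline masks pixels outside the vector geometry;
    # cropToCutline also shrinks the output extent to the cutline bounds.
    out_ds = gdal.Warp(out_fn, raster_fn,
                       cutlineDSName=vector_fn,
                       cropToCutline=True,
                       dstNodata=nodata)
    out_ds = None  # close and flush to disk
    return out_fn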
Project: pygeotools    Author: dshean    | Project source | File source
def gdal2np_dtype(b):
    """
    Get NumPy datatype that corresponds with GDAL RasterBand datatype
    Input can be filename, GDAL Dataset, GDAL RasterBand, or GDAL integer dtype
    """
    dt_dict = gdal_array.codes
    if isinstance(b, str):
        b = gdal.Open(b)
    if isinstance(b, gdal.Dataset):
        b = b.GetRasterBand(1)
    if isinstance(b, gdal.Band):
        b = b.DataType
    if isinstance(b, int):
        np_dtype = dt_dict[b]
    else:
        np_dtype = None
        print("Input must be GDAL Dataset or RasterBand object")
    return np_dtype

#Replace nodata value in GDAL band
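The helper announced by that comment (replacing a band's nodata value) is not included in the excerpt; a minimal sketch, assuming the band should simply be rewritten in place and that the helper name is hypothetical, might be:

import numpy as np

def replace_band_ndv(b, new_ndv):
    # Read the band, swap the old nodata value for the new one, write it back,
    # and record the new nodata value on the band.
    old_ndv = b.GetNoDataValue()
    a = b.ReadAsArray()
    if old_ndv is not None:
        a = np.where(a == old_ndv, new_ndv, a)
    b.WriteArray(a)
    b.SetNoDataValue(new_ndv)
    b.FlushCache()
    return b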
Project: pygeotools    Author: dshean    | Project source | File source
def copyproj(src_fn, dst_fn, gt=True):
    """Copy projection and geotransform from one raster file to another
    """
    src_ds = gdal.Open(src_fn, gdal.GA_ReadOnly)
    dst_ds = gdal.Open(dst_fn, gdal.GA_Update)
    dst_ds.SetProjection(src_ds.GetProjection())
    if gt:
        src_gt = np.array(src_ds.GetGeoTransform())
        src_dim = np.array([src_ds.RasterXSize, src_ds.RasterYSize])
        dst_dim = np.array([dst_ds.RasterXSize, dst_ds.RasterYSize])
        #This preserves dst_fn resolution
        if np.any(src_dim != dst_dim):
            res_factor = src_dim/dst_dim.astype(float)
            src_gt[[1, 5]] *= max(res_factor)
            #src_gt[[1, 5]] *= min(res_factor)
            #src_gt[[1, 5]] *= res_factor
        dst_ds.SetGeoTransform(src_gt)
    src_ds = None
    dst_ds = None
Project: GroundSurveyor    Author: bmenard1    | Project source | File source
def split_metatiles(output_dir, metatiles):

    # Determine metatile size and bit depth.
    mt_ds = gdal.Open(metatiles[0])
    assert mt_ds.RasterXSize % UF_TILE_SIZE == 0
    assert mt_ds.RasterYSize % UF_TILE_SIZE == 0

    x_uf_count = mt_ds.RasterXSize // UF_TILE_SIZE
    y_uf_count = mt_ds.RasterYSize // UF_TILE_SIZE

    for ytile in range(y_uf_count):
        print('Process Row: ', ytile)
        split_row_of_unit_fields(output_dir, metatiles, ytile)

    logging.info('Wrote %d piles to directory %s.',
                 x_uf_count * y_uf_count, output_dir)
Project: chip-from-vrt    Author: PlatformStories    | Project source | File source
def mask_chip(feature):
    '''
    Apply polygon mask to bounding-box chips. Chips must be named
        'feature_id.tif' and exist in the current working directory.
    '''
    fn = feature[:-4] + '.geojson'
    chip = gdal.Open(feature)

    # Create ogr vector file for gdal_rasterize
    vectordata = {'type': 'FeatureCollection', 'features': [feature]}

    with open(fn, 'wb') as f:
        geojson.dump(vectordata, f)

    # Mask raster
    cmd = 'gdal_rasterize -q -i -b 1 -b 2 -b 3 -burn 0 -burn 0 -burn 0 {} {}'.format(fn, feature)
    subprocess.call(cmd, shell=True)

    # Remove ogr vector file
    os.remove(fn)
Project: dzetsaka    Author: lennepkade    | Project source | File source
def rasterize(self,inRaster,inShape,inField):
        filename = tempfile.mktemp('.tif')        
        data = gdal.Open(inRaster,gdal.GA_ReadOnly)
        shp = ogr.Open(inShape)

        lyr = shp.GetLayer()

        driver = gdal.GetDriverByName('GTiff')
        dst_ds = driver.Create(filename,data.RasterXSize,data.RasterYSize, 1,gdal.GDT_Byte)
        dst_ds.SetGeoTransform(data.GetGeoTransform())
        dst_ds.SetProjection(data.GetProjection())
        OPTIONS = 'ATTRIBUTE='+inField
        gdal.RasterizeLayer(dst_ds, [1], lyr, None,options=[OPTIONS])
        data,dst_ds,shp,lyr=None,None,None,None


        return filename
Project: forecastVeg    Author: JohnNay    | Project source | File source
def clip(self):

        """
        This function clips the mosaicked, projected images to the size of the
        referenceImage

        """

        subprocess.call(['gdaltindex', self.extent, self.referenceImagePath])
        dataNames = sorted(glob.glob(self.fullPath + '/full*.tif'))
        splitAt = len(self.fullPath) + 1

        for i in range(len(dataNames)):
            x = dataNames[i]
            y = dataNames[i][:splitAt] + dataNames[i][splitAt+4:]
            subprocess.call(['gdalwarp', '-r', 'near', '-cutline', self.extent, '-crop_to_cutline', x, y, '-dstnodata', '9999'])

        for n in dataNames:
            os.remove(n)
        dataNames = sorted(glob.glob(self.fullPath + '/*.tif'))
        test = gdal.Open(dataNames[0]).ReadAsArray()
        logger.log('SUCCESS', 'Clipping complete!  %d %s files  were successfully clipped to the size of %s with dimensions %d rows by %d columns' % (len(dataNames), str(self.outformat), str(self.referenceImagePath), test.shape[0], test.shape[1]))
Project: CHaMP_Metrics    Author: SouthForkResearch    | Project source | File source
def rasterize_polygons(polygon_shp, template_raster, out_raster, field):
    """ generate a categorical raster based on polygons

    :rtype: None
    :param polygon_shp: input polygon shapefile
    :param template_raster: raster template for cellsize and extent
    :param out_raster: output raster file
    """

    # Source: https://pcjericks.github.io/py-gdalogr-cookbook/raster_layers.html

    gdal.UseExceptions()
    # Get template raster specs
    src_ds = gdal.Open(template_raster)
    if src_ds is None:
        print('Unable to open %s' % template_raster)
        sys.exit(1)
    try:
        srcband = src_ds.GetRasterBand(1)
    except RuntimeError as e:
        print('No band %i found' % 0)
        print(e)
        sys.exit(1)

    # Open the data source and read in the extent
    source_ds = ogr.Open(polygon_shp)
    source_layer = source_ds.GetLayer()

    target_ds = gdal.GetDriverByName('GTiff').Create(out_raster, src_ds.RasterXSize, src_ds.RasterYSize, 1, gdal.GDT_Float32)
    target_ds.SetGeoTransform(src_ds.GetGeoTransform())
    target_ds.SetProjection(src_ds.GetProjection())
    band = target_ds.GetRasterBand(1)
    band.SetNoDataValue(srcband.GetNoDataValue())

    # Rasterize
    gdal.RasterizeLayer(target_ds, [1], source_layer, options=["ATTRIBUTE={}".format(field)])
Project: DRIP-SLIP    Author: NASA-DEVELOP    | Project source | File source
def readTodayBands(path,row):
    allFiles=sorted(glob.glob(os.path.join(getCurrentDirectory(),'Today',path,row,'*.TIF')))
    allRasters=dict([])
    fileNumber=1
    percent=(fileNumber/len(allFiles))*100
    for file in allFiles:
        percent=(fileNumber/len(allFiles))*100
        sys.stdout.write("\rReading today's bands...%d%%" % percent)
        sys.stdout.flush()
        bandName=file[file.rfind('_')+1:-4]
        sys.stdout.write("\rReading today's bands...%d%%" % percent)
        sys.stdout.flush()
        allRasters[bandName]=gdal.Open(file,gdalconst.GA_ReadOnly)
        fileNumber+=1
    todayExtent = getRasterExtent(allRasters['B4'])#saves the extent of today's rasters (they'll match band 4) so that we can crop during the cloudmask backfill
    print('')
    return(allRasters,todayExtent)

#large function that will back-fill cloudy areas of most recent imagery using historic imagery
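The back-fill function itself is too long to reproduce here; as a rough illustration of the idea the comment describes (not the project's actual implementation), cloudy pixels flagged by a mask could be replaced with co-registered historic values:

import numpy as np

def backfill_clouds(today_band, historic_band, cloud_mask):
    # today_band and historic_band are 2D arrays with the same shape and extent;
    # cloud_mask is a boolean array that is True where today's pixel is cloudy.
    filled = np.array(today_band, copy=True)
    filled[cloud_mask] = historic_band[cloud_mask]
    return filled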
Project: DRIP-SLIP    Author: NASA-DEVELOP    | Project source | File source
def downloadLandsatScene(jd,year,month,day,path,row,directory):
    LandsatID=LSUniqueID(path,row,year,jd,os.path.join(directory,str(row)))
    downLoadCommand=getCurrentDirectory() + "/download_landsat_scene.py -o scene -b LC8 -d "+ year + month + day + ' -s ' + path + '0'+str(row)+ " -u usgs.txt --output " + LandsatID
    os.system(downLoadCommand)
    print('extracting...')
    tarName=extractTar(LandsatID,os.path.join(directory,str(row)))
    allFiles=glob.glob(os.path.join(directory,str(row),'*.TIF'))
    for filename in allFiles:
        bandName=filename.strip('.TIF')[filename.rfind('_')+1:]
        if bandName != 'B4' and bandName != 'B5' and bandName != 'B7' and bandName != 'B8' and bandName != 'BQA':
            os.remove(filename)
    try:
        shutil.rmtree(os.path.join(directory,str(row),tarName))
    except:
        print('No folder to delete called: ' + os.path.join(directory,str(row),tarName))
    try:
        os.remove(os.path.join(directory,str(row),tarName + '_MTL.txt'))
    except:
        print('No file to delete called: ' + os.path.join(directory,str(row),tarName + '_MTL.txt'))
    reprojectPanBand(gdal.Open(os.path.join(directory,str(row),tarName + '_B8.TIF'),gdalconst.GA_ReadOnly),gdal.Open(os.path.join(directory,str(row),tarName + '_B4.TIF'),gdalconst.GA_ReadOnly),os.path.join(directory,str(row),tarName + '_B8.TIF'))
    SLIP.model(year+month+day,path,str(row))
Project: unmixing    Author: arthur-e    | Project source | File source
def test_hall_rectification(self):
        '''Should rectify an image in the expected way.'''
        control_sets = {
            'High/Bright': [(331501.45,4694346.66), (319495.39,4706820.66), (298527.006,4691417.99)],
            'Low/Dark': [(322577.40,4658508.99), (361612.79,4694665.62), (378823.69,4692132.56)]
        }
        ref_raster = gdal.Open(os.path.join(self.test_dir, 'multi7_raster.tiff'))
        sub_raster = gdal.Open(os.path.join(self.test_dir, 'multi7_raster2.tiff'))

        # NOTE: Using same control sets for reference, subject
        hall_rectification(ref_raster, sub_raster, self.test_dir, control_sets, control_sets)

        arr, gt, wkt = as_array(os.path.join(self.test_dir, 'rect_multi7_raster2.tiff'))
        self.assertTrue(np.array_equal(arr.shape, (6, 74, 81)))
        self.assertTrue(np.array_equiv(arr[:,50,50].round(5), np.array([
            17, 1331, 1442, 3422, 2916, 2708
        ]).round(5)))
Project: unmixing    Author: arthur-e    | Project source | File source
def test_spectral_profile(self):
        '''
        Should correctly retrieve a spectral profile from a raster dataset.
        '''
        coords = ((-84.5983, 42.7256), (-85.0807, 41.1138))
        pixels = [(18, 0), (2, 59)]
        file_path = os.path.join(self.test_dir, 'multi3_raster.tiff')
        ds = gdal.Open(file_path)
        kwargs = {
            'gt': ds.GetGeoTransform(),
            'wkt': ds.GetProjection(),
            'dd': True
        }

        # The true spectral profile
        spectra = np.array([[237, 418, 325], [507, 616, 445]], dtype=np.int16)
        sp1 = spectra_at_xy(ds, coords, **kwargs)
        sp2 = spectra_at_xy(ds.ReadAsArray(), coords, **kwargs)
        sp3 = spectra_at_idx(ds.ReadAsArray().transpose(), pixels)
        self.assertEqual(spectra.tolist(), sp1.tolist())
        self.assertEqual(spectra.tolist(), sp2.tolist())
        self.assertEqual(spectra.tolist(), sp3.tolist())
Project: unmixing    Author: arthur-e    | Project source | File source
def array_to_raster_clone(a, proto, xoff=None, yoff=None):
    '''
    Creates a raster from a given array and a prototype raster dataset, with
    optional x- and y-offsets if the array was clipped. Arguments:
        a       A NumPy array
        proto   A prototype dataset
        xoff    The offset in the x-direction; should be provided when clipped
        yoff    The offset in the y-direction; should be provided when clipped
    '''
    rast = gdal_array.OpenNumPyArray(a)

    kwargs = dict()
    if xoff is not None and yoff is not None:
        kwargs = dict(xoff=xoff, yoff=yoff)

    # Copy the projection info and metadata from a prototype dataset
    if type(proto) == str:
        proto = gdal.Open(proto)

    gdalnumeric.CopyDatasetInfo(proto, rast, **kwargs)

    return rast
Project: demcoreg    Author: dshean    | Project source | File source
def get_lulc_ds_full(ds, lulc_source=None):
    if lulc_source is None:
        lulc_source = get_lulc_source(ds)
    if lulc_source == 'nlcd':
        lulc_ds_full = gdal.Open(get_nlcd_fn())
    elif lulc_source == 'bareground':
        lulc_ds_full = gdal.Open(get_bareground_fn())
    return lulc_ds_full
Project: demcoreg    Author: dshean    | Project source | File source
def proc_modscag(fn_list, extent=None, t_srs=None):
    """Process the MODSCAG products for full date range, create composites and reproject
    """
    #Use cubic spline here to improve upsampling
    ds_list = warplib.memwarp_multi_fn(fn_list, res='min', extent=extent, t_srs=t_srs, r='cubicspline')
    stack_fn = os.path.splitext(fn_list[0])[0] + '_' + os.path.splitext(os.path.split(fn_list[-1])[1])[0] + '_stack_%i' % len(fn_list) 
    #Create stack here - no need for most of mastack machinery, just make 3D array
    #Mask values greater than 100% (clouds, bad pixels, etc)
    ma_stack = np.ma.array([np.ma.masked_greater(iolib.ds_getma(ds), 100) for ds in np.array(ds_list)], dtype=np.uint8)

    stack_count = np.ma.masked_equal(ma_stack.count(axis=0), 0).astype(np.uint8)
    stack_count.set_fill_value(0)
    stack_min = ma_stack.min(axis=0).astype(np.uint8)
    stack_min.set_fill_value(0)
    stack_max = ma_stack.max(axis=0).astype(np.uint8)
    stack_max.set_fill_value(0)
    stack_med = np.ma.median(ma_stack, axis=0).astype(np.uint8)
    stack_med.set_fill_value(0)

    out_fn = stack_fn + '_count.tif'
    iolib.writeGTiff(stack_count, out_fn, ds_list[0])
    out_fn = stack_fn + '_max.tif'
    iolib.writeGTiff(stack_max, out_fn, ds_list[0])
    out_fn = stack_fn + '_min.tif'
    iolib.writeGTiff(stack_min, out_fn, ds_list[0])
    out_fn = stack_fn + '_med.tif'
    iolib.writeGTiff(stack_med, out_fn, ds_list[0])

    ds = gdal.Open(out_fn)
    return ds
Project: radar    Author: amoose136    | Project source | File source
def _open(self):
            if not _gdal:
                load_lib()
            self._ds = _gdal.Open(self.request.get_local_filename())
Project: HistoricalMap    Author: lennepkade    | Project source | File source
def polygonize(self,rasterTemp,outShp):

            sourceRaster = gdal.Open(rasterTemp)
            band = sourceRaster.GetRasterBand(1)
            driver = ogr.GetDriverByName("ESRI Shapefile")
            # If shapefile already exist, delete it
            if os.path.exists(outShp):
                driver.DeleteDataSource(outShp)

            outDatasource = driver.CreateDataSource(outShp)            
            # get proj from raster            
            srs = osr.SpatialReference()
            srs.ImportFromWkt( sourceRaster.GetProjectionRef() )
            # create layer with proj
            outLayer = outDatasource.CreateLayer(outShp,srs)
            # Add class column (1,2...) to shapefile

            newField = ogr.FieldDefn('Class', ogr.OFTInteger)
            outLayer.CreateField(newField)

            gdal.Polygonize(band, None,outLayer, 0,[],callback=None)  

            outDatasource.Destroy()
            sourceRaster=None
            band=None


            ioShpFile = ogr.Open(outShp, update = 1)


            lyr = ioShpFile.GetLayerByIndex(0)

            lyr.ResetReading()    

            for i in lyr:
                lyr.SetFeature(i)
            # if area is less than inMinSize or if it isn't forest, remove polygon 
                if i.GetField('Class')!=1:
                    lyr.DeleteFeature(i.GetFID())        
            ioShpFile.Destroy()

            return outShp
Project: uncover-ml    Author: GeoscienceAustralia    | Project source | File source
def test_gdal_vs_numpy(self):
        for t in self.tifs:
            ds = gdal.Open(t)
            n_list = geoinfo.numpy_band_stats(ds, t, 1)
            g_list = geoinfo.band_stats(ds, t, 1)
            self.assertEqual(n_list[0], g_list[0])
            self.assertEqual(n_list[-3], g_list[-3])
            self.assertEqual(n_list[-2], g_list[-2])

            np.testing.assert_almost_equal(
                np.array([float(t) for t in n_list[1:-3]]),
                np.array([float(t) for t in g_list[1:-3]]),
                decimal=4
            )
Project: uncover-ml    Author: GeoscienceAustralia    | Project source | File source
def test_geotransform_projection_nodata(self):
        tmp_output = tempfile.mktemp(suffix='.tif')
        extents = [str(s) for s in [920000, -4300000, 929000, -4290000]]

        # the mock is already geotransformed, so this will have no effect
        # to projection and nodata, but will be cropped making the
        # geotransform tuple different
        crop.crop_reproject_resample(self.std2000_no_mask, tmp_output,
                                     sampling='bilinear',
                                     extents=extents,
                                     reproject=True)

        ds = gdal.Open(tmp_output)
        gt = ds.GetGeoTransform()
        projection = ds.GetProjection()
        nodata = ds.GetRasterBand(1).GetNoDataValue()
        ds = None

        ds = gdal.Open(self.std2000_no_mask)
        projection_input = ds.GetProjection()
        nodata_input = ds.GetRasterBand(1).GetNoDataValue()
        ds = None

        self.assertEqual(nodata, nodata_input)
        self.assertEqual(projection, projection_input)
        self.assertEqual(gt[1], float(crop.OUTPUT_RES[0]))
        self.assertEqual(gt[0], float(extents[0]))
        self.assertEqual(gt[3], float(extents[3]))
        os.remove(tmp_output)
Project: uncover-ml    Author: GeoscienceAustralia    | Project source | File source
def test_apply_mask(self):
        output_file = tempfile.mktemp(suffix='.tif')
        jpeg = False
        tmp_out_file = tempfile.mktemp(suffix='.tif')
        shutil.copy(self.std2000_no_mask, tmp_out_file)
        crop.apply_mask(mask_file=self.mask,
                        tmp_output_file=tmp_out_file,
                        output_file=output_file,
                        jpeg=jpeg)

        ds = gdal.Open(output_file)
        gt = ds.GetGeoTransform()
        projection = ds.GetProjection()
        nodata = ds.GetRasterBand(1).GetNoDataValue()
        ds = None

        ds = gdal.Open(self.std2000)
        projection_input = ds.GetProjection()
        nodata_input = ds.GetRasterBand(1).GetNoDataValue()
        ds = None

        self.assertEqual(nodata, nodata_input)
        self.assertEqual(projection, projection_input)
        self.assertEqual(gt[1], float(crop.OUTPUT_RES[0]))
        self.assertEqual(gt[0], self.extents[0])
        self.assertEqual(gt[3], self.extents[3])
        os.remove(output_file)
Project: uncover-ml    Author: GeoscienceAustralia    | Project source | File source
def test_mpi_vs_serial(self):

        tmpdir1 = tempfile.mkdtemp()
        tmpdir2 = tempfile.mkdtemp()

        for n, partitions in product(range(1, 3), range(2, 100, 10)):
            size = 2 * n + 1
            raster_average.treat_file(self.test_tif,
                                      out_dir=tmpdir1,
                                      size=size,
                                      func='nanmean',
                                      partitions=partitions)
            arr1 = gdal.Open(
                join(tmpdir1, basename(self.test_tif))).ReadAsArray()

            raster_average.treat_file(self.test_tif,
                                      out_dir=tmpdir2,
                                      size=size,
                                      func='nanmean',
                                      partitions=partitions)
            arr2 = gdal.Open(join(tmpdir2,
                                  basename(self.test_tif))).ReadAsArray()

            np.testing.assert_array_almost_equal(arr1, arr2)

        shutil.rmtree(tmpdir2)
        shutil.rmtree(tmpdir1)
Project: uncover-ml    Author: GeoscienceAustralia    | Project source | File source
def apply_mask(mask_file, tmp_output_file, output_file, jpeg):
    """
    Parameters
    ----------
    mask_file: mask file path
    tmp_output_file: intermediate cropped geotiff before mask application
    output_file: output geotiff path
    jpeg: boolean, whether to produce jpeg or not
    -------

    """
    mask = get_mask(mask_file)

    out_ds = gdal.Open(tmp_output_file, gdal.GA_Update)
    out_band = out_ds.GetRasterBand(1)
    out_data = out_band.ReadAsArray()
    no_data_value = out_band.GetNoDataValue()
    log.info('Found NoDataValue {} for file {}'.format(
        no_data_value, os.path.basename(tmp_output_file)))
    if no_data_value is not None:
        out_data[mask] = no_data_value
    else:
        log.warning('NoDataValue was not set for {}'.format(tmp_output_file))
        log.info('Manually setting NoDataValue for {}'.format(tmp_output_file))
    out_band.WriteArray(out_data)
    out_ds = None  # close dataset and flush cache

    # move file to output file
    shutil.move(tmp_output_file, output_file)
    log.info('Output file {} created'.format(tmp_output_file))

    if jpeg:
        dir_name = os.path.dirname(output_file)
        jpeg_file = os.path.basename(output_file).split('.')[0] + '.jpg'
        jpeg_file = os.path.join(dir_name, jpeg_file)
        cmd_jpg = ['gdal_translate', '-ot', 'Byte', '-of', 'JPEG', '-scale',
                   output_file,
                   jpeg_file] + COMMON
        subprocess.check_call(cmd_jpg)
        log.info('Created {}'.format(jpeg_file))
Project: uncover-ml    Author: GeoscienceAustralia    | Project source | File source
def average(input_dir, out_dir, size):
    input_dir = abspath(input_dir)
    log.info('Reading tifs from {}'.format(input_dir))
    tifs = glob.glob(join(input_dir, '*.tif'))

    for tif in tifs:
        data_source = gdal.Open(tif, gdal.GA_ReadOnly)
        band = data_source.GetRasterBand(1)
        # data_type = gdal.GetDataTypeName(band.DataType)
        data = band.ReadAsArray()
        no_data_val = band.GetNoDataValue()
        averaged_data = filter_data(data, size, no_data_val)
        log.info('Calculated average for {}'.format(basename(tif)))

        output_file = join(out_dir, 'average_' + basename(tif))
        out_ds = gdal.GetDriverByName('GTiff').Create(
            output_file, data_source.RasterXSize, data_source.RasterYSize,
            1, band.DataType)
        out_band = out_ds.GetRasterBand(1)
        out_band.WriteArray(averaged_data)
        out_ds.SetGeoTransform(data_source.GetGeoTransform())
        out_ds.SetProjection(data_source.GetProjection())
        out_band.FlushCache()  # Write data to disc
        out_ds = None  # close out_ds
        data_source = None  # close dataset

        log.info('Finished converting {}'.format(basename(tif)))
Project: uncover-ml    Author: GeoscienceAustralia    | Project source | File source
def gdalaverage(input_dir, out_dir, size):
    """
    average data using gdal's averaging method.
    Parameters
    ----------
    input_dir: str
        input dir name of the tifs that needs to be averaged
    out_dir: str
        output dir name
    size: int, optional
        size of kernel
    Returns
    -------

    """
    input_dir = abspath(input_dir)
    log.info('Reading tifs from {}'.format(input_dir))
    tifs = glob.glob(join(input_dir, '*.tif'))

    process_tifs = np.array_split(tifs, mpiops.chunks)[mpiops.chunk_index]

    for tif in process_tifs:
        data_set = gdal.Open(tif, gdal.GA_ReadOnly)
        # band = data_set.GetRasterBand(1)
        # data_type = gdal.GetDataTypeName(band.DataType)
        # data = band.ReadAsArray()
        # no_data_val = band.GetNoDataValue()
        # averaged_data = filter_data(data, size, no_data_val)
        log.info('Calculated average for {}'.format(basename(tif)))

        output_file = join(out_dir, 'average_' + basename(tif))
        src_gt = data_set.GetGeoTransform()
        tmp_file = '/tmp/tmp_{}.tif'.format(mpiops.chunk_index)
        resample_cmd = [TRANSLATE] + [tif, tmp_file] + \
            ['-tr', str(src_gt[1]*size), str(src_gt[1]*size)] + \
            ['-r', 'bilinear']
        check_call(resample_cmd)
        rollback_cmd = [TRANSLATE] + [tmp_file, output_file] + \
            ['-tr', str(src_gt[1]), str(src_gt[1])]
        check_call(rollback_cmd)
        log.info('Finished converting {}'.format(basename(tif)))
Project: uncover-ml    Author: GeoscienceAustralia    | Project source | File source
def get_stats(tif, partitions=100):
    ds = gdal.Open(tif, gdal.GA_ReadOnly)
    number_of_bands = ds.RasterCount

    if ds.RasterCount > 1:
        d = {}
        log.info('Found multibanded geotif {}'.format(basename(tif)))
        for b in range(number_of_bands):
            d['{tif}_{b}'.format(tif=tif, b=b)] = band_stats(ds, tif, b + 1,
                                                             partitions)
        return d
    else:
        log.info('Found single band geotif {}'.format(basename(tif)))
        return {tif: band_stats(ds, tif, 1, partitions)}
Project: uncover-ml    Author: GeoscienceAustralia    | Project source | File source
def get_numpy_stats(tif, writer):
    ds = gdal.Open(tif, gdal.GA_ReadOnly)
    number_of_bands = ds.RasterCount

    if ds.RasterCount > 1:
        log.info('Found multibanded geotif {}'.format(basename(tif)))
        for b in range(number_of_bands):
            write_rows(stats=numpy_band_stats(ds, tif, b + 1), writer=writer)
    else:
        log.info('Found single band geotif {}'.format(basename(tif)))
        write_rows(stats=numpy_band_stats(ds, tif, 1), writer=writer)
Project: Global_GPP_VPM_NCEP_C3C4    Author: zhangyaonju    | Project source | File source
def VPMprmt (directory,tile):
    tempfile=None
    for path, subdirs, files in os.walk(directory):
        for name in files:
            if (tile in name) and (".tif" == name[-4:]) : tempfile=os.path.join(path,name)
    print(tempfile)
    inprmtdata=gdal.Open(tempfile)
    prmtdata=inprmtdata.ReadAsArray()
    return prmtdata


#Function to build vrt
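buildVrtFile itself is not shown in this excerpt; judging from how it is called below (its return value is handed to gdalbuildvrt as -input_file_list), a minimal sketch that writes the matching per-date rasters to a plain-text list could be the following — the directory layout and file-name pattern are assumptions:

import glob
import os

def buildVrtFile(year, tile, src_dir='.'):
    # Collect this tile's rasters for the given year and write their paths to a
    # text file that gdalbuildvrt can consume via -input_file_list.
    rasters = sorted(glob.glob(os.path.join(src_dir, '*{}*{}*.tif'.format(year, tile))))
    list_name = os.path.join(src_dir, '{}{}_file_list.txt'.format(year, tile))
    with open(list_name, 'w') as f:
        for r in rasters:
            f.write(r + '\n')
    return list_name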
Project: Global_GPP_VPM_NCEP_C3C4    Author: zhangyaonju    | Project source | File source
def movingaverage(tile):
    # Output directories for moving average LSWImax

    # if the output directories don't exist, create the new directories
    if not os.path.exists(dirLSWIMA):
        os.makedirs(dirLSWIMA)
    # build LSWImax vrt file and read as an array
    file=buildVrtFile(year,tile)
    vrtLSWImax=os.path.join(os.path.dirname(file),year+tile+'LSWImax_vrt.vrt')
    #print "Building the vrt file: ", year+tile+vrtLSWImax
    os.system('gdalbuildvrt -separate -input_file_list '+file+' '+vrtLSWImax)

    global rows, cols, geoProj,geoTran
    inLSWImax=gdal.Open(vrtLSWImax)
    #print "reading the multi-LSWI..."
    LSWImax=inLSWImax.ReadAsArray()

    rows = 2400
    cols = 2400
    geoTran=inLSWImax.GetGeoTransform()
    geoProj=inLSWImax.GetProjection()

    #find the second largest LSWImax
    secLSWImax = numpy.sort(LSWImax, axis=0, kind='quicksort', order=None)[3,:,:]

    write_file(dirLSWIMA+'/'+tile+'.'+year+'.maxLSWI_MA.tif',secLSWImax,geoTran,rows,cols,geoProj,driverName='GTiff')

    secLSWImax=None
    LSWImax=None
Project: utilities    Author: SpaceNetChallenge    | Project source | File source
def buildTindex(rasterFolder, rasterExtention='.tif'):
    rasterList = glob.glob(os.path.join(rasterFolder, '*{}'.format(rasterExtention)))
    print(rasterList)

    print(os.path.join(rasterFolder, '*{}'.format(rasterExtention)))

    memDriver = ogr.GetDriverByName('MEMORY')
    gTindex = memDriver.CreateDataSource('gTindex')
    srcImage = gdal.Open(rasterList[0])
    spat_ref = osr.SpatialReference()
    spat_ref.ImportFromWkt(srcImage.GetProjection())
    gTindexLayer = gTindex.CreateLayer("gtindexlayer", spat_ref, geom_type=ogr.wkbPolygon)

    # Add an ID field
    idField = ogr.FieldDefn("location", ogr.OFTString)
    gTindexLayer.CreateField(idField)

    # Create the feature and set values
    featureDefn = gTindexLayer.GetLayerDefn()



    for rasterFile in rasterList:
        srcImage = gdal.Open(rasterFile)

        geoTrans, polyToCut, ulX, ulY, lrX, lrY = gT.getRasterExtent(srcImage)

        feature = ogr.Feature(featureDefn)
        feature.SetGeometry(polyToCut)
        feature.SetField("location", rasterFile)
        gTindexLayer.CreateFeature(feature)
        feature = None


    return gTindex, gTindexLayer
Project: utilities    Author: SpaceNetChallenge    | Project source | File source
def createTiledGeoJsonFromSrc(rasterFolderLocation, vectorSrcFile, geoJsonOutputDirectory, rasterTileIndex='',
                              geoJsonPrefix='GEO', rasterFileExtenstion='.tif',
                              rasterPrefixToReplace='PAN'
                              ):
    if rasterTileIndex == '':
        gTindex, gTindexLayer = buildTindex(rasterFolderLocation, rasterExtention=rasterFileExtenstion)
    else:
        gTindex = ogr.Open(rasterTileIndex,0)
        gTindexLayer = gTindex.GetLayer()

    shapeSrc = ogr.Open(vectorSrcFile,0)
    chipSummaryList = []
    for feature in gTindexLayer:
        featureGeom = feature.GetGeometryRef()
        rasterFileName = feature.GetField('location')
        rasterFileBaseName = os.path.basename(rasterFileName)
        outGeoJson = rasterFileBaseName.replace(rasterPrefixToReplace, geoJsonPrefix)
        outGeoJson = outGeoJson.replace(rasterFileExtenstion, '.geojson')
        outGeoJson = os.path.join(geoJsonOutputDirectory, outGeoJson)

        gT.clipShapeFile(shapeSrc, outGeoJson, featureGeom, minpartialPerc=0.0, debug=False)
        imageId = rasterFileBaseName.replace(rasterPrefixToReplace+"_", "")
        chipSummary = {'chipName': rasterFileName,
                           'geoVectorName': outGeoJson,
                           'imageId': os.path.splitext(imageId)[0]}

        chipSummaryList.append(chipSummary)

    return chipSummaryList
Project: utilities    Author: SpaceNetChallenge    | Project source | File source
def createTruthPixelLinePickle(truthLineFile, pickleLocation=''):
    if pickleLocation=='':
        extension = os.path.splitext(truthLineFile)[1]
        pickleLocation = truthLineFile.replace(extension, 'Pixline.p')
    if truthLineFile != '':
        # get Source Line File Information
        shapef = ogr.Open(truthLineFile, 0)
        truthLayer = shapef.GetLayer()
        pt1X = []
        pt1Y = []
        pt2X = []
        pt2Y = []
        for tmpFeature in truthLayer:
            tmpGeom = tmpFeature.GetGeometryRef()
            for i in range(0, tmpGeom.GetPointCount()):
                pt = tmpGeom.GetPoint(i)

                if i == 0:
                    pt1X.append(pt[0])
                    pt1Y.append(pt[1])
                elif i == 1:
                    pt2X.append(pt[0])
                    pt2Y.append(pt[1])

        lineData = {'pt1X': np.asarray(pt1X),
                    'pt1Y': np.asarray(pt1Y),
                    'pt2X': np.asarray(pt2X),
                    'pt2Y': np.asarray(pt2Y)
                    }

        with open(pickleLocation, 'wb') as f:
            pickle.dump(lineData, f)
            # get Source Line File Information
Project: utilities    Author: SpaceNetChallenge    | Project source | File source
def createTruthPixelPolyPickle(truthPoly, pickleLocation=''):
    # returns dictionary with list of minX, maxX, minY, maxY

    if pickleLocation=='':
        extension = os.path.splitext(truthPoly)[1]
        pickleLocation = truthPoly.replace(extension, 'PixPoly.p')
    if truthPoly != '':
        # get Source Line File Information
        shapef = ogr.Open(truthPoly, 0)
        truthLayer = shapef.GetLayer()
        envList = []

        for tmpFeature in truthLayer:
            tmpGeom = tmpFeature.GetGeometryRef()
            env = tmpGeom.GetEnvelope()
            envList.append(env)

        envArray = np.asarray(envList)
        envelopeData = {'minX': envArray[:,0],
                        'maxX': envArray[:,1],
                        'minY': envArray[:,2],
                        'maxY': envArray[:,3]
                        }


        with open(pickleLocation, 'wb') as f:
            pickle.dump(envelopeData, f)
            # get Source Line File Information
Project: PySAT    Author: USGS-Astrogeology    | Project source | File source
def openm3(input_data):
    if input_data.split('.')[-1] == 'hdr':
        # GDAL wants the img, but many users aim at the .hdr
        input_data = input_data.split('.')[0] + '.img'
    ds = gdal.Open(input_data)
    ref_array = ds.GetRasterBand(1).ReadAsArray()
    metadata = ds.GetMetadata()
    wv_array = metadatatoband(metadata)
    return wv_array, ref_array, ds
Project: PySAT    Author: USGS-Astrogeology    | Project source | File source
def openmi(input_data):
    ds = gdal.Open(input_data)
    band_pointers = []
    nbands = ds.RasterCount

    for b in range(1, nbands + 1):
        band_pointers.append(ds.GetRasterBand(b))

    ref_array = ds.GetRasterBand(1).ReadAsArray()
    wv_array = None
    return wv_array, ref_array[::3, ::3], ds
Project: python_scripting_for_spatial_data_processing    Author: upsdeepak    | Project source | File source
def readBandMetaData(inputFile, band, name):
    # Open the dataset in Read Only mode
    dataset = gdal.Open(inputFile, gdal.GA_ReadOnly)
    # Check that the image has opened
    if not dataset is None:
        # Get the image band
        imgBand = dataset.GetRasterBand(band)
        # Check the image band was available
        if not imgBand is None:
            # Get the meta data value specified
            metaData = imgBand.GetMetadataItem(name)
            # Check that it is present
            if metaData == None:
                # If not present, print error.
                print("Could not find \'", name, "\' item.")
            else:
                # Else print out the metadata value
                print(name, "=\'", metaData, "\'")

        else:
            # Print out an error message.
            print("Could not open the image band: ", band)

    else:
        # Print an error message if the file
        # could not be opened.
        print("Could not open the input image file: ", inputFile)

# A function to read a meta-data item
# from an image