Python matplotlib module: dates() example source code

We extracted the following 20 code examples from open-source Python projects to illustrate how the matplotlib.dates module is used.
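
Before the project snippets, here is a minimal, self-contained sketch of the pieces most of them rely on (date2num/num2date conversion, DateFormatter, and a locator); the output file name is only illustrative.

import datetime
import matplotlib
matplotlib.use('Agg')  # headless backend, so the sketch runs anywhere
import matplotlib.pyplot as plt
import matplotlib.dates as mdates

# One week of daily timestamps plotted against arbitrary values.
days = [datetime.datetime(2017, 1, 1) + datetime.timedelta(days=i) for i in range(7)]
values = list(range(7))

fig, ax = plt.subplots()
ax.plot(days, values)                                     # datetimes are converted via date2num internally
ax.xaxis.set_major_locator(mdates.DayLocator())           # one tick per day
ax.xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m-%d'))
fig.autofmt_xdate()                                       # rotate and align the date labels
fig.savefig('dates_demo.png')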

Project: pyGrav    Author: basileh
def load_start_end_dates(self):
        """
        function for loading start and end dates of each survey we want to 
        process. File format is
        yyy/mm/dd hh:mn:ss yyy/mm/dd hh:mn:ss 
        yyy/mm/dd hh:mn:ss yyy/mm/dd hh:mn:ss 
        ...

        update the Campaign object stored in the program by populating its
        survey dictionary

        """
        #set the survey selection option required by the survey populating 
        #function:        
        self.survey_selection_option=2
        print 'hold on a sec'
        self.statusBar().showMessage("Hold on a sec")        
        fname = QtGui.QFileDialog.getOpenFileName(self, 'Open file',self.data_path)          
        self.surveydates=read_start_end_dates(fname)        
        #call the next generic step of the survey selection process        
        self.baseStationSelection()
Project: pygeotools    Author: dshean
def rel_dt_test(dt, min_rel_dt=(1,1), max_rel_dt=(12,31)):
    if dt_check(dt): 
        dt_doy = dt2doy(dt)
        min_doy = dt2doy(datetime(dt.year, *min_rel_dt))
        max_doy = dt2doy(datetime(dt.year, *max_rel_dt))
        #If both relative dates are in the same year
        if min_doy < max_doy:
            min_dt = datetime(dt.year, min_rel_dt[0], min_rel_dt[1])
            max_dt = datetime(dt.year, max_rel_dt[0], max_rel_dt[1])
        else:
            #If relative dates span Jan 1
            if dt_doy >= min_doy:
                min_dt = datetime(dt.year, min_rel_dt[0], min_rel_dt[1])
                max_dt = datetime(dt.year + 1, max_rel_dt[0], max_rel_dt[1])
            else:
                min_dt = datetime(dt.year - 1, min_rel_dt[0], min_rel_dt[1])
                max_dt = datetime(dt.year, max_rel_dt[0], max_rel_dt[1])
        out = (dt >= min_dt) & (dt <= max_dt)
    else:
        out = False
    return out
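
The year-wrapping branch is the subtle part. Below is a compact, self-contained restatement of the same check; the project's dt2doy and dt_check helpers are replaced by an inline day-of-year lambda, so this is an illustration rather than pygeotools code.

from datetime import datetime

doy = lambda d: int(d.strftime('%j'))   # stand-in for pygeotools' dt2doy helper

def in_seasonal_window(d, min_rel_dt=(11, 1), max_rel_dt=(2, 28)):
    """True if d falls inside the month/day window, even when it spans Jan 1."""
    lo = doy(datetime(d.year, *min_rel_dt))
    hi = doy(datetime(d.year, *max_rel_dt))
    x = doy(d)
    return (lo <= x <= hi) if lo < hi else (x >= lo or x <= hi)

print(in_seasonal_window(datetime(2015, 1, 15)))   # True: mid-January lies inside the Nov-Feb window
print(in_seasonal_window(datetime(2015, 6, 15)))   # False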
Project: deep_heart_hackatho    Author: akshaynathr
def extractWeekendHighlights(dates):
    weekendsOut = []
    weekendSearch = [5, 6]
    weekendStart = None
    for i, date in enumerate(dates):
        if date.weekday() in weekendSearch:
            if weekendStart is None:
                # Mark start of weekend
                weekendStart = i
        else:
            if weekendStart is not None:
                # Mark end of weekend
                weekendsOut.append((
                    weekendStart, i, WEEKEND_HIGHLIGHT_COLOR, HIGHLIGHT_ALPHA
                ))
                weekendStart = None

    # Cap it off if we're still in the middle of a weekend
    if weekendStart is not None:
        weekendsOut.append((
            weekendStart, len(dates)-1, WEEKEND_HIGHLIGHT_COLOR, HIGHLIGHT_ALPHA
        ))

    return weekendsOut
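
A quick check of the function above, using placeholder values for the two module-level constants it references (the real project defines its own):

import datetime

WEEKEND_HIGHLIGHT_COLOR = 'yellow'   # placeholder value for this sketch
HIGHLIGHT_ALPHA = 0.2                # placeholder value for this sketch

days = [datetime.date(2017, 1, 2) + datetime.timedelta(days=i) for i in range(10)]  # Mon 2 Jan .. Wed 11 Jan
print(extractWeekendHighlights(days))
# -> [(5, 7, 'yellow', 0.2)]: Saturday and Sunday sit at indices 5 and 6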
Project: pyGrav    Author: basileh
def automaticSurveySelection(self):
        """
        Option for automatic selection of surveys among the raw data set.

        Algo:
        A time threshold is asked to the user, and used as a criteria to 
        separate different survey. Time intervals between measurement dates for
        which station number changes are compared to the threshold. 
        If it is higher, a new survey is considered.

        The base station is asked to the user

        Remarks:
        Only works when the base station (asked to the user) is allways the 
        same. Whether the base station number is the same as the cycling
        station number is asked to the user.
        when complicated loop geometries are used, or for specific survey 
        designs, this option is likely to fail.
        """
        #set the survey selection option required by the survey populating 
        #function:
        self.survey_selection_option=1
        text, ok = QtGui.QInputDialog.getText(self, 'Input parameters', 
            'time threshold (hr)')        
        if ok:
            self.t_threshold=int(text)
            text, ok2 = QtGui.QInputDialog.getText(self, 'Input parameters', 
            'base station=cycling station? (1=y/0=n)')        
            if ok2:
                self.base_cycling_station=int(text)
                #call the next generic step of the survey selection process
                self.baseStationSelection()
Project: pyGrav    Author: basileh
def setSingleSurvey(self,swin):
        """
        When user chooses to enter manually start and end dates of a single
        survey. Fill in the caimpagndata property.
        """
        self.surveydates=[(datetime(int(swin.yrEdit.text()),int(swin.monthEdit.text()),
            int(swin.dayEdit.text()),int(swin.hrEdit.text()),
            int(swin.mnEdit.text()),int(swin.ssEdit.text())),
            datetime(int(swin.yrEditend.text()),int(swin.monthEditend.text()),
            int(swin.dayEditend.text()),int(swin.hrEditend.text()),
            int(swin.mnEditend.text()),int(swin.ssEditend.text())))]
        #call the next generic step of the survey selection process               
        self.baseStationSelection()
Project: lddmm-ot    Author: jeanfeydy
def mpl_dates_to_datestrings(dates, mpl_formatter):
    """Convert matplotlib dates to iso-formatted-like time strings.

    Plotly's accepted format: "YYYY-MM-DD HH:MM:SS" (e.g., 2001-01-01 00:00:00)

    Info on mpl dates: http://matplotlib.org/api/dates_api.html

    """
    _dates = dates

    # this is a pandas datetime formatter, times show up in floating point days
    # since the epoch (1970-01-01T00:00:00+00:00)
    if mpl_formatter == "TimeSeries_DateFormatter":
        try:
            dates = matplotlib.dates.epoch2num(
                [date*24*60*60 for date in dates]
            )
            dates = matplotlib.dates.num2date(dates, tz=pytz.utc)
        except:
            return _dates

    # the rest of mpl dates are in floating point days since
    # (0001-01-01T00:00:00+00:00) + 1. I.e., (0001-01-01T00:00:00+00:00) == 1.0
    # according to mpl --> try num2date(1)
    else:
        try:
            dates = matplotlib.dates.num2date(dates, tz=pytz.utc)
        except:
            return _dates

    time_strings = [' '.join(date.isoformat().split('+')[0].split('T'))
                    for date in dates]
    return time_strings
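
For the non-pandas branch, the round trip can be checked directly. Note that epoch2num has been deprecated in recent matplotlib releases and the default date epoch is now 1970-01-01, so this minimal sketch sticks to date2num/num2date:

import datetime
import pytz
import matplotlib.dates

d = datetime.datetime(2001, 1, 1, tzinfo=pytz.utc)
num = matplotlib.dates.date2num(d)                   # floating-point days on matplotlib's date scale
back = matplotlib.dates.num2date(num, tz=pytz.utc)   # back to a timezone-aware datetime
print(' '.join(back.isoformat().split('+')[0].split('T')))
# -> '2001-01-01 00:00:00', the "YYYY-MM-DD HH:MM:SS" form Plotly accepts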
Project: pygeotools    Author: dshean
def dt2o(dt):
    """Convert datetime to Python ordinal
    """
    #return datetime.toordinal(dt)
    #This works for arrays of dt
    #return np.array(matplotlib.dates.date2num(dt))
    return matplotlib.dates.date2num(dt)

#Need to split ordinal into integer and decimal parts
Project: pygeotools    Author: dshean
def o2dt(o):
    """Convert Python ordinal to datetime
    """
    #omod = np.modf(o)
    #return datetime.fromordinal(int(omod[1])) + timedelta(days=omod[0])
    #Note: num2date returns dt or list of dt
    #This function should always return a list
    #return np.array(matplotlib.dates.num2date(o))
    return matplotlib.dates.num2date(o)

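
As the comments note, both wrappers accept sequences; a quick round trip shows the types involved:

from datetime import datetime
import matplotlib.dates

dts = [datetime(2016, 1, 1), datetime(2016, 7, 1, 12)]
ords = matplotlib.dates.date2num(dts)    # numpy array of floating-point day numbers
back = matplotlib.dates.num2date(ords)   # list of timezone-aware datetimes (UTC by default)
print(ords)
print(back[1])                           # 2016-07-01 12:00:00+00:00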
#Return integer DOY (julian)
Project: autoxd    Author: nessessary
def weekday_candlestick(ohlc_data, ax, fmt='%b %d', freq=7, **kwargs):
    """ Wrapper function for matplotlib.finance.candlestick_ohlc
        that artificially spaces data to avoid gaps from weekends 
    ??????????
    fmt: ????
    freq: ???????
    """

    # Convert data to numpy array
    ohlc_data_arr = np.array(ohlc_data)
    ohlc_data_arr2 = np.hstack(
        [np.arange(ohlc_data_arr[:,0].size)[:,np.newaxis], ohlc_data_arr[:,1:]])
    ndays = ohlc_data_arr2[:,0]  # array([0, 1, 2, ... n-2, n-1, n])

    # Convert matplotlib date numbers to strings based on `fmt`
    dates = mdates.num2date(ohlc_data_arr[:,0])
    date_strings = []
    for date in dates:
        date_strings.append(date.strftime(fmt))

    # Plot candlestick chart
    mpf.candlestick_ohlc(ax, ohlc_data_arr2, **kwargs)

    # Format x axis
    ax.set_xticks(ndays[::freq])
    ax.set_xticklabels(date_strings[::freq], rotation=45, ha='right')
    ax.set_xlim(ndays.min(), ndays.max())
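
matplotlib.finance has since been removed from matplotlib itself (candlestick plotting now lives in the separate mplfinance package), so here is a reduced sketch of just the gap-avoidance trick: plot against integer positions and keep the calendar only in the tick labels.

import datetime
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt

# Closing prices for Wed-Fri and the following Mon-Tue; a calendar axis would leave a weekend gap.
days = [datetime.date(2017, 1, d) for d in (4, 5, 6, 9, 10)]
closes = [10.0, 10.5, 10.2, 10.8, 11.1]

ndays = np.arange(len(days))                         # evenly spaced integer positions
labels = [d.strftime('%b %d') for d in days]         # calendar information kept only in the labels

fig, ax = plt.subplots()
ax.plot(ndays, closes)
ax.set_xticks(ndays)
ax.set_xticklabels(labels, rotation=45, ha='right')
fig.savefig('gapless_axis.png')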
Project: Amazon-Alert    Author: anfederico
def plotDatePrice(productID, productTitle, data):

    # Data setup 
    x, y = [], []
    for datapoint in data:
        date = datapoint.split('|')[0]
        price = float(datapoint.split('|')[1])
        x.append(dt.datetime.strptime(date, '%Y-%m-%d'))
        y.append(price)     
    x = matplotlib.dates.date2num(x)    
    x_np, y_np = np.array(x), np.array(y)   

    # Plot setup
    ax = plt.figure(figsize=(6, 3)).add_subplot(111)
    ax.spines['top'].set_visible(False)   
    ax.spines['right'].set_visible(False)   
    ax.get_xaxis().tick_bottom()  
    ax.get_yaxis().tick_left()   
    ax.plot(x_np, y_np, color='lightblue', lw=2)
    ax.margins(0.05)  
    ax.yaxis.set_major_formatter(FuncFormatter(lambda x, pos: ('$%i' % (x))))
    ax.xaxis.set_major_formatter(DateFormatter('%Y-%m-%d'))
    plt.yticks(fontsize=8)
    plt.ylim(ymin=min(y)*0.7, ymax=max(y)*1.3)
    plt.title('Recent Price History\n'+productTitle, weight ='light', fontsize=12, y=1.08)  
    plt.xticks(rotation=40, fontsize=7) 
    plt.tight_layout()
    plt.savefig(productID+'.png')
    return productID+'.png'
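
A hypothetical call, assuming the module-level imports this snippet relies on (dt, np, plt, matplotlib.dates, FuncFormatter, DateFormatter) are in place; the product ID, title, and prices are invented.

data = ['2017-03-01|24.99', '2017-03-05|22.49', '2017-03-12|25.99']
print(plotDatePrice('B000EXAMPLE', 'Example Product', data))
# -> 'B000EXAMPLE.png', saved in the working directory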

# ----- Email Configuration ----------------------------------------------------
Project: zeronet-easymetrics    Author: kostaNew
def default(general, task, scope):

    #Read and clean data
    data = pd.read_csv(os.path.join(general["folder"], general["zite_data"]))
    data = clean_additional_header(data)
    data['time'] = pd.to_datetime(data['time'], format="%Y-%m-%d_%H-%M-%S")
    data['peers_total'] = data['peers_total'].astype('int')

    #Peers by time
    plot_series = []
    for el in scope:
        zite_name = el['zite']
        temp_ = data[data.address == zite_name][['time','peers_total']].sort_values(by='time')
        plot_series.append({'time':temp_['time'], 'peers_total':temp_['peers_total'], 'label':el['label']})

    #Plots
    fig = plt.figure()
    ax = plt.subplot(111)

    for p in plot_series:
        ax.plot(p['time'], p['peers_total'], label=p['label'])

    plt.xticks(rotation=15) 

    majorFormatter = matplotlib.dates.DateFormatter('%m-%d %H:%M:%S')
    ax.xaxis.set_major_formatter(majorFormatter)
    ax.autoscale_view()

    ax.legend(loc='upper center', bbox_to_anchor=(0.5, 1.1),
          ncol=3, fancybox=True, shadow=True)

    plt.show()
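
The same to_datetime plus DateFormatter combination, reduced to a self-contained sketch with an inline frame instead of the project's CSV file (column names follow the snippet above; the values are made up):

import pandas as pd
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.dates

df = pd.DataFrame({'time': ['2017-05-01_10-00-00', '2017-05-01_11-00-00', '2017-05-01_12-00-00'],
                   'peers_total': ['5', '7', '6']})
df['time'] = pd.to_datetime(df['time'], format='%Y-%m-%d_%H-%M-%S')
df['peers_total'] = df['peers_total'].astype('int')

fig, ax = plt.subplots()
ax.plot(df['time'], df['peers_total'], label='example zite')
ax.xaxis.set_major_formatter(matplotlib.dates.DateFormatter('%m-%d %H:%M:%S'))
ax.legend()
fig.autofmt_xdate(rotation=15)
fig.savefig('peers_by_time.png')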
Project: deep_heart_hackatho    Author: akshaynathr
def __init__(self, *args, **kwargs):
        super(NuPICPlotOutput, self).__init__(*args, **kwargs)
        # Turn matplotlib interactive mode on.
        plt.ion()
        self.dates = []
        self.convertedDates = []
        self.value = []
        self.rawValue = []
        self.allValues = []
        self.allRawValues = []
        self.predicted = []
        self.anomalyScore = []
        self.anomalyLikelihood = []
        self.actualLine = None
        self.rawLine = None
        self.predictedLine = None
        self.anomalyScoreLine = None
        self.anomalyLikelihoodLine = None
        self.linesInitialized = False
        self._chartHighlights = []
        fig = plt.figure(figsize=(16, 10))
        gs = gridspec.GridSpec(2, 1, height_ratios=[3,    1])

        self._mainGraph = fig.add_subplot(gs[0, 0])
        plt.title(self.name)
        plt.ylabel('Value')
        plt.xlabel('Date')

        self._anomalyGraph = fig.add_subplot(gs[1])

        plt.ylabel('Percentage')
        plt.xlabel('Date')

        # Maximizes window
        mng = plt.get_current_fig_manager()
        mng.resize(800, 600)

        plt.tight_layout()
Project: digital_rf    Author: MITHaystack
def plot_resid(d,savename='resfig1.png'):
    """
        Plots the residual frequency after the first wipe using the TLE velocity.
    """
    flim = [-2.e3, 2.e3]
    t = d['tvec']

    dates = [dt.datetime.fromtimestamp(ts) for ts in t]
    datenums = md.date2num(dates)
    xfmt = md.DateFormatter('%Y-%m-%d %H:%M:%S')

    fig1 = plt.figure(figsize=(7, 9))
    doppler_residual = sp.interpolate.interp1d(d['tvec'],d['dopfit'])
    fvec = d["fvec"]
    res0 = d["res0"]
    res1 = d["res1"]
    plt.subplot(211)
    mesh = plt.pcolormesh(datenums, fvec, sp.transpose(10.*sp.log10(res0+1e-12)), vmin=-5, vmax=25)
    plt.plot(datenums, (150.0/400.0)*doppler_residual(t), "r--", label="doppler resid")
    ax = plt.gca()
    ax.xaxis.set_major_formatter(xfmt)
    plt.ylim(flim)
    plt.subplots_adjust(bottom=0.2)
    plt.xticks(rotation=25)
    plt.xlabel("UTC")
    plt.ylabel("Frequency (Hz)")
    plt.title("Power ch0 (dB) %1.2f MHz"%(150.012))
    plt.legend()
    plt.colorbar(mesh, ax=ax)

    # Quicklook spectra of residuals along with the measured Doppler residual from the second channel.
    plt.subplot(212)
    mesh = plt.pcolormesh(datenums, fvec, sp.transpose(10.*sp.log10(res1+1e-12)), vmin=-5, vmax=25)
    plt.plot(datenums, doppler_residual(t), "r--", label="doppler resid")
    ax = plt.gca()
    ax.xaxis.set_major_formatter(xfmt)
    plt.ylim(flim)
    plt.xlabel("UTC")
    plt.ylabel("Frequency (Hz)")
    plt.title("Power ch1 (dB), %1.2f MHz"%(400.032))
    plt.subplots_adjust(bottom=0.2)
    plt.xticks(rotation=25)
    plt.legend()
    plt.colorbar(mesh, ax=ax)

    plt.tight_layout()
    print('Saving residual plots: '+savename)
    plt.savefig(savename, dpi=300)
    plt.close(fig1)
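
plot_resid needs the instrument's data dictionary, so here is a self-contained sketch of the date-axis part only: random power values on a pcolormesh whose x axis is matplotlib date numbers formatted with DateFormatter (shading='auto' assumes matplotlib >= 3.3).

import datetime
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.dates as md

# One hour of timestamps at 10 s cadence against a small frequency grid.
t0 = datetime.datetime(2017, 1, 1)
datenums = md.date2num([t0 + datetime.timedelta(seconds=10 * i) for i in range(360)])
fvec = np.linspace(-2e3, 2e3, 64)
power_db = 10.0 * np.log10(np.random.rand(len(fvec), len(datenums)) + 1e-12)

fig, ax = plt.subplots()
mesh = ax.pcolormesh(datenums, fvec, power_db, shading='auto')
ax.xaxis.set_major_formatter(md.DateFormatter('%H:%M:%S'))
fig.colorbar(mesh, ax=ax)
fig.autofmt_xdate(rotation=25)
fig.savefig('spectrogram_dates.png')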
Project: pyGrav    Author: basileh
def openRawdata(self):
        """
        - Display a file opening window
        - Populate a Campaign object: read all raw data
        - Set a new window for survey selection options
        - link selection options to apropriate functions:
          3 options are currently available: an automatic selection, a
          a selection with a user input file containing start-end dates for
          each survey, and a single srvey selection with a single start-end
          date 
          Each option calls the appropriate function:
              automaticSurveySelection()
              load_start_end_dates()
              askUserSingleSurvey()              
        which then calls the generic self.baseStationSelection() function
        which eventually calls the data populating methods within the 
        data_objects module with the appropriate options.
        """
        # open file
        fname = QtGui.QFileDialog.getOpenFileName(self, 'Open file',self.data_path)                
        campaigndata=Campaign()
        #populate a Campaign object
        campaigndata.readRawDataFile(fname)
        self.campaigndata=campaigndata
        if fname:
            #create new window and set as central Widget:
            surveySelectionWin=QtGui.QWidget()
            self.setCentralWidget(surveySelectionWin)        
            self.statusBar().showMessage("Please choose survey selection method")
            # create buttons and actions
            surveySelectionWin.btn1 = QtGui.QPushButton('automatic survey selection', self)
            surveySelectionWin.btn1.clicked.connect(self.automaticSurveySelection)
            surveySelectionWin.btn2 = QtGui.QPushButton('Load survey dates file', self)
            surveySelectionWin.btn2.clicked.connect(self.load_start_end_dates)
            surveySelectionWin.btn3 = QtGui.QPushButton('Single survey selection', self)
            surveySelectionWin.btn3.clicked.connect(self.askUserSingleSurvey)                        
            #locations                
            grid = QtGui.QGridLayout()
            grid.addWidget(surveySelectionWin.btn1,0,0,1,1)
            grid.addWidget(surveySelectionWin.btn2,1,0,1,1)  
            grid.addWidget(surveySelectionWin.btn3,2,0,1,1)          
            surveySelectionWin.setLayout(grid)   
            surveySelectionWin.setWindowTitle('Survey selections')    
            surveySelectionWin.show()
Project: pyGrav    Author: basileh
def askUserSingleSurvey(self):
        """
        Ask the user for the start and end dates of a single survey.
        """
        self.survey_selection_option=3
        chooseSingleSurvey=QtGui.QWidget()
        self.setCentralWidget(chooseSingleSurvey)      
        self.statusBar().showMessage("Please enter start/end dates of a survey")


        chooseSingleSurvey.yr=QtGui.QLabel('year')
        chooseSingleSurvey.month=QtGui.QLabel('month')
        chooseSingleSurvey.day=QtGui.QLabel('day')
        chooseSingleSurvey.hr=QtGui.QLabel('hr')
        chooseSingleSurvey.mn=QtGui.QLabel('mn')
        chooseSingleSurvey.ss=QtGui.QLabel('ss')
        chooseSingleSurvey.yrEdit=QtGui.QLineEdit()
        chooseSingleSurvey.monthEdit=QtGui.QLineEdit()
        chooseSingleSurvey.dayEdit=QtGui.QLineEdit()
        chooseSingleSurvey.hrEdit=QtGui.QLineEdit()
        chooseSingleSurvey.mnEdit=QtGui.QLineEdit()
        chooseSingleSurvey.ssEdit=QtGui.QLineEdit()
        chooseSingleSurvey.yrEditend=QtGui.QLineEdit()
        chooseSingleSurvey.monthEditend=QtGui.QLineEdit()
        chooseSingleSurvey.dayEditend=QtGui.QLineEdit()
        chooseSingleSurvey.hrEditend=QtGui.QLineEdit()
        chooseSingleSurvey.mnEditend=QtGui.QLineEdit()
        chooseSingleSurvey.ssEditend=QtGui.QLineEdit()        
        # create buttons and actions
        chooseSingleSurvey.btn1 = QtGui.QPushButton('ok', self)
        chooseSingleSurvey.btn1.clicked.connect(lambda : self.setSingleSurvey(chooseSingleSurvey))

        #locations                
        grid = QtGui.QGridLayout()
        grid.addWidget(QtGui.QLabel('Start date'),1,1)
        grid.addWidget(QtGui.QLabel('End date'),1,2)        
        grid.addWidget(chooseSingleSurvey.yr,2,0)
        grid.addWidget(chooseSingleSurvey.yrEdit,2,1) 
        grid.addWidget(chooseSingleSurvey.yrEditend,2,2)         
        grid.addWidget(chooseSingleSurvey.month,3,0)
        grid.addWidget(chooseSingleSurvey.monthEdit,3,1)
        grid.addWidget(chooseSingleSurvey.monthEditend,3,2)        
        grid.addWidget(chooseSingleSurvey.day,4,0)
        grid.addWidget(chooseSingleSurvey.dayEdit,4,1)
        grid.addWidget(chooseSingleSurvey.dayEditend,4,2) 
        grid.addWidget(chooseSingleSurvey.hr,5,0)
        grid.addWidget(chooseSingleSurvey.hrEdit,5,1)
        grid.addWidget(chooseSingleSurvey.hrEditend,5,2)        
        grid.addWidget(chooseSingleSurvey.mn,6,0)
        grid.addWidget(chooseSingleSurvey.mnEdit,6,1)
        grid.addWidget(chooseSingleSurvey.mnEditend,6,2)        
        grid.addWidget(chooseSingleSurvey.ss,7,0)
        grid.addWidget(chooseSingleSurvey.ssEdit,7,1)   
        grid.addWidget(chooseSingleSurvey.ssEditend,7,2)   
        grid.addWidget(chooseSingleSurvey.btn1,8,0)
        chooseSingleSurvey.setLayout(grid)   
        chooseSingleSurvey.setWindowTitle('Survey selection')    
        chooseSingleSurvey.show()
Project: pygeotools    Author: dshean
def fn_getdatetime_list(fn):
    """Extract all datetime strings from input filename
    """
    #Want to split last component
    fn = os.path.split(os.path.splitext(fn)[0])[-1]
    import re
    #WV01_12JUN152223255-P1BS_R1C1-102001001B3B9800__WV01_12JUN152224050-P1BS_R1C1-102001001C555C00-DEM_4x.tif
    #Need to parse above with month name 
    #Note: made this more restrictive to avoid false matches:
    #'20130304_1510_1030010020770600_1030010020CEAB00-DEM_4x'
    #This is a problem, b/c 2015/17/00:
    #WV02_20130315_10300100207D5600_1030010020151700
    #This code should be obsolete before 2019 
    #Assume new filenames
    #fn = fn[0:13]
    #Use cascading re find to pull out timestamps
    #Note: Want to be less restrictive here - could have a mix of YYYYMMDD_HHMM, YYYYMMDD and YYYY in filename
    #Should probably search for all possibilities, then prune  
    #NOTE: these don't include seconds in the time
    #NOTE: could have 20130304_1510__20130304__whatever in filename
    #The current approach will only catch the first datetime 
    dstr = None
    dstr = re.findall(r'(?:^|_|-)(?:19|20)[0-9][0-9](?:0[1-9]|1[012])(?:0[1-9]|[12][0-9]|3[01])[_T](?:0[0-9]|1[0-9]|2[0-3])[0-5][0-9]', fn)
    if not dstr:
        dstr = re.findall(r'(?:^|_|-)(?:19|20)[0-9][0-9](?:0[1-9]|1[012])(?:0[1-9]|[12][0-9]|3[01])(?:0[0-9]|1[0-9]|2[0-3])[0-5][0-9]', fn)
    if not dstr:
        dstr = re.findall(r'(?:^|_|-)(?:19|20)[0-9][0-9](?:0[1-9]|1[012])(?:0[1-9]|[12][0-9]|3[01])(?:_|-)', fn)
        #This should pick up dates separated by a dash
        #dstr = re.findall(r'(?:^|_|-)(?:19|20)[0-9][0-9](?:0[1-9]|1[012])(?:0[1-9]|[12][0-9]|3[01])', fn)
    if not dstr:
        dstr = re.findall(r'(?:^|_|-)(?:19|20)[0-9][0-9](?:_|-)', fn)
    #This is for USGS archive filenames
    if not dstr:
        dstr = re.findall(r'[0-3][0-9][a-z][a-z][a-z][0-9][0-9]', fn)
    #if not dstr:
    #    dstr = re.findall(r'(?:^|_)(?:19|20)[0-9][0-9]', fn)
    #This is a hack to remove peripheral underscores and dashes
    dstr = [d.lstrip('_').rstrip('_') for d in dstr]
    dstr = [d.lstrip('-').rstrip('-') for d in dstr]
    #This returns an empty list if nothing is found
    out = [strptime_fuzzy(s) for s in dstr]
    #This is USGS archive format
    #out = [datetime.strptime(s, '%d%b%y') for s in dstr][0]
    return out
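
The regular expressions can be exercised on their own, independently of the strptime_fuzzy helper; the first (most restrictive) pattern picks up the YYYYMMDD_HHMM form mentioned in the comments:

import re

pat = r'(?:^|_|-)(?:19|20)[0-9][0-9](?:0[1-9]|1[012])(?:0[1-9]|[12][0-9]|3[01])[_T](?:0[0-9]|1[0-9]|2[0-3])[0-5][0-9]'
print(re.findall(pat, '20130304_1510_1030010020770600_1030010020CEAB00-DEM_4x'))
# -> ['20130304_1510']
print(re.findall(pat, 'WV01_12JUN152223255-P1BS_R1C1-102001001B3B9800-DEM_4x'))
# -> []: month-name timestamps like 12JUN15 are the case the comments above flag as unhandled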
Project: PyCS    Author: COSMOGRAIL
def factory(jds, mags, magerrs=None, telescopename="Unknown", object="Unknown", verbose=False):
    """Returns a valid lightcurve object from the provided arrays.
    The numpy arrays jds and mags are mandatory. If you do not specify a third array containing the magerrs,
    we will calculate them "automatically" (all the same value), to avoid having 0.0 errors.

    @type   jds: 1D numpy array
    @param  jds: julian dates
    @type   mags: 1D numpy array
    @param  mags: magnitudes
    @type   magerrs: 1D numpy array
    @param  magerrs: optional magnitude errors

    @todo: improve it and use this in file importing functions

    """
    # Make a brand new lightcurve object :
    newlc = lightcurve()

    # Of course we can/should check a lot of things, but let's be naive:

    newlc.jds = np.asarray(jds)
    newlc.mags = np.asarray(mags)

    if magerrs is None:
        newlc.magerrs = np.zeros(len(newlc.jds)) + 0.1
    else:
        newlc.magerrs = np.asarray(magerrs)

    if len(newlc.jds) != len(newlc.mags) or len(newlc.jds) != len(newlc.magerrs):
        raise RuntimeError, "lightcurve factory called with arrays of incoherent lengths"

    newlc.mask = newlc.magerrs >= 0.0   # This should be true for all !

    newlc.properties = [{}] * len(newlc.jds)

    newlc.telescopename = telescopename
    newlc.object = object

    newlc.setindexlabels()
    newlc.commentlist = []

    newlc.sort() # not sure if this is needed / should be there

    newlc.validate()

    if verbose: print "New lightcurve %s with %i points" % (str(newlc), len(newlc.jds))

    return newlc
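
A hypothetical call, assuming numpy and the module that defines the lightcurve class are imported as elsewhere in PyCS; the Julian dates and magnitudes below are invented.

import numpy as np

jds = np.array([2455100.0, 2455101.0, 2455102.5])
mags = np.array([18.21, 18.27, 18.19])
lc = factory(jds, mags, telescopename="Euler", object="example QSO", verbose=True)
# magerrs was not given, so every point gets the constant 0.1 mag error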
Project: workout-analyzer    Author: mctrinkle
def plot_volume_inensity_analysis_stacked_bar_graph(mus_names, wo_dates_list,
                                                    mus_vol, mus_int):

    x = wo_dates_list

    c_1 = np.array(mus_vol)
    c_1_colors = colors
    c_1_labels = mus_names
    c_2 = np.array(mus_int)
    c_2_colors = colors
    c_2_labels = c_1_labels
    ind = np.arange(len(c_1[0]))    
    width = .8   
    f, ax = plt.subplots(2, 1, sharex=True, figsize=(17,11))
    f.subplots_adjust(bottom=0.5) #make room for the legend
    locs, labels = plt.xticks()
    plt.setp(labels, rotation=25)
    p = [] # list of bar properties

    def create_subplot(matrix, colors, axis, title, dates):
        bar_renderers = []
        #ind = np.arange(matrix.shape[1])
        ind = dates
        bottoms = np.cumsum(np.vstack((np.zeros(matrix.shape[1]), matrix)), axis=0)[:-1]
        for i, row in enumerate(matrix):
            r = axis.bar(ind, row, width=0.5, color=colors[i], bottom=bottoms[i])
            bar_renderers.append(r)
        axis.set_title(title)
        axis.set_xlim((x[0],x[-1]))
        axis.xaxis_date()
        return bar_renderers

    p.extend(create_subplot(c_1, c_1_colors, ax[0], 'Volume', x))
    p.extend(create_subplot(c_2, c_2_colors, ax[1], 'Intensity', x))

    plt.suptitle('Volume and Intensity by Muscle Group')
    ax[0].set_ylabel('Total Volume') # add left y label
    ax[1].set_ylabel('Total Intensity') # add left y label
    ax[0].grid(True, color='w')
    ax[0].set_axis_bgcolor('black')
    ax[1].grid(True, color='w')
    ax[1].set_axis_bgcolor('black')
    f.legend(((x[0] for x in p)), # bar properties
             (c_1_labels), 
             bbox_to_anchor=(0.5, 0), 
             loc='lower center',
             ncol=4)
    f.canvas.set_window_title('WorkoutAnalyzer - Volume and Intensity by Muscle Group')
    f.set_size_inches(20,20)
    plt.subplots_adjust(left=0.08, bottom=0.20, right=0.9, top=0.93, wspace=0.20, hspace=0.10)
    plt.show()
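
The date handling in the snippet above boils down to bars keyed by datetimes plus xaxis_date(); a stripped-down, self-contained sketch with made-up numbers:

import datetime
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt

dates = [datetime.date(2017, 1, 2) + datetime.timedelta(days=7 * i) for i in range(4)]
volume = np.array([3.0, 4.0, 2.0, 5.0])
intensity = np.array([1.0, 2.0, 1.5, 0.5])

fig, ax = plt.subplots()
ax.bar(dates, volume, width=2.0, label='volume')                       # width is in days on a date axis
ax.bar(dates, intensity, width=2.0, bottom=volume, label='intensity')  # stacked on top
ax.xaxis_date()                                                        # treat x values as dates
ax.legend()
fig.autofmt_xdate(rotation=25)
fig.savefig('stacked_by_date.png')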
Project: deep_heart_hackatho    Author: akshaynathr
def initializeLines(self, timestamp):
        print "initializing %s" % self.name
        anomalyRange = (0.0, 1.0)
        self.dates = deque([timestamp] * WINDOW, maxlen=WINDOW)
        self.convertedDates = deque(
            [date2num(date) for date in self.dates], maxlen=WINDOW
        )
        self.value = deque([0.0] * WINDOW, maxlen=WINDOW)
        self.rawValue = deque([0.0] * WINDOW, maxlen=WINDOW)
        self.predicted = deque([0.0] * WINDOW, maxlen=WINDOW)
        self.anomalyScore = deque([0.0] * WINDOW, maxlen=WINDOW)
        self.anomalyLikelihood = deque([0.0] * WINDOW, maxlen=WINDOW)

        actualPlot, = self._mainGraph.plot(self.dates, self.value)
        self.actualLine = actualPlot
        rawPlot, = self._mainGraph.plot(self.dates, self.rawValue)
        self.rawLine = rawPlot
        predictedPlot, = self._mainGraph.plot(self.dates, self.predicted)
        self.predictedLine = predictedPlot
        self._mainGraph.legend(tuple(['actual', 'raw', 'predicted']), loc=3)

        anomalyScorePlot, = self._anomalyGraph.plot(
            self.dates, self.anomalyScore, 'm'
        )
        anomalyScorePlot.axes.set_ylim(anomalyRange)

        self.anomalyScoreLine = anomalyScorePlot
        anomalyLikelihoodPlot, = self._anomalyGraph.plot(
            self.dates, self.anomalyScore, 'r'
        )
        anomalyLikelihoodPlot.axes.set_ylim(anomalyRange)
        self.anomalyLikelihoodLine = anomalyLikelihoodPlot
        self._anomalyGraph.legend(
            tuple(['anomaly score', 'anomaly likelihood']), loc=3
        )

        dateFormatter = DateFormatter('%H:%M:%S.%f')
        self._mainGraph.xaxis.set_major_formatter(dateFormatter)
        self._anomalyGraph.xaxis.set_major_formatter(dateFormatter)

        self._mainGraph.relim()
        self._mainGraph.autoscale_view(True, True, True)

        self.linesInitialized = True
Project: deep_heart_hackatho    Author: akshaynathr
def write(self, timestamp, value, predicted, anomalyScore, rawValue):

        # We need the first timestamp to initialize the lines at the right X value,
        # so do that check first.
        if not self.linesInitialized:
            self.initializeLines(timestamp)

        anomalyLikelihood = self.anomalyLikelihoodHelper.anomalyProbability(
            value, anomalyScore, timestamp
        )

        self.dates.append(timestamp)
        self.convertedDates.append(date2num(timestamp))
        self.value.append(value)
        self.rawValue.append(rawValue)
        self.allValues.append(value)
        self.allRawValues.append(rawValue)
        self.predicted.append(predicted)
        self.anomalyScore.append(anomalyScore)
        self.anomalyLikelihood.append(anomalyLikelihood)

        # Update main chart data
        self.actualLine.set_xdata(self.convertedDates)
        self.actualLine.set_ydata(self.value)
        self.rawLine.set_xdata(self.convertedDates)
        self.rawLine.set_ydata(self.rawValue)
        self.predictedLine.set_xdata(self.convertedDates)
        self.predictedLine.set_ydata(self.predicted)
        # Update anomaly chart data
        self.anomalyScoreLine.set_xdata(self.convertedDates)
        self.anomalyScoreLine.set_ydata(self.anomalyScore)
        self.anomalyLikelihoodLine.set_xdata(self.convertedDates)
        self.anomalyLikelihoodLine.set_ydata(self.anomalyLikelihood)

        # Remove previous highlighted regions
        for poly in self._chartHighlights:
            poly.remove()
        self._chartHighlights = []

        # weekends = extractWeekendHighlights(self.dates)
        anomalies = extractAnomalyIndices(self.anomalyLikelihood)

        # Highlight weekends in main chart
        # self.highlightChart(weekends, self._mainGraph)

        # Highlight anomalies in anomaly chart
        self.highlightChart(anomalies, self._anomalyGraph)

        maxValue = max(max(self.allValues), max(self.allRawValues))
        self._mainGraph.relim()
        self._mainGraph.axes.set_ylim(0, maxValue + (maxValue * 0.02))

        self._mainGraph.relim()
        self._mainGraph.autoscale_view(True, scaley=False)
        self._anomalyGraph.relim()
        self._anomalyGraph.autoscale_view(True, True, True)

        plt.draw()
        plt.pause(0.00000000001)