Python scipy module: minimum() code examples

We have collected the following 34 code examples from open-source Python projects to illustrate how to use scipy.minimum(). Unless a snippet says otherwise, assume "import scipy as sp" (and "import numpy as np" where np appears) at the top of each example.
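Note: scipy.minimum() is simply a top-level re-export of numpy.minimum(), the element-wise minimum ufunc. These NumPy aliases were deprecated and have since been removed from SciPy, so new code should call numpy.minimum() directly. A minimal sketch of the element-wise behaviour every example below relies on:

import numpy as np

a = np.array([1.0, 5.0, 3.0])
b = np.array([2.0, 4.0, np.inf])
print(np.minimum(a, b))           # [1. 4. 3.]  -- element-wise minimum
print(np.minimum(0.9, a / 10.0))  # [0.1 0.5 0.3]  -- a scalar broadcasts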

Project: house-price-map    Author: andyljones    | project source | file source
import scipy as sp
import scipy.ndimage  # sp.ndimage is not available from "import scipy" alone

def with_walking(time_arr, mins_per_square=1.3, transfer_constant=5):
    arr = time_arr.copy()
    # 4-connected and diagonal neighbourhood footprints for the minimum filters
    cross_footprint = sp.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]]).astype(bool)
    diag_footprint = sp.array([[1, 0, 1], [0, 1, 0], [1, 0, 1]]).astype(bool)
    arr[sp.isnan(arr)] = sp.inf
    for i in range(60):
        cross_arr = sp.ndimage.minimum_filter(arr, footprint=cross_footprint)
        cross_arr[sp.isnan(cross_arr)] = sp.inf
        cross_changes = (cross_arr != arr)
        cross_arr[cross_changes] += 1*mins_per_square

        diag_arr = sp.ndimage.minimum_filter(arr, footprint=diag_footprint)
        diag_arr[sp.isnan(diag_arr)] = sp.inf
        diag_changes = (diag_arr != arr)
        # diagonal steps cost ~1.4x (roughly sqrt(2)) a cross step
        diag_arr[diag_changes] += 1.4*mins_per_square

        arr = sp.minimum(cross_arr, diag_arr)

    arr[sp.isinf(arr)] = sp.nan

    return arr + transfer_constant
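A hypothetical usage sketch (the grid values are invented: transit times in minutes, with NaN marking squares that transit does not reach):

import scipy as sp

times = sp.array([[10.0, sp.nan, sp.nan],
                  [sp.nan, sp.nan, sp.nan],
                  [sp.nan, sp.nan, 30.0]])
# NaN squares are filled in from the cheapest reachable neighbour plus the
# walking cost, then transfer_constant is added everywhere.
print(with_walking(times))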
Project: AutoML5    Author: djajetic    | project source | file source
def log_loss(solution, prediction, task='binary.classification'):
    ''' Log loss for binary and multiclass. '''
    # binarize_predictions and mvmean are helpers defined elsewhere in the project.
    [sample_num, label_num] = solution.shape
    eps = 1e-15

    pred = np.copy(prediction)  # beware: changes in prediction occur through this
    sol = np.copy(solution)
    if (task == 'multiclass.classification') and (label_num > 1):
        # Make sure the lines add up to one for multi-class classification
        norma = np.sum(prediction, axis=1)
        for k in range(sample_num):
            pred[k, :] /= sp.maximum(norma[k], eps)
        # Make sure there is a single label active per line for multi-class classification
        sol = binarize_predictions(solution, task='multiclass.classification')
        # For the base prediction, this solution is ridiculous in the multi-label case

    # Bound the predictions to avoid log(0), 1/0, ...
    pred = sp.minimum(1 - eps, sp.maximum(eps, pred))
    # Compute the log loss
    pos_class_log_loss = -mvmean(sol * np.log(pred), axis=0)
    if (task != 'multiclass.classification') or (label_num == 1):
        # The multi-label case is a bunch of binary problems.
        # The second class is the negative class for each column.
        neg_class_log_loss = -mvmean((1 - sol) * np.log(1 - pred), axis=0)
        log_loss = pos_class_log_loss + neg_class_log_loss
        # Each column is an independent problem, so we average.
        # The probabilities in one line do not add up to one.
        # log_loss = mvmean(log_loss)
        # print('binary {}'.format(log_loss))
        # In the multilabel case, the right thing is to AVERAGE, not sum.
        # We return all the scores so we can normalize correctly later on.
    else:
        # For the multiclass case the probabilities in one line add up to one.
        log_loss = pos_class_log_loss
        # We sum the contributions of the columns.
        log_loss = np.sum(log_loss)
        # print('multiclass {}'.format(log_loss))
    return log_loss
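The bounding line pred = sp.minimum(1-eps, sp.maximum(eps, pred)) is the standard clip-to-[eps, 1-eps] idiom; the sketch below shows it is equivalent to np.clip:

import numpy as np

eps = 1e-15
pred = np.array([0.0, 0.3, 1.0])
bounded = np.minimum(1 - eps, np.maximum(eps, pred))
assert np.allclose(bounded, np.clip(pred, eps, 1 - eps))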
Project: CopyNet    Author: MultiPath    | project source | file source
def binary_logloss(p, y):
    epsilon = 1e-15
    p = sp.maximum(epsilon, p)
    p = sp.minimum(1-epsilon, p)
    res = sum(y * sp.log(p) + sp.subtract(1, y) * sp.log(sp.subtract(1, p)))
    res *= -1.0/len(y)
    return res
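A quick sanity check of binary_logloss with made-up values, assuming scikit-learn is available as a reference implementation:

import numpy as np
from sklearn.metrics import log_loss as sk_log_loss

y = np.array([1, 0, 1, 1])
p = np.array([0.9, 0.2, 0.7, 0.99])
# Both compute -mean(y*log(p) + (1-y)*log(1-p)) for the binary case.
assert np.isclose(binary_logloss(p, y), sk_log_loss(y, p))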
Project: AutoML4    Author: djajetic    | project source | file source
def log_loss(solution, prediction, task='binary.classification'):
    ''' Log loss for binary and multiclass. '''
    # binarize_predictions and mvmean are helpers defined elsewhere in the project.
    [sample_num, label_num] = solution.shape
    eps = 1e-15

    pred = np.copy(prediction)  # beware: changes in prediction occur through this
    sol = np.copy(solution)
    if (task == 'multiclass.classification') and (label_num > 1):
        # Make sure the lines add up to one for multi-class classification
        norma = np.sum(prediction, axis=1)
        for k in range(sample_num):
            pred[k, :] /= sp.maximum(norma[k], eps)
        # Make sure there is a single label active per line for multi-class classification
        sol = binarize_predictions(solution, task='multiclass.classification')
        # For the base prediction, this solution is ridiculous in the multi-label case

    # Bound the predictions to avoid log(0), 1/0, ...
    pred = sp.minimum(1 - eps, sp.maximum(eps, pred))
    # Compute the log loss
    pos_class_log_loss = -mvmean(sol * np.log(pred), axis=0)
    if (task != 'multiclass.classification') or (label_num == 1):
        # The multi-label case is a bunch of binary problems.
        # The second class is the negative class for each column.
        neg_class_log_loss = -mvmean((1 - sol) * np.log(1 - pred), axis=0)
        log_loss = pos_class_log_loss + neg_class_log_loss
        # Each column is an independent problem, so we average.
        # The probabilities in one line do not add up to one.
        # log_loss = mvmean(log_loss)
        # print('binary {}'.format(log_loss))
        # In the multilabel case, the right thing is to AVERAGE, not sum.
        # We return all the scores so we can normalize correctly later on.
    else:
        # For the multiclass case the probabilities in one line add up to one.
        log_loss = pos_class_log_loss
        # We sum the contributions of the columns.
        log_loss = np.sum(log_loss)
        # print('multiclass {}'.format(log_loss))
    return log_loss
Project: digital_rf    Author: MITHaystack    | project source | file source
def open_file(maindir):
    """
        Creates the digital RF reading object.

        Args:
            maindir (:obj:`str`): The directory where the data is located.

        Returns:
            drfObj (:obj:`DigitalRFReader`): Digital RF Reader object.
            chandict (:obj:`dict`): Dictionary that holds info for the channels.
            start_indx (:obj:`long`): Start index in samples.
            end_indx (:obj:`long`): End index in samples.
    """
    mainpath = os.path.expanduser(maindir)
    drfObj = drf.DigitalRFReader(mainpath)
    chans = drfObj.get_channels()

    chandict = {}

    start_indx, end_indx = [0, sp.inf]
    # Get channel info
    for ichan in chans:
        curdict = {}

        curdict['sind'], curdict['eind'] = drfObj.get_bounds(ichan)
        # Determine the read boundaries, assuming the sampling is the same
        # across channels: keep the latest start and the earliest end.
        start_indx = sp.maximum(curdict['sind'], start_indx)
        end_indx = sp.minimum(curdict['eind'], end_indx)

        dmetadict = drfObj.read_metadata(start_indx, end_indx, ichan)
        dmetakeys = list(dmetadict.keys())  # list() so the keys can be indexed in Python 3

        curdict['sps'] = dmetadict[dmetakeys[0]]['samples_per_second']
        curdict['fo'] = dmetadict[dmetakeys[0]]['center_frequencies'].ravel()[0]
        chandict[ichan] = curdict

    return (drfObj, chandict, start_indx, end_indx)
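The sp.maximum/sp.minimum pair above is the usual idiom for intersecting intervals: keep the latest start and the earliest end across channels. A standalone sketch of just that step, with hypothetical bounds:

import scipy as sp

bounds = [(0, 1000), (250, 1200), (100, 900)]  # hypothetical (sind, eind) per channel
start_indx, end_indx = 0, sp.inf
for sind, eind in bounds:
    start_indx = sp.maximum(sind, start_indx)
    end_indx = sp.minimum(eind, end_indx)
print(start_indx, end_indx)  # 250 900.0 -- the overlap common to all channels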
Project: keras    Author: GeekLiB    | project source | file source
def binary_logloss(p, y):
    epsilon = 1e-15
    p = sp.maximum(epsilon, p)
    p = sp.minimum(1-epsilon, p)
    res = sum(y * sp.log(p) + sp.subtract(1, y) * sp.log(sp.subtract(1, p)))
    res *= -1.0/len(y)
    return res
Project: automl_gpu    Author: abhishekkrthakur    | project source | file source
def log_loss(solution, prediction, task='binary.classification'):
    ''' Log loss for binary and multiclass. '''
    # binarize_predictions and mvmean are helpers defined elsewhere in the project.
    [sample_num, label_num] = solution.shape
    eps = 1e-15

    pred = np.copy(prediction)  # beware: changes in prediction occur through this
    sol = np.copy(solution)
    if (task == 'multiclass.classification') and (label_num > 1):
        # Make sure the lines add up to one for multi-class classification
        norma = np.sum(prediction, axis=1)
        for k in range(sample_num):
            pred[k, :] /= sp.maximum(norma[k], eps)
        # Make sure there is a single label active per line for multi-class classification
        sol = binarize_predictions(solution, task='multiclass.classification')
        # For the base prediction, this solution is ridiculous in the multi-label case

    # Bound the predictions to avoid log(0), 1/0, ...
    pred = sp.minimum(1 - eps, sp.maximum(eps, pred))
    # Compute the log loss
    pos_class_log_loss = -mvmean(sol * np.log(pred), axis=0)
    if (task != 'multiclass.classification') or (label_num == 1):
        # The multi-label case is a bunch of binary problems.
        # The second class is the negative class for each column.
        neg_class_log_loss = -mvmean((1 - sol) * np.log(1 - pred), axis=0)
        log_loss = pos_class_log_loss + neg_class_log_loss
        # Each column is an independent problem, so we average.
        # The probabilities in one line do not add up to one.
        # log_loss = mvmean(log_loss)
        # print('binary {}'.format(log_loss))
        # In the multilabel case, the right thing is to AVERAGE, not sum.
        # We return all the scores so we can normalize correctly later on.
    else:
        # For the multiclass case the probabilities in one line add up to one.
        log_loss = pos_class_log_loss
        # We sum the contributions of the columns.
        log_loss = np.sum(log_loss)
        # print('multiclass {}'.format(log_loss))
    return log_loss
Project: nimo    Author: wolfram2012    | project source | file source
def _computeBGDiff(self):
    prevImg = self._imageBuffer[0].asMatrix2D()
    curImg = self._imageBuffer.getMiddle().asMatrix2D()
    nextImg = self._imageBuffer[-1].asMatrix2D()

    delta1 = sp.absolute(curImg - prevImg)   # frame diff 1
    delta2 = sp.absolute(nextImg - curImg)   # frame diff 2

    # use element-wise minimum of the two difference images, which is what
    # gets compared to threshold to yield foreground mask
    return sp.minimum(delta1, delta2)
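Why the element-wise minimum: a moving object yields a large difference at its old position in delta1 and at its next position in delta2, but only its current position is large in both, so the minimum keeps the object and suppresses the "ghosts" it leaves behind. A 1-D toy sketch:

import scipy as sp

prev = sp.array([9., 0., 0., 0.])  # object at pixel 0
cur = sp.array([0., 9., 0., 0.])   # object now at pixel 1
nxt = sp.array([0., 0., 9., 0.])   # object next at pixel 2
delta1 = sp.absolute(cur - prev)   # [9. 9. 0. 0.]
delta2 = sp.absolute(nxt - cur)    # [0. 9. 9. 0.]
print(sp.minimum(delta1, delta2))  # [0. 9. 0. 0.] -- only the current position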
Project: pCVR    Author: xjtushilei    | project source | file source
def loglossl(act, pred):
    epsilon = 1e-15
    pred = sp.maximum(epsilon, pred)
    pred = sp.minimum(1-epsilon, pred)
    ll = sum(act*sp.log(pred) + sp.subtract(1,act)*sp.log(sp.subtract(1,pred)))
    ll = ll * -1.0/len(act)
    return ll
Project: pCVR    Author: xjtushilei    | project source | file source
from keras import backend as K  # K is the Keras backend module

def my_logloss(act, pred):
    epsilon = 1e-15
    pred = K.maximum(epsilon, pred)
    pred = K.minimum(1 - epsilon, pred)
    ll = K.sum(act * K.log(pred) + (1 - act) * K.log(1 - pred))
    ll = ll * -1.0 / K.shape(act)[0]

    return ll
Project: pCVR    Author: xjtushilei    | project source | file source
def logloss(act, pred):
    '''
    Log loss for binary classification.
    :param act: ground-truth labels (0/1)
    :param pred: predicted probabilities
    :return: average log loss
    '''
    epsilon = 1e-15
    pred = sp.maximum(epsilon, pred)
    pred = sp.minimum(1 - epsilon, pred)
    ll = sum(act * sp.log(pred) + sp.subtract(1, act) * sp.log(sp.subtract(1, pred)))
    ll = ll * -1.0 / len(act)
    return ll
Project: seq2seq-keyphrase    Author: memray    | project source | file source
def binary_logloss(p, y):
    epsilon = 1e-15
    p = sp.maximum(epsilon, p)
    p = sp.minimum(1-epsilon, p)
    res = sum(y * sp.log(p) + sp.subtract(1, y) * sp.log(sp.subtract(1, p)))
    res *= -1.0/len(y)
    return res
Project: AutoML-Challenge    Author: postech-mlg-exbrain    | project source | file source
def log_loss(solution, prediction, task=BINARY_CLASSIFICATION):
    """Log loss for binary and multiclass."""
    # binarize_predictions and the task constants are defined elsewhere in the project.
    [sample_num, label_num] = solution.shape
    eps = 1e-15

    pred = np.copy(prediction)  # beware: changes in prediction occur through this
    sol = np.copy(solution)
    if (task == MULTICLASS_CLASSIFICATION) and (label_num > 1):
        # Make sure the lines add up to one for multi-class classification
        norma = np.sum(prediction, axis=1)
        for k in range(sample_num):
            pred[k, :] /= sp.maximum(norma[k], eps)
        # Make sure there is a single label active per line for multi-class
        # classification
        sol = binarize_predictions(solution, task=MULTICLASS_CLASSIFICATION)
        # For the base prediction, this solution is ridiculous in the
        # multi-label case

    # Bound the predictions to avoid log(0), 1/0, ...
    pred = sp.minimum(1 - eps, sp.maximum(eps, pred))
    # Compute the log loss
    pos_class_log_loss = -np.mean(sol * np.log(pred), axis=0)
    if (task != MULTICLASS_CLASSIFICATION) or (label_num == 1):
        # The multi-label case is a bunch of binary problems.
        # The second class is the negative class for each column.
        neg_class_log_loss = -np.mean((1 - sol) * np.log(1 - pred), axis=0)
        log_loss = pos_class_log_loss + neg_class_log_loss
        # Each column is an independent problem, so we average.
        # The probabilities in one line do not add up to one.
        # log_loss = mvmean(log_loss)
        # print('binary {}'.format(log_loss))
        # In the multilabel case, the right thing is to AVERAGE, not sum.
        # We return all the scores so we can normalize correctly later on.
    else:
        # For the multiclass case the probabilities in one line add up to one.
        log_loss = pos_class_log_loss
        # We sum the contributions of the columns.
        log_loss = np.sum(log_loss)
        # print('multiclass {}'.format(log_loss))
    return log_loss
Project: keraflow    Author: ipod825    | project source | file source
def binary_logloss(p, y):
    epsilon = 1e-15
    p = sp.maximum(epsilon, p)
    p = sp.minimum(1-epsilon, p)
    res = sum(y * sp.log(p) + sp.subtract(1, y) * sp.log(sp.subtract(1, p)))
    res *= -1.0/len(y)
    return res
Project: Tencent2017_Final_Coda_Allegro    Author: BladeCoda    | project source | file source
def logloss(act, pred):
    epsilon = 1e-15
    pred = sp.maximum(epsilon, pred)
    pred = sp.minimum(1 - epsilon, pred)
    ll = sum(act * sp.log(pred) + sp.subtract(1, act) * sp.log(sp.subtract(1, pred)))
    ll = ll * -1.0 / len(act)
    return ll
Project: Tencent2017_Final_Coda_Allegro    Author: BladeCoda    | project source | file source
def logloss(act, pred):
    epsilon = 1e-15
    pred = sp.maximum(epsilon, pred)
    pred = sp.minimum(1 - epsilon, pred)
    ll = sum(act * sp.log(pred) + sp.subtract(1, act) * sp.log(sp.subtract(1, pred)))
    ll = ll * -1.0 / len(act)
    return ll
Project: keras-recommendation    Author: sonyisme    | project source | file source
def binary_logloss(p, y):
    epsilon = 1e-15
    p = sp.maximum(epsilon, p)
    p = sp.minimum(1-epsilon, p)
    res = sum(y*sp.log(p) + sp.subtract(1,y)*sp.log(sp.subtract(1,p)))
    res *= -1.0/len(y)
    return res
Project: keras-customized    Author: ambrite    | project source | file source
def binary_logloss(p, y):
    epsilon = 1e-15
    p = sp.maximum(epsilon, p)
    p = sp.minimum(1-epsilon, p)
    res = sum(y * sp.log(p) + sp.subtract(1, y) * sp.log(sp.subtract(1, p)))
    res *= -1.0/len(y)
    return res
Project: Tencent_Social_Ads    Author: freelzy    | project source | file source
def logloss(act, preds):
    epsilon = 1e-15
    preds = sp.maximum(epsilon, preds)
    preds = sp.minimum(1 - epsilon, preds)
    ll = sum(act * sp.log(preds) + sp.subtract(1, act) * sp.log(sp.subtract(1, preds)))
    ll = ll * -1.0 / len(act)
    return ll
Project: Tencent_Social_Ads    Author: freelzy    | project source | file source
def logloss(act, preds):
    epsilon = 1e-15
    preds = sp.maximum(epsilon, preds)
    preds = sp.minimum(1 - epsilon, preds)
    ll = sum(act * sp.log(preds) + sp.subtract(1, act) * sp.log(sp.subtract(1, preds)))
    ll = ll * -1.0 / len(act)
    return ll
Project: Tencent_Social_Ads    Author: freelzy    | project source | file source
def logloss(act, preds):
    epsilon = 1e-15
    preds = sp.maximum(epsilon, preds)
    preds = sp.minimum(1 - epsilon, preds)
    ll = sum(act * sp.log(preds) + sp.subtract(1, act) * sp.log(sp.subtract(1, preds)))
    ll = ll * -1.0 / len(act)
    return ll
Project: Cascade-CNN-Face-Detection    Author: gogolgrind    | project source | file source
def nms(dets, proba, T):
    """Greedy non-maximum suppression: keep the highest-scoring box,
    drop every remaining box whose IoU with it exceeds T, repeat."""
    dets = dets.astype("float")
    if len(dets) == 0:
        return []

    x1 = dets[:, 0]
    y1 = dets[:, 1]
    x2 = dets[:, 2]
    y2 = dets[:, 3]
    scores = proba

    areas = (x2 - x1 + 1) * (y2 - y1 + 1)
    order = scores.argsort()[::-1]  # box indices, best score first

    keep = []
    while order.size > 0:
        i = order[0]
        keep.append(i)
        # Intersection of box i with every remaining box
        xx1 = sp.maximum(x1[i], x1[order[1:]])
        yy1 = sp.maximum(y1[i], y1[order[1:]])
        xx2 = sp.minimum(x2[i], x2[order[1:]])
        yy2 = sp.minimum(y2[i], y2[order[1:]])

        w = sp.maximum(0.0, xx2 - xx1 + 1)
        h = sp.maximum(0.0, yy2 - yy1 + 1)
        inter = w * h
        ovr = inter / (areas[i] + areas[order[1:]] - inter)  # IoU
        inds = sp.where(ovr <= T)[0]
        order = order[inds + 1]  # +1 because ovr is offset by one from order

    return keep
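A usage sketch with made-up boxes: two heavily overlapping detections and one far away, with an IoU threshold of 0.5:

import scipy as sp

dets = sp.array([[0, 0, 10, 10],
                 [1, 1, 11, 11],     # overlaps the first box heavily
                 [50, 50, 60, 60]])  # far away from both
proba = sp.array([0.9, 0.8, 0.7])
print(nms(dets, proba, 0.5))  # [0, 2] -- the weaker overlapping box is suppressed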
Project: Tencent_Social_Advertising_Algorithm_Competition    Author: guicunbin    | project source | file source
def logloss(act, pred):
    epsilon = 1e-15
    pred = sp.maximum(epsilon, pred)
    pred = sp.minimum(1-epsilon, pred)
    ll = sum(act*sp.log(pred) + sp.subtract(1,act)*sp.log(sp.subtract(1,pred)))
    ll = ll * -1.0/len(act)
    return ll
Project: Tencent_Social_Advertising_Algorithm_Competition    Author: guicunbin    | project source | file source
def self_eval(pred, train_data):
    '''
    Custom log-loss metric in the (name, value, is_higher_better) convention.
    :param pred: predicted probabilities
    :param train_data: a Dataset exposing get_label(), or the raw labels
    '''
    try:
        labels = train_data.get_label()
    except AttributeError:
        labels = train_data
    epsilon = 1e-15
    pred = np.maximum(epsilon, pred)
    pred = np.minimum(1 - epsilon, pred)
    ll = sum(labels * np.log(pred) + (1 - labels) * np.log(1 - pred))
    ll = ll * (-1.0) / len(labels)
    return 'log loss', ll, False
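A usage sketch on raw labels (made-up values); the (name, value, is_higher_better) return triple is the custom-metric convention used by gradient-boosting libraries such as LightGBM:

import numpy as np

labels = np.array([1, 0, 1])
pred = np.array([0.8, 0.3, 0.9])
name, value, higher_better = self_eval(pred, labels)
print(name, round(value, 3), higher_better)  # log loss 0.228 False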
Project: CopyNet    Author: MingyuanXie    | project source | file source
def binary_logloss(p, y):
    epsilon = 1e-15
    p = sp.maximum(epsilon, p)
    p = sp.minimum(1-epsilon, p)
    res = sum(y * sp.log(p) + sp.subtract(1, y) * sp.log(sp.subtract(1, p)))
    res *= -1.0/len(y)
    return res
Project: tencent_social_algo    Author: Folieshell    | project source | file source
def logloss(act, pred):
    epsilon = 1e-15
    pred = sp.maximum(epsilon, pred)
    pred = sp.minimum(1 - epsilon, pred)
    ll = sum(act * sp.log(pred) + sp.subtract(1, act) * sp.log(sp.subtract(1, pred)))
    ll = ll * -1.0 / len(act)
    return ll
Project: tencent_social_algo    Author: Folieshell    | project source | file source
def logloss(act, pred):
    epsilon = 1e-15
    pred = sp.maximum(epsilon, pred)
    pred = sp.minimum(1-epsilon, pred)
    ll = sum(act*sp.log(pred) + sp.subtract(1,act)*sp.log(sp.subtract(1,pred)))
    ll = ll * -1.0/len(act)
    return ll
Project: deep-coref    Author: clarkkev    | project source | file source
def binary_logloss(p, y):
    epsilon = 1e-15
    p = sp.maximum(epsilon, p)
    p = sp.minimum(1-epsilon, p)
    res = sum(y * sp.log(p) + sp.subtract(1, y) * sp.log(sp.subtract(1, p)))
    res *= -1.0/len(y)
    return res
Project: deep-coref    Author: clarkkev    | project source | file source
def binary_logloss(p, y):
    epsilon = 1e-15
    p = sp.maximum(epsilon, p)
    p = sp.minimum(1-epsilon, p)
    res = sum(y * sp.log(p) + sp.subtract(1, y) * sp.log(sp.subtract(1, p)))
    res *= -1.0/len(y)
    return res
Project: InnerOuterRNN    Author: Chemoinformatics    | project source | file source
def binary_logloss(p, y):
    epsilon = 1e-15
    p = sp.maximum(epsilon, p)
    p = sp.minimum(1-epsilon, p)
    res = sum(y * sp.log(p) + sp.subtract(1, y) * sp.log(sp.subtract(1, p)))
    res *= -1.0/len(y)
    return res
Project: GitImpact    Author: ludovicdmt    | project source | file source
def logloss(act, pred):
    epsilon = 1e-15
    pred = sp.maximum(epsilon, pred)
    pred = sp.minimum(1-epsilon, pred)
    ll = sum(act*sp.log(pred) + sp.subtract(1,act)*sp.log(sp.subtract(1,pred)))
    ll = ll * -1.0/len(act)
    return ll
Project: RecommendationSystem    Author: TURuibo    | project source | file source
def binary_logloss(p, y):
    epsilon = 1e-15
    p = sp.maximum(epsilon, p)
    p = sp.minimum(1-epsilon, p)
    res = sum(y*sp.log(p) + sp.subtract(1,y)*sp.log(sp.subtract(1,p)))
    res *= -1.0/len(y)
    return res
Project: digital_rf    Author: MITHaystack    | project source | file source
import scipy.linalg  # sp.linalg is not available from "import scipy" alone
import matplotlib.pyplot as plt

def outlier_removed_fit(m, w=None, n_iter=10, polyord=7):
    """
    Remove outliers using fitted data.

    Args:
        m (:obj:`numpy array`): Phase curve.
        w (:obj:`numpy array`, optional): Weights; defaults to all ones.
        n_iter (:obj:`int`): Number of outlier-removal iterations.
        polyord (:obj:`int`): Order of the polynomial used.

    Returns:
        fit (:obj:`numpy array`): Fitted curve with outliers removed.
    """
    if w is None:
        w = sp.ones_like(m)
    W = sp.diag(sp.sqrt(w))
    m2 = sp.copy(m)
    tv = sp.linspace(-1, 1, num=len(m))
    A = sp.zeros([len(m), polyord])
    for j in range(polyord):
        A[:, j] = tv**(float(j))
    A2 = sp.dot(W, A)
    m2w = sp.dot(m2, W)
    fit = None
    for i in range(n_iter):
        xhat = sp.linalg.lstsq(A2, m2w)[0]
        fit = sp.dot(A, xhat)
        # use gradient for central finite differences which keeps order
        resid = sp.gradient(fit - m2)
        std = sp.std(resid)
        bidx = sp.where(sp.absolute(resid) > 2.0*std)[0]
        # zero out the outlier rows so they no longer influence the fit
        for bi in bidx:
            A2[bi, :] = 0.0
            m2[bi] = 0.0
            m2w[bi] = 0.0
    if debug_plot:  # debug_plot is a module-level flag defined elsewhere in the file
        plt.plot(m2, label="outlier removed")
        plt.plot(m, label="original")
        plt.plot(fit, label="fit")
        plt.legend()
        # fit.min()/fit.max() here, not sp.minimum/sp.maximum, which are
        # two-argument element-wise ufuncs and would raise on a single array
        plt.ylim([fit.min() - std*3.0, fit.max() + std*3.0])
        plt.show()
    return fit
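A usage sketch: fit a noisy curve with a few injected outliers (all values invented; debug_plot is a module-level flag in the original file, assumed False here so nothing is plotted):

import numpy as np

debug_plot = False  # flag referenced inside outlier_removed_fit

t = np.linspace(0, 1, 200)
m = t**3 - t + 0.01*np.random.randn(200)  # smooth curve plus noise
m[[20, 100, 150]] += 5.0                  # inject gross outliers
fit = outlier_removed_fit(m)
print(np.abs(fit - (t**3 - t)).max())     # residual should stay small despite the outliers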
Project: nimo    Author: wolfram2012    | project source | file source
def _computeBGDiff(self):
    self._flow.update(self._imageBuffer.getLast())

    n = len(self._imageBuffer)
    prev_im = self._imageBuffer[0]
    forward = None
    for i in range(0, n//2):
        if forward is None:
            forward = self._imageBuffer[i].to_next
        else:
            forward = forward * self._imageBuffer[i].to_next

    w, h = size = prev_im.size
    mask = cv.CreateImage(size, cv.IPL_DEPTH_8U, 1)
    cv.Set(mask, 0)
    interior = cv.GetSubRect(mask, pv.Rect(2, 2, w-4, h-4).asOpenCV())
    cv.Set(interior, 255)
    mask = pv.Image(mask)

    prev_im = forward(prev_im)
    prev_mask = forward(mask)

    next_im = self._imageBuffer[n-1]
    back = None
    for i in range(n-1, n//2, -1):
        if back is None:
            back = self._imageBuffer[i].to_prev
        else:
            back = back * self._imageBuffer[i].to_prev

    next_im = back(next_im)
    next_mask = back(mask)

    curr_im = self._imageBuffer[n//2]

    prevImg = prev_im.asMatrix2D()
    curImg = curr_im.asMatrix2D()
    nextImg = next_im.asMatrix2D()
    prevMask = prev_mask.asMatrix2D()
    nextMask = next_mask.asMatrix2D()

    # Frame differences between the motion-compensated images
    delta1 = sp.absolute(curImg - prevImg)   # frame diff 1
    delta2 = sp.absolute(nextImg - curImg)   # frame diff 2

    # Clamp the differences by the warped masks (zero outside the valid region)
    delta1 = sp.minimum(delta1, prevMask)
    delta2 = sp.minimum(delta2, nextMask)

    # use element-wise minimum of the two difference images, which is what
    # gets compared to threshold to yield foreground mask
    return sp.minimum(delta1, delta2)