Python math module: log1p() example source code

We extracted the following 31 code examples from open-source Python projects to illustrate how to use math.log1p().
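A quick note on what log1p buys you before the examples: math.log1p(x) computes log(1 + x) accurately even when x is very small, where the naive math.log(1 + x) loses precision because 1 + x already rounds in floating point. A minimal illustration:

import math

x = 1e-10
print(math.log1p(x))    # accurate: ~1e-10 minus the tiny quadratic correction
print(math.log(1 + x))  # visibly off in the trailing digits: 1 + x has rounded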

Project: q2-coordinates | Author: nbokulich
def plot_basemap(latitude, longitude, image, color_palette=None):
    # define basemap, color palette
    cmap, tiler = get_map_params(image, color_palette)

    # Find min/max coordinates to set extent
    lat_0, lat_1, lon_0, lon_1 = get_max_extent(latitude, longitude)

    # Choose the tile resolution automatically from the coordinate span
    max_span = max((abs(lat_1) - abs(lat_0)), (abs(lon_1) - abs(lon_0)))
    res = int(-1.4 * math.log1p(max_span) + 13)

    # initialize the axes in the tiler's projection
    ax = plt.axes(projection=tiler.crs)
    # Define extents of any plotted data
    ax.set_extent((lon_0, lon_1, lat_0, lat_1), ccrs.Geodetic())
    # add terrain background
    ax.add_image(tiler, res)

    return ax, cmap
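For a feel of the resolution formula above: log1p compresses the span, so small regions get detailed tiles and continental spans get coarse ones. A spot check of the formula as written (the spans are hypothetical inputs):

import math

for max_span in (0.1, 1, 10, 90):
    print(max_span, int(-1.4 * math.log1p(max_span) + 13))
# prints roughly: 0.1 -> 12, 1 -> 12, 10 -> 9, 90 -> 6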
Project: sparks | Author: ImpactHorizon
from math import log1p
import numpy as np
from scipy.signal import argrelextrema
from scipy.ndimage import gaussian_filter

def detect_peaks(hist, count=2):
    # Repeatedly smooth the histogram until it has at most `count` peaks,
    # using log1p of the current peak count as the Gaussian width.
    hist_copy = hist  # note: this aliases hist rather than copying it
    peaks = len(argrelextrema(hist_copy, np.greater, mode="wrap")[0])
    sigma = log1p(peaks)
    print(peaks, sigma)
    while peaks > count:
        new_hist = gaussian_filter(hist_copy, sigma=sigma)
        peaks = len(argrelextrema(new_hist, np.greater, mode="wrap")[0])
        if peaks < count:
            # Over-smoothed past the target: discard this pass and
            # retry with half the smoothing width.
            peaks = count + 1
            sigma = sigma * 0.5
            continue
        hist_copy = new_hist
        sigma = log1p(peaks)
    print(peaks, sigma)
    return argrelextrema(hist_copy, np.greater, mode="wrap")[0]
Project: qgis-shapetools-plugin | Author: NationalSecurityAgency
def log1p(x):
    """log(1 + x) accurate for small x (missing from python 2.5.2)"""

    if sys.version_info > (2, 6):
        return math.log1p(x)

    y = 1 + x
    z = y - 1
    # Here's the explanation for this magic: y = 1 + z, exactly, and z
    # approx x, thus log(y)/z (which is nearly constant near z = 0) returns
    # a good approximation to the true log(1 + x)/x.  The multiplication x *
    # (log(y)/z) introduces little additional error.
    return x if z == 0 else x * math.log(y) / z
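A quick check that the pure-Python fallback matches the C implementation, including tiny arguments where naive log(1 + x) would degrade (a sketch; assumes the log1p defined above is in scope):

import math
import sys

for x in (1e-12, -1e-12, 0.5):
    print(x, log1p(x), math.log1p(x))  # the last two columns should agree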
Project: qgis-shapetools-plugin | Author: NationalSecurityAgency
def atanh(x):
    """atanh(x) (missing from python 2.5.2)"""

    if sys.version_info > (2, 6):
        return math.atanh(x)

    y = abs(x)                  # Enforce odd parity
    y = log1p(2 * y/(1 - y))/2  # uses the log1p fallback defined above
    return -y if x < 0 else y
Project: lopocs | Author: Oslandia
def compute_scale_for_cesium(coordmin, coordmax):
    '''
    Cesium quantized positions need to be in uint16
    This function computes the best scale to apply to coordinates
    to fit the range [0, 65535]
    '''
    max_int = np.iinfo(np.uint16).max
    delta = abs(coordmax - coordmin)
    scale = 10 ** -(math.floor(math.log1p(max_int / delta) / math.log1p(10)))
    return scale
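Worked example with hypothetical inputs: for coordmin=0 and coordmax=1000, max_int/delta is about 65.5, log1p(65.5)/log1p(10) is about 1.75, the floor is 1, so scale = 10**-1 = 0.1, and 1000 / 0.1 = 10000 quantization steps fit easily in the uint16 range:

import math
import numpy as np

print(compute_scale_for_cesium(0.0, 1000.0))  # 0.1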
Project: p2pool-bch | Author: amarian12
def geometric(p):
    if p <= 0 or p > 1:
        raise ValueError('p must be in the interval (0.0, 1.0]')
    if p == 1:
        return 1
    return int(math.log1p(-random.random()) / math.log1p(-p)) + 1
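This is inverse-CDF sampling of a geometric distribution (the number of Bernoulli(p) trials up to and including the first success); log1p(-u) evaluates log(1 - u) for a uniform u without losing precision near u = 0. A quick sanity check, whose sample mean should approach 1/p:

import math
import random

samples = [geometric(0.25) for _ in range(100000)]
print(sum(samples) / len(samples))  # should be close to 1 / 0.25 = 4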
Project: p2pool-unitus | Author: amarian12
def geometric(p):
    if p <= 0 or p > 1:
        raise ValueError('p must be in the interval (0.0, 1.0]')
    if p == 1:
        return 1
    return int(math.log1p(-random.random()) / math.log1p(-p)) + 1
Project: zippy | Author: securesystemslab
def testLog1p(self):
        self.assertRaises(TypeError, math.log1p)
        n = 2**90
        self.assertAlmostEqual(math.log1p(n), math.log1p(float(n)))
Project: p2pool-dgb-sha256 | Author: ilsawa
def geometric(p):
    if p <= 0 or p > 1:
        raise ValueError('p must be in the interval (0.0, 1.0]')
    if p == 1:
        return 1
    return int(math.log1p(-random.random()) / math.log1p(-p)) + 1
Project: oil | Author: oilshell
def testLog1p(self):
        self.assertRaises(TypeError, math.log1p)
        self.ftest('log1p(1/e -1)', math.log1p(1/math.e-1), -1)
        self.ftest('log1p(0)', math.log1p(0), 0)
        self.ftest('log1p(e-1)', math.log1p(math.e-1), 1)
        self.ftest('log1p(1)', math.log1p(1), math.log(2))
        self.assertEqual(math.log1p(INF), INF)
        self.assertRaises(ValueError, math.log1p, NINF)
        self.assertTrue(math.isnan(math.log1p(NAN)))
        n = 2**90
        self.assertAlmostEqual(math.log1p(n), 62.383246250395075)
        self.assertAlmostEqual(math.log1p(n), math.log1p(float(n)))
Project: python2-tracer | Author: extremecoders-re
def testLog1p(self):
        self.assertRaises(TypeError, math.log1p)
        self.ftest('log1p(1/e -1)', math.log1p(1/math.e-1), -1)
        self.ftest('log1p(0)', math.log1p(0), 0)
        self.ftest('log1p(e-1)', math.log1p(math.e-1), 1)
        self.ftest('log1p(1)', math.log1p(1), math.log(2))
        self.assertEqual(math.log1p(INF), INF)
        self.assertRaises(ValueError, math.log1p, NINF)
        self.assertTrue(math.isnan(math.log1p(NAN)))
        n = 2**90
        self.assertAlmostEqual(math.log1p(n), 62.383246250395075)
        self.assertAlmostEqual(math.log1p(n), math.log1p(float(n)))
Project: p2pool-ltc | Author: ilsawa
def geometric(p):
    if p <= 0 or p > 1:
        raise ValueError('p must be in the interval (0.0, 1.0]')
    if p == 1:
        return 1
    return int(math.log1p(-random.random()) / math.log1p(-p)) + 1
Project: p2pool-bsty | Author: amarian12
def geometric(p):
    if p <= 0 or p > 1:
        raise ValueError('p must be in the interval (0.0, 1.0]')
    if p == 1:
        return 1
    return int(math.log1p(-random.random()) / math.log1p(-p)) + 1
Project: web_ctp | Author: molebot
def testLog1p(self):
        self.assertRaises(TypeError, math.log1p)
        n = 2**90
        self.assertAlmostEqual(math.log1p(n), math.log1p(float(n)))
Project: mandos | Author: carolinetomo
import math

def logsum(l):
    # Accumulate log(sum(exp(i) for i in l)) one term at a time, using
    # log(e**x + e**i) = x + log1p(e**(i - x)) to stay in log space.
    x = l[0]
    for i in l[1:]:
        try:
            x += math.log1p(math.exp(i - x))
        except OverflowError:
            # exp(i - x) overflowed; the term is skipped and the
            # running total is left unchanged
            x += 0
    return x
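For example, combining two probabilities in log space:

import math

print(logsum([math.log(2), math.log(3)]))  # ~ math.log(5) ~ 1.6094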
Project: p2pool-cann | Author: ilsawa
def geometric(p):
    if p <= 0 or p > 1:
        raise ValueError('p must be in the interval (0.0, 1.0]')
    if p == 1:
        return 1
    return int(math.log1p(-random.random()) / math.log1p(-p)) + 1
Project: tensorflow-rl | Author: steveKapturowski
def log_add(log_x, log_y):
    """Given log x and log y, returns log(x + y)."""
    # Swap variables so log_y is larger.
    if log_x > log_y:
        log_x, log_y = log_y, log_x

    # Use the log(1 + e^p) trick to compute this efficiently
    # If the difference is large enough, this is effectively log y.
    delta = log_y - log_x
    return math.log1p(math.exp(delta)) + log_x if delta <= 50.0 else log_y
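A quick check of log_add against the exact answer, plus the large-difference shortcut branch:

import math

print(log_add(math.log(2), math.log(3)))  # ~ math.log(5) ~ 1.6094
print(log_add(0.0, 100.0))                # delta > 50, so this is just 100.0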
Project: tichu-tournament | Author: aragos
def _log_rps(self, rps):
    # Signed log1p: compresses the magnitude of rps while preserving its sign
    if rps > 0:
        return math.log1p(rps)
    else:
        return -math.log1p(-rps)
Project: hyper-engine | Author: maxim5
def log1p(node): return merge([node], math.log1p)
Project: pefile.pypy | Author: cloudtracer
def testLog1p(self):
        self.assertRaises(TypeError, math.log1p)
        self.ftest('log1p(1/e -1)', math.log1p(1/math.e-1), -1)
        self.ftest('log1p(0)', math.log1p(0), 0)
        self.ftest('log1p(e-1)', math.log1p(math.e-1), 1)
        self.ftest('log1p(1)', math.log1p(1), math.log(2))
        self.assertEqual(math.log1p(INF), INF)
        self.assertRaises(ValueError, math.log1p, NINF)
        self.assertTrue(math.isnan(math.log1p(NAN)))
        n = 2**90
        self.assertAlmostEqual(math.log1p(n), 62.383246250395075)
        self.assertAlmostEqual(math.log1p(n), math.log1p(float(n)))
Project: ouroboros | Author: pybee
def atanh(x):
    _atanh_special = [
        [complex(-0.0, -pi/2), complex(-0.0, -pi/2), complex(-0.0, -pi/2),
            complex(-0.0, pi/2), complex(-0.0, pi/2), complex(-0.0, pi/2),
            complex(-0.0, float("nan"))],
        [complex(-0.0, -pi/2), None, None, None, None, complex(-0.0, pi/2),
            nan+nanj],
        [complex(-0.0, -pi/2), None, None, None, None, complex(-0.0, pi/2),
            complex(-0.0, float("nan"))],
        [-1j*pi/2, None, None, None, None, 1j*pi/2, nanj],
        [-1j*pi/2, None, None, None, None, 1j*pi/2, nan+nanj],
        [-1j*pi/2, -1j*pi/2, -1j*pi/2, 1j*pi/2, 1j*pi/2, 1j*pi/2,  nanj],
        [-1j*pi/2, nan+nanj, nan+nanj, nan+nanj, nan+nanj, 1j*pi/2, nan+nanj]
    ]

    z = _make_complex(x)

    if not isfinite(z):
        return _atanh_special[_special_type(z.real)][_special_type(z.imag)]

    if z.real < 0:
        return -atanh(-z)

    ay = abs(z.imag)
    if z.real > _SQRT_LARGE_DOUBLE or ay > _SQRT_LARGE_DOUBLE:
        hypot = math.hypot(z.real/2, z.imag/2)
        return complex(z.real/4/hypot/hypot, -math.copysign(pi/2, -z.imag))
    if z.real == 1 and ay < _SQRT_DBL_MIN:
        if ay == 0:
            raise ValueError
        return complex(-math.log(math.sqrt(ay)/math.sqrt(math.hypot(ay, 2))),
                       math.copysign(math.atan2(2, -ay)/2, z.imag))
    return complex(math.log1p(4*z.real/((1-z.real)*(1-z.real) + ay*ay))/4,
                   -math.atan2(-2*z.imag, (1-z.real)*(1+z.real) - ay*ay)/2)
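The final return value generalizes the real-axis identity atanh(x) = log1p(2x / (1 - x)) / 2, where log1p keeps the result accurate for small x. A standalone check of that identity against math.atanh (independent of the private helpers above):

import math

for x in (1e-8, 0.5, 0.99):
    print(math.atanh(x), math.log1p(2 * x / (1 - x)) / 2)  # columns agree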
Project: ouroboros | Author: pybee
def testLog1p(self):
        self.assertRaises(TypeError, math.log1p)
        n = 2**90
        self.assertAlmostEqual(math.log1p(n), math.log1p(float(n)))
Project: ndk-python | Author: gittor
def testLog1p(self):
        self.assertRaises(TypeError, math.log1p)
        self.ftest('log1p(1/e -1)', math.log1p(1/math.e-1), -1)
        self.ftest('log1p(0)', math.log1p(0), 0)
        self.ftest('log1p(e-1)', math.log1p(math.e-1), 1)
        self.ftest('log1p(1)', math.log1p(1), math.log(2))
        self.assertEqual(math.log1p(INF), INF)
        self.assertRaises(ValueError, math.log1p, NINF)
        self.assertTrue(math.isnan(math.log1p(NAN)))
        n = 2**90
        self.assertAlmostEqual(math.log1p(n), 62.383246250395075)
        self.assertAlmostEqual(math.log1p(n), math.log1p(float(n)))
Project: kbe_server | Author: xiaohaoppy
def testLog1p(self):
        self.assertRaises(TypeError, math.log1p)
        n = 2**90
        self.assertAlmostEqual(math.log1p(n), math.log1p(float(n)))
Project: paragraph2vec | Author: thunlp
def __init__(self, corpus=None, id2word=None, dictionary=None,
                 wlocal=utils.identity, wglobal=df2idf, normalize=True):
        """
        Compute tf-idf by multiplying a local component (term frequency) with a
        global component (inverse document frequency), and normalizing
        the resulting documents to unit length. Formula for unnormalized weight
        of term `i` in document `j` in a corpus of D documents::

          weight_{i,j} = frequency_{i,j} * log_2(D / document_freq_{i})

        or, more generally::

          weight_{i,j} = wlocal(frequency_{i,j}) * wglobal(document_freq_{i}, D)

        so you can plug in your own custom `wlocal` and `wglobal` functions.

        Default for `wlocal` is identity (other options: math.sqrt, math.log1p, ...)
        and default for `wglobal` is `log_2(total_docs / doc_freq)`, giving the
        formula above.

        `normalize` dictates how the final transformed vectors will be normalized.
        `normalize=True` means set to unit length (default); `False` means don't
        normalize. You can also set `normalize` to your own function that accepts
        and returns a sparse vector.

        If `dictionary` is specified, it must be a `corpora.Dictionary` object
        and it will be used to directly construct the inverse document frequency
        mapping (then `corpus`, if specified, is ignored).
        """
        self.normalize = normalize
        self.id2word = id2word
        self.wlocal, self.wglobal = wlocal, wglobal
        self.num_docs, self.num_nnz, self.idfs = None, None, None
        if dictionary is not None:
            # user supplied a Dictionary object, which already contains all the
            # statistics we need to construct the IDF mapping. we can skip the
            # step that goes through the corpus (= an optimization).
            if corpus is not None:
                logger.warning("constructor received both corpus and explicit "
                               "inverse document frequencies; ignoring the corpus")
            self.num_docs, self.num_nnz = dictionary.num_docs, dictionary.num_nnz
            self.dfs = dictionary.dfs.copy()
            self.idfs = precompute_idfs(self.wglobal, self.dfs, self.num_docs)
        elif corpus is not None:
            self.initialize(corpus)
        else:
            # NOTE: everything is left uninitialized; presumably the model will
            # be initialized in some other way
            pass
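For context, here is a minimal sketch of plugging math.log1p in as the wlocal term-frequency dampener, assuming the gensim corpora/models API this constructor belongs to:

import math
from gensim import corpora, models

texts = [["cat", "sat", "mat"], ["cat", "cat", "dog"]]
dictionary = corpora.Dictionary(texts)
corpus = [dictionary.doc2bow(t) for t in texts]
# sub-linear term-frequency scaling instead of the identity default
tfidf = models.TfidfModel(corpus, wlocal=math.log1p)
print(tfidf[corpus[1]])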
Project: topical_word_embeddings | Author: thunlp
def __init__(self, corpus=None, id2word=None, dictionary=None,
                 wlocal=utils.identity, wglobal=df2idf, normalize=True):
        """
        Compute tf-idf by multiplying a local component (term frequency) with a
        global component (inverse document frequency), and normalizing
        the resulting documents to unit length. Formula for unnormalized weight
        of term `i` in document `j` in a corpus of D documents::

          weight_{i,j} = frequency_{i,j} * log_2(D / document_freq_{i})

        or, more generally::

          weight_{i,j} = wlocal(frequency_{i,j}) * wglobal(document_freq_{i}, D)

        so you can plug in your own custom `wlocal` and `wglobal` functions.

        Default for `wlocal` is identity (other options: math.sqrt, math.log1p, ...)
        and default for `wglobal` is `log_2(total_docs / doc_freq)`, giving the
        formula above.

        `normalize` dictates how the final transformed vectors will be normalized.
        `normalize=True` means set to unit length (default); `False` means don't
        normalize. You can also set `normalize` to your own function that accepts
        and returns a sparse vector.

        If `dictionary` is specified, it must be a `corpora.Dictionary` object
        and it will be used to directly construct the inverse document frequency
        mapping (then `corpus`, if specified, is ignored).
        """
        self.normalize = normalize
        self.id2word = id2word
        self.wlocal, self.wglobal = wlocal, wglobal
        self.num_docs, self.num_nnz, self.idfs = None, None, None
        if dictionary is not None:
            # user supplied a Dictionary object, which already contains all the
            # statistics we need to construct the IDF mapping. we can skip the
            # step that goes through the corpus (= an optimization).
            if corpus is not None:
                logger.warning("constructor received both corpus and explicit "
                               "inverse document frequencies; ignoring the corpus")
            self.num_docs, self.num_nnz = dictionary.num_docs, dictionary.num_nnz
            self.dfs = dictionary.dfs.copy()
            self.idfs = precompute_idfs(self.wglobal, self.dfs, self.num_docs)
        elif corpus is not None:
            self.initialize(corpus)
        else:
            # NOTE: everything is left uninitialized; presumably the model will
            # be initialized in some other way
            pass
Project: topical_word_embeddings | Author: thunlp
def __init__(self, corpus=None, id2word=None, dictionary=None,
                 wlocal=utils.identity, wglobal=df2idf, normalize=True):
        """
        Compute tf-idf by multiplying a local component (term frequency) with a
        global component (inverse document frequency), and normalizing
        the resulting documents to unit length. Formula for unnormalized weight
        of term `i` in document `j` in a corpus of D documents::

          weight_{i,j} = frequency_{i,j} * log_2(D / document_freq_{i})

        or, more generally::

          weight_{i,j} = wlocal(frequency_{i,j}) * wglobal(document_freq_{i}, D)

        so you can plug in your own custom `wlocal` and `wglobal` functions.

        Default for `wlocal` is identity (other options: math.sqrt, math.log1p, ...)
        and default for `wglobal` is `log_2(total_docs / doc_freq)`, giving the
        formula above.

        `normalize` dictates how the final transformed vectors will be normalized.
        `normalize=True` means set to unit length (default); `False` means don't
        normalize. You can also set `normalize` to your own function that accepts
        and returns a sparse vector.

        If `dictionary` is specified, it must be a `corpora.Dictionary` object
        and it will be used to directly construct the inverse document frequency
        mapping (then `corpus`, if specified, is ignored).
        """
        self.normalize = normalize
        self.id2word = id2word
        self.wlocal, self.wglobal = wlocal, wglobal
        self.num_docs, self.num_nnz, self.idfs = None, None, None
        if dictionary is not None:
            # user supplied a Dictionary object, which already contains all the
            # statistics we need to construct the IDF mapping. we can skip the
            # step that goes through the corpus (= an optimization).
            if corpus is not None:
                logger.warning("constructor received both corpus and explicit "
                               "inverse document frequencies; ignoring the corpus")
            self.num_docs, self.num_nnz = dictionary.num_docs, dictionary.num_nnz
            self.dfs = dictionary.dfs.copy()
            self.idfs = precompute_idfs(self.wglobal, self.dfs, self.num_docs)
        elif corpus is not None:
            self.initialize(corpus)
        else:
            # NOTE: everything is left uninitialized; presumably the model will
            # be initialized in some other way
            pass
Project: topical_word_embeddings | Author: thunlp
def __init__(self, corpus=None, id2word=None, dictionary=None,
                 wlocal=utils.identity, wglobal=df2idf, normalize=True):
        """
        Compute tf-idf by multiplying a local component (term frequency) with a
        global component (inverse document frequency), and normalizing
        the resulting documents to unit length. Formula for unnormalized weight
        of term `i` in document `j` in a corpus of D documents::

          weight_{i,j} = frequency_{i,j} * log_2(D / document_freq_{i})

        or, more generally::

          weight_{i,j} = wlocal(frequency_{i,j}) * wglobal(document_freq_{i}, D)

        so you can plug in your own custom `wlocal` and `wglobal` functions.

        Default for `wlocal` is identity (other options: math.sqrt, math.log1p, ...)
        and default for `wglobal` is `log_2(total_docs / doc_freq)`, giving the
        formula above.

        `normalize` dictates how the final transformed vectors will be normalized.
        `normalize=True` means set to unit length (default); `False` means don't
        normalize. You can also set `normalize` to your own function that accepts
        and returns a sparse vector.

        If `dictionary` is specified, it must be a `corpora.Dictionary` object
        and it will be used to directly construct the inverse document frequency
        mapping (then `corpus`, if specified, is ignored).
        """
        self.normalize = normalize
        self.id2word = id2word
        self.wlocal, self.wglobal = wlocal, wglobal
        self.num_docs, self.num_nnz, self.idfs = None, None, None
        if dictionary is not None:
            # user supplied a Dictionary object, which already contains all the
            # statistics we need to construct the IDF mapping. we can skip the
            # step that goes through the corpus (= an optimization).
            if corpus is not None:
                logger.warning("constructor received both corpus and explicit "
                               "inverse document frequencies; ignoring the corpus")
            self.num_docs, self.num_nnz = dictionary.num_docs, dictionary.num_nnz
            self.dfs = dictionary.dfs.copy()
            self.idfs = precompute_idfs(self.wglobal, self.dfs, self.num_docs)
        elif corpus is not None:
            self.initialize(corpus)
        else:
            # NOTE: everything is left uninitialized; presumably the model will
            # be initialized in some other way
            pass
Project: nonce2vec | Author: minimalparts
def __init__(self, corpus=None, id2word=None, dictionary=None,
                 wlocal=utils.identity, wglobal=df2idf, normalize=True):
        """
        Compute tf-idf by multiplying a local component (term frequency) with a
        global component (inverse document frequency), and normalizing
        the resulting documents to unit length. Formula for unnormalized weight
        of term `i` in document `j` in a corpus of D documents::

          weight_{i,j} = frequency_{i,j} * log_2(D / document_freq_{i})

        or, more generally::

          weight_{i,j} = wlocal(frequency_{i,j}) * wglobal(document_freq_{i}, D)

        so you can plug in your own custom `wlocal` and `wglobal` functions.

        Default for `wlocal` is identity (other options: math.sqrt, math.log1p, ...)
        and default for `wglobal` is `log_2(total_docs / doc_freq)`, giving the
        formula above.

        `normalize` dictates how the final transformed vectors will be normalized.
        `normalize=True` means set to unit length (default); `False` means don't
        normalize. You can also set `normalize` to your own function that accepts
        and returns a sparse vector.

        If `dictionary` is specified, it must be a `corpora.Dictionary` object
        and it will be used to directly construct the inverse document frequency
        mapping (then `corpus`, if specified, is ignored).
        """
        self.normalize = normalize
        self.id2word = id2word
        self.wlocal, self.wglobal = wlocal, wglobal
        self.num_docs, self.num_nnz, self.idfs = None, None, None
        if dictionary is not None:
            # user supplied a Dictionary object, which already contains all the
            # statistics we need to construct the IDF mapping. we can skip the
            # step that goes through the corpus (= an optimization).
            if corpus is not None:
                logger.warning("constructor received both corpus and explicit "
                               "inverse document frequencies; ignoring the corpus")
            self.num_docs, self.num_nnz = dictionary.num_docs, dictionary.num_nnz
            self.dfs = dictionary.dfs.copy()
            self.idfs = precompute_idfs(self.wglobal, self.dfs, self.num_docs)
        elif corpus is not None:
            self.initialize(corpus)
        else:
            # NOTE: everything is left uninitialized; presumably the model will
            # be initialized in some other way
            pass
Project: ESPSS | Author: rcalinjageman
def nct(tval, delta, df):
  #This is the non-central t-distribution function
  #This is a direct translation from Visual Basic to Python of the nct implementation by Geoff Cumming for ESCI

  #Noncentral t function, after Excel worksheet calculation by Geoff Robinson
  #The three parameters are:
  #   tval -- our t value
  #   delta -- noncentrality parameter
  #   df -- degrees of freedom

  #Dim dfmo As Integer     'for df-1
  #Dim tosdf As Double     'for tval over square root of df
  #Dim sep As Double       'for separation of points
  #Dim sepa As Double      'for temp calc of points
  #Dim consta As Double    'for constant
  #Dim ctr As Integer      'loop counter

  dfmo = df - 1.0
  tosdf = tval / math.sqrt(df)
  sep = (math.sqrt(df) + 7) / 100
  consta = math.exp( (2 - df) * 0.5 * math.log1p(1) - math.lgamma(df/2) ) * sep /3

  #now do first term in cross product summation, with df=0 special
  if dfmo > 0:
    nctvalue = stdnormdist(0 * tosdf - delta) * 0 ** dfmo * math.exp(-0.5* 0 * 0)
  else:  
    nctvalue = stdnormdist(0 * tosdf - delta) * math.exp(-0.5 * 0 * 0)

  #now add in second term, with multiplier 4
  nctvalue = nctvalue + 4 * stdnormdist(sep*tosdf - delta) * sep ** dfmo * math.exp(-0.5*sep*sep)    

  #now loop 48 times to add 96 more terms
  for ctr in range(1,49):
      sepa = 2 * ctr * sep
      nctvalue = nctvalue + 2 * stdnormdist(sepa * tosdf - delta) * sepa ** dfmo * math.exp(-0.5 * sepa * sepa)
      sepa = sepa + sep
      nctvalue = nctvalue + 4 * stdnormdist(sepa * tosdf - delta) * sepa ** dfmo * math.exp(-0.5 * sepa * sepa)

  #add in last term
  sepa = sepa + sep
  nctvalue = nctvalue + stdnormdist(sepa * tosdf - delta) * sepa ** dfmo * math.exp(-0.5 * sepa * sepa)

  #multiply by the constant and we've finished
  nctvalue = nctvalue * consta

  return nctvalue
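stdnormdist is not shown in this snippet; presumably it is the standard normal CDF, which can be supplied via math.erf. A hypothetical driver under that assumption:

import math

def stdnormdist(x):
    # standard normal CDF (assumed to match the helper the snippet expects)
    return 0.5 * (1 + math.erf(x / math.sqrt(2)))

print(nct(2.0, 1.0, 10))  # evaluates the noncentral-t quantity at tval=2, delta=1, df=10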
Project: SparseArray | Author: INGEOTEC
def test_one():
    from math import sin, cos, tan, asin, acos, atan
    from math import sinh, cosh, tanh, asinh, acosh, atanh
    from math import exp, expm1, log, log10, log1p, sqrt, lgamma
    from math import fabs, ceil, floor, trunc, erf, erfc, isfinite
    try:
        from math import log2
    except ImportError:
        def log2(x):
            return log(x) / log(2)

    def wrapper(f, v):
        try:
            return f(v)
        except ValueError:
            if f == sqrt:
                return float('nan')
            if v >= 0:
                return float('inf')
            else:
                return -float('inf')

    def compare(a, b):
        if isfinite(a) and isfinite(b):
            return assert_almost_equals(a, b)
        return str(a) == str(b)

    for f in [sin, cos, tan, asin, acos, atan,
              sinh, cosh, tanh, asinh, acosh, atanh,
              exp, expm1, log, log2, log10, log1p, sqrt,
              lgamma,
              fabs, ceil, floor, trunc,
              erf, erfc]:
        for p in [0.5, 1]:
            a = random_lst(p=p)
            b = SparseArray.fromlist(a)
            c = getattr(b, f.__name__)()
            res = [wrapper(f, x) for x in a]
            index = [k for k, v in enumerate(res) if v != 0]
            res = [x for x in res if x != 0]
            print(f, p, c.non_zero, len(res))
            assert c.non_zero == len(res)
            [assert_almost_equals(v, w) for v, w in zip(index,
                                                        c.index)]
            [compare(v, w) for v, w in zip(res,
                                           c.data)]