Python numpy module: tanh() code examples

We extracted the following 50 code examples from open-source Python projects to illustrate how to use numpy.tanh().
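Before the project excerpts, a minimal self-contained demonstration of the function itself (not taken from any project below): numpy.tanh applies the hyperbolic tangent elementwise and maps any real input into the open interval (-1, 1).

import numpy as np

x = np.array([-2.0, -0.5, 0.0, 0.5, 2.0])
y = np.tanh(x)  # elementwise hyperbolic tangent; outputs lie in (-1, 1)
print(y)        # approx. [-0.96402758 -0.46211716  0.  0.46211716  0.96402758]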

Project: NumpyDL    Author: oujago    | project source | file source
def derivative(self, input=None):
        """The derivative of sigmoid is

        .. math:: \\frac{dy}{dx} & = (1-\\varphi(x)) \\otimes \\varphi(x)  \\\\
                  & = \\frac{e^{-x}}{(1+e^{-x})^2} \\\\
                  & = \\frac{e^x}{(1+e^x)^2}

        Returns
        -------
        float32
            The derivative of the sigmoid function.
        """
        last_forward = self.forward(input) if input is not None else self.last_forward
        return np.multiply(last_forward, 1 - last_forward)
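As a quick standalone check of the identity in the docstring above (a sketch independent of the NumpyDL class; the helper sigmoid below is ours, not the project's), the analytic derivative phi(x)(1 - phi(x)) agrees with a central finite difference:

import numpy as np

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

x = np.linspace(-3, 3, 7)
analytic = sigmoid(x) * (1 - sigmoid(x))   # phi(x) * (1 - phi(x))
eps = 1e-6
numeric = (sigmoid(x + eps) - sigmoid(x - eps)) / (2 * eps)
assert np.allclose(analytic, numeric, atol=1e-8)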


# sigmoid-end
# tanh-start
Project: NumpyDL    Author: oujago    | project source | file source
def forward(self, input):
        """This function is easily defined as the ratio between the hyperbolic 
        sine and the cosine functions (or expanded, as the ratio of the 
        half?difference and half?sum of two exponential functions in the 
        points :math:`z` and :math:`-z`):

        .. math:: tanh(z) & = \\frac{sinh(z)}{cosh(z)} \\\\
                  & = \\frac{e^z - e^{-z}}{e^z + e^{-z}}

        Fortunately, numpy provides :meth:`tanh` methods. So in our implementation,
        we directly use :math:`\\varphi(x) = \\tanh(x)`.

        Parameters
        ----------
        input : float32
            The activation (the summed, weighted input of a neuron).

        Returns
        -------
        float32 in [-1, 1]
            The output of the tanh function applied to the activation.
        """
        self.last_forward = np.tanh(input)
        return self.last_forward
Project: NumpyDL    Author: oujago    | project source | file source
def derivative(self, input=None):
        """The derivative of :meth:`tanh` functions is

        .. math:: \\frac{d}{dx} tanh(x) & = \\frac{d}{dx} \\frac{sinh(x)}{cosh(x)} \\\\
                  & = \\frac{cosh(x) \\frac{d}{dx}sinh(x) - sinh(x) \\frac{d}{dx}cosh(x) }{ cosh^2(x)} \\\\
                  & = \\frac{ cosh(x) cosh(x) - sinh(x) sinh(x) }{ cosh^2(x)}  \\\\
                  & = 1 - tanh^2(x) 

        Returns
        -------
        float32 
            The derivative of the tanh function.
        """
        last_forward = self.forward(input) if input is not None else self.last_forward
        return 1 - np.power(last_forward, 2)
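The same finite-difference check confirms the tanh identity d/dx tanh(x) = 1 - tanh^2(x) (again a standalone sketch, not part of the project source):

import numpy as np

x = np.linspace(-3, 3, 7)
analytic = 1 - np.tanh(x) ** 2             # d/dx tanh(x) = 1 - tanh^2(x)
eps = 1e-6
numeric = (np.tanh(x + eps) - np.tanh(x - eps)) / (2 * eps)
assert np.allclose(analytic, numeric, atol=1e-8)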


# tanh-end
# relu-start
Project: NumpyDL    Author: oujago    | project source | file source
def __init__(self, n_out, n_in=None, nb_batch=None, nb_seq=None,
                 init='glorot_uniform', inner_init='orthogonal',
                 activation='tanh', return_sequence=False):
        self.n_out = n_out
        self.n_in = n_in
        self.nb_batch = nb_batch
        self.nb_seq = nb_seq
        self.init = initializations.get(init)
        self.inner_init = initializations.get(inner_init)
        self.activation_cls = activations.get(activation).__class__
        self.activation = activations.get(activation)
        self.return_sequence = return_sequence

        self.out_shape = None
        self.last_input = None
        self.last_output = None
Project: MachineLearningPracticePrograms    Author: Subarno    | project source | file source
def sample(h, seed_ix, n):
    """
    sample a sequence of integers from the model
    h is memory state, seed_ix is seed letter for first time step
    """
    x = np.zeros((vocab_size, 1))
    x[seed_ix] = 1
    ixes = []
    for t in range(n):
        h = np.tanh(np.dot(Wxh, x) + np.dot(Whh, h) + bh)
        y = np.dot(Why, h) + by
        p = np.exp(y) / np.sum(np.exp(y))
        ix = np.random.choice(range(vocab_size), p=p.ravel())
        x = np.zeros((vocab_size, 1))
        x[ix] = 1
        ixes.append(ix)
    return ixes
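The function relies on module-level globals (vocab_size, Wxh, Whh, Why, bh, by) defined elsewhere in the project. A hypothetical toy setup, with all sizes and values invented purely for illustration, might look like:

import numpy as np

# Hypothetical globals that sample() above expects; sizes are illustrative only.
vocab_size, hidden_size = 5, 8
rng = np.random.RandomState(0)
Wxh = rng.randn(hidden_size, vocab_size) * 0.01   # input-to-hidden weights
Whh = rng.randn(hidden_size, hidden_size) * 0.01  # hidden-to-hidden weights
Why = rng.randn(vocab_size, hidden_size) * 0.01   # hidden-to-output weights
bh = np.zeros((hidden_size, 1))                   # hidden bias
by = np.zeros((vocab_size, 1))                    # output bias

h0 = np.zeros((hidden_size, 1))                   # initial memory state
print(sample(h0, seed_ix=0, n=10))                # a list of 10 sampled indices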
Project: mlscratch    Author: rockyzhengwu    | project source | file source
def sample(h, seed_ix, n):
    """
    sample a sequence of integers from the model
    h is memory state, seed_ix is seed letter for first time step
    """
    x = np.zeros((vocab_size, 1))
    x[seed_ix] = 1
    ixes = []
    for t in range(n):
        h = np.tanh(np.dot(Wxh, x) + np.dot(Whh, h) + bh)
        y = np.dot(Why, h) + by
        p = np.exp(y) / np.sum(np.exp(y))
        ix = np.random.choice(range(vocab_size), p=p.ravel())
        x = np.zeros((vocab_size, 1))
        x[ix] = 1
        ixes.append(ix)
    return ixes
Project: aspect_adversarial    Author: yuanzh    | project source | file source
def __init__(self, n_in, n_out, activation=tanh,
            clip_gradients=False, init_zero=False):

        self.n_in = n_in
        self.n_out = n_out
        self.activation = activation
        self.clip_gradients = clip_gradients

        #self.in_gate = RecurrentLayer(n_in, n_out, sigmoid, clip_gradients, init_zero)
        #self.forget_gate = RecurrentLayer(n_in, n_out, sigmoid, clip_gradients, init_zero)
        #self.out_gate = RecurrentLayer(n_in, n_out, sigmoid, clip_gradients, init_zero)
        self.in_gate = RecurrentLayer(n_in+n_out, n_out, sigmoid, clip_gradients, init_zero)
        self.out_gate = RecurrentLayer(n_in+n_out, n_out, sigmoid, clip_gradients, init_zero)
        self.input_layer = RecurrentLayer(n_in, n_out, activation, clip_gradients, init_zero)


        self.internal_layers = [ self.input_layer, self.in_gate,
                                 self.out_gate]#,  self.forget_gate]
Project: aspect_adversarial    Author: yuanzh    | project source | file source
def __init__(self, n_in, n_out, activation=tanh,
            order=1, clip_gradients=False, BN=False):

        self.n_in = n_in
        self.n_out = n_out
        self.activation = activation
        self.order = order
        self.clip_gradients = clip_gradients

        # batch, in, row, col
        self.input_shape = (None, n_in, 1, None)
        # out, in, row, col
        self.filter_shape = (n_out, n_in, 1, order)
        self.W = create_shared(random_init(self.filter_shape), name="W")
        if not BN:
            self.bias = create_shared(random_init((n_out,)), name="bias")

        self.BNLayer = None
        self.BN = BN
        if BN:
            # calculate appropriate input_shape, (mini_batch_size, # of channel, # row, # column)
            new_shape = list(self.input_shape)
            new_shape[1] = self.filter_shape[0]
            new_shape = tuple(new_shape)
            self.BNLayer = BatchNormalization(new_shape, mode=1)
Project: vanilla-neural-nets    Author: cavaunpeu    | project source | file source
def _feed_forward(self, x):
        time_steps = len(x)
        initial_hidden_state = np.zeros(self.hidden_layer_size)
        hidden_state = deque([initial_hidden_state])
        softmax_outputs = deque()

        for t in np.arange(time_steps):
            hidden_state.append(
                np.tanh( self.parameters.W_xh.value[:, x[t]] + self.parameters.W_hh.value @ hidden_state[-1] )
            )
            softmax_outputs.append(
                self._compute_softmax( self.parameters.W_hy.value @ hidden_state[-1] )
            )
        # move initial hidden state to end of deque, such that it is later our
        # `hidden_state[t-1]` at t=0
        hidden_state.rotate(-1)

        return np.array(softmax_outputs), np.array(hidden_state)
Project: aRNNie    Author: MojoJolo    | project source | file source
def forward(self, inputs, targets, hidden_prev):
        # s = vector
        input_xs = {}
        hidden_s = {}
        output_ys = {}
        probs = {}  # probability

        hidden_s[-1] = np.copy(hidden_prev)
        loss = 0

        for i in range(len(inputs)):
            # Creating an equivalent one hot vector for each inputs
            input_xs[i] = np.zeros((self.vocab_size, 1))
            input_xs[i][inputs[i]] = 1

            # Calculating the current hidden state from the previous hidden state through tanh
            hidden_s[i] = self.tanh(self.param_w_xh, input_xs[i], self.param_w_hh, hidden_s[i - 1], self.bias_hidden)
            output_ys[i] = np.dot(self.param_w_hy, hidden_s[i]) + self.bias_output_y

            probs[i] = self.softmax(output_ys[i])
            loss += -np.log(probs[i][targets[i], 0])

        return input_xs, output_ys, hidden_s, probs, loss

    # backprop
Project: aRNNie    Author: MojoJolo    | project source | file source
def generate(self, hidden, seed_ix, chars_counter):
        input_x = np.zeros((self.vocab_size, 1))
        input_x[seed_ix] = 1
        ixes = []

        for i in range(chars_counter):
            hidden = np.tanh(np.dot(self.param_w_xh, input_x) + np.dot(self.param_w_hh, hidden) + self.bias_hidden) # tanh
            output_y = np.dot(self.param_w_hy, hidden) + self.bias_output_y
            prob = self.softmax(output_y)
            ix = np.random.choice(range(self.vocab_size), p=prob.ravel())

            input_x = np.zeros((self.vocab_size, 1))
            input_x[ix] = 1

            ixes.append(ix)

        return [self.ix_to_char[ix] for ix in ixes]
Project: ngraph    Author: NervanaSystems    | project source | file source
def __init__(self, in_size, hidden_size, encoder_activation='tanh',
                 decoder_activation='tanh', decoder_return_sequence=True):

        assert encoder_activation in ('tanh', 'identity', ), "invalid encoder_activation"
        self.encoder_activation = encoder_activation
        assert decoder_activation in ('tanh', 'identity', ), "invalid decoder_activation"
        self.decoder_activation = decoder_activation

        self.hidden_size = hidden_size
        self.in_size = in_size
        # encoder
        self.Wxh_enc = np.zeros((hidden_size, in_size))  # input to hidden
        self.Whh_enc = np.zeros((hidden_size, hidden_size))  # hidden to hidden
        self.bh_enc = np.zeros((hidden_size, 1))  # hidden bias
        # decoder
        self.Wxh_dec = np.zeros((hidden_size, in_size))  # input to hidden
        self.Whh_dec = np.zeros((hidden_size, hidden_size))  # hidden to hidden
        self.bh_dec = np.zeros((hidden_size, 1))  # hidden bias
        self.decoder_return_sequence = decoder_return_sequence
Project: tango    Author: LLNL    | project source | file source
def density_profile(rho):
    """density profile, fixed in time.
    Inputs:
      rho       normalized radial coordinate rho=r/a (array)
    Outputs:
      n         density profile in SI (array)
    """
    minorRadius = 0.594  # a
    majorRadius = 1.65  # R0
    inverseAspectRatio = minorRadius / majorRadius
    rho0 = 0.5

    # density profile
    n0 = 3.3e19      # in SI, m^-3
    kappa_n = 2.22   # R0 / Ln
    deltar = 0.5
    rhominus = rho - rho0 + deltar/2
    deltan = 0.1
    n = n0 * np.exp( -kappa_n * inverseAspectRatio * (rho - rho0 - deltan * (np.tanh(rhominus/deltan) - np.tanh(deltar/2/deltan))))

    # set n to a constant for rho < rho0-deltar/2
    ind = int(np.abs(rho - (rho0 - deltar/2)).argmin())
    ind2 = (rho < (rho0-deltar/2))
    n[ind2] = n[ind]
    return n
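A brief usage sketch (the radial grid below is illustrative, not from the project): evaluating the profile on a normalized grid shows that it is flat inside rho0 - deltar/2 and decays outward.

import numpy as np

rho = np.linspace(0.05, 1.0, 100)  # illustrative normalized radius grid
n = density_profile(rho)
print(n[0], n[-1])                 # constant core value vs. smaller edge value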
Project: tango    Author: LLNL    | project source | file source
def temperature_initial_condition(rho):
    """Initial temperature profile
    Inputs:
      rho       normalized radial coordinate rho=r/a (array)
    Outputs:
      T         temperature profile in SI (array)
    """
    e = 1.60217662e-19          # electron charge

    kappa_T = 6.96
    deltar = 0.9
    rho0 = 0.5
    rhominus = rho - rho0 + deltar/2
    deltaT = 0.1

    T0 = 1000*e
    invasp = 0.36
    T = T0 * np.exp(-kappa_T * invasp * (rho - rho0 - deltaT * (np.tanh(rhominus/deltaT) - np.tanh(deltar/2/deltaT))))
    ind = int(np.abs(rho - (rho0 - deltar/2)).argmin())
    ind2 = (rho < (rho0-deltar/2))
    T[ind2] = T[ind]
    return T
Project: tensorflow-forward-ad    Author: renmengye    | project source | file source
def test_basic(self):
    with tf.Graph().as_default(), self.test_session() as sess:
      rnd = np.random.RandomState(0)
      x = self.get_random_tensor([18, 12], rnd=rnd)
      y = tf.tanh(x)
      self.assert_bw_fw(sess, x, y, rnd=rnd)

def test_manual(self):
    with tf.Graph().as_default(), tf.device("/cpu:0"):
      with self.test_session() as sess:
        x_val = np.random.uniform(0, 1)
        x = tf.constant(x_val)
        y = tf.tanh(x)
        dy_dx = forward_gradients(y, x, gate_gradients=True)
        dy_dx_tf = sess.run(dy_dx)
        eps = 1e-5
        x_val = x_val - eps
        y_val_1 = np.tanh(x_val)
        x_val = x_val + 2 * eps
        y_val_2 = np.tanh(x_val)
        dy_dx_fd = (y_val_2 - y_val_1) / (2 * eps)
        np.testing.assert_allclose(dy_dx_tf, dy_dx_fd, rtol=1e-5)
Project: lotto    Author: hhh5460    | project source | file source
def calculate_loss(self, X, y, model):
        num_examples = len(X)
        lamda = 0.01 # regularization strength

        Wi, bh, Wh, bo = model['Wi'], model['bh'], model['Wh'], model['bo']
        # Forward propagation to calculate our predictions
        neth = np.dot(X, Wi) + bh
        lh = np.tanh(neth)
        neto = np.dot(lh, Wh) + bo
        lo = np.exp(neto)
        probs = lo / np.sum(lo, axis=1, keepdims=True)
        # Calculating the loss
        correct_logprobs = -np.log(probs[range(num_examples), y])
        data_loss = np.sum(correct_logprobs)
        # Add regularization term to loss (optional)
        data_loss += lamda/2 * (np.sum(np.square(Wi)) + np.sum(np.square(Wh)))
        return 1./num_examples * data_loss

Project: SentimentAnalysis    Author: Conchylicultor    | project source | file source
def actFctDerFromOutput(x):
    """
    Derivate of the activation function
    WARNING: In this version, we take as input an output value 
    after the activation function (x = tanh(output of the tensor)).
    This works because the derivate of tanh is function of tanh
    """
    return 1.0 - x**2

#def actFctDer(x):
    #"""
    #Derivative of the activation function
    #"""
    #return 1.0 - np.tanh(x)**2

# Other utils functions
Project: NADE    Author: MarcCote    | project source | file source
def conditional_logdensities(self, x_lt_i, range):
        raise(Exception("Not implemented"))
        W = self.W.get_value()
        V_alpha = self.V_alpha.get_value()
        b_alpha = self.b_alpha.get_value()
        V_mu = self.V_mu.get_value()
        b_mu = self.b_mu.get_value()
        V_sigma = self.V_sigma.get_value()
        b_sigma = self.b_sigma.get_value()
        activation_rescaling = self.activation_rescaling.get_value()
        # Calculate
        i = len(x_lt_i)
        a = W[0, :] + np.dot(x_lt_i, W[1:len(x_lt_i) + 1, :])
        h = self.parameters["nonlinearity"].get_numpy_f()(a * activation_rescaling[i])
        alpha = Utils.nnet.softmax(np.tanh(np.dot(h, V_alpha[i]) + b_alpha[i]) * 10.0)  # C
        Mu = np.dot(h, V_mu[i]) + b_mu[i]  # C
        Sigma = np.log(1.0 + np.exp((np.dot(h, V_sigma[i]) + b_sigma[i]) * 10)) / 10  # C

        def ld(x):
            lds = np.array([scipy.stats.norm.logpdf(x, Mu[c], Sigma[c]) for c in range(self.n_components)])
            return Utils.nnet.logsumexp(lds + np.log(alpha))
        return np.array([ld(x) for x in range])
Project: learning_rnn    Author: qiangsiwei    | project source | file source
def bottom_data_is(self, x, s_prev = None, h_prev = None):
        # if this is the first lstm node in the network
        if s_prev is None: s_prev = np.zeros_like(self.state.s)
        if h_prev is None: h_prev = np.zeros_like(self.state.h)
        # save data for use in backprop
        self.s_prev = s_prev
        self.h_prev = h_prev
        # concatenate x(t) and h(t-1)
        xc = np.hstack((x,  h_prev))
        self.state.g = np.tanh(np.dot(self.param.wg, xc) + self.param.bg)
        self.state.i = sigmoid(np.dot(self.param.wi, xc) + self.param.bi)
        self.state.f = sigmoid(np.dot(self.param.wf, xc) + self.param.bf)
        self.state.o = sigmoid(np.dot(self.param.wo, xc) + self.param.bo)
        self.state.s = self.state.g * self.state.i + s_prev * self.state.f
        self.state.h = self.state.s * self.state.o
        self.x = x
        self.xc = xc
Project: smp_base    Author: x75    | project source | file source
def nonlin_poly(self, u):
    """nonlin_poly

    ip2d.motortransfer_func legacy
    """
    # olimm1 = 0.5
    olim = 2
    # z = array([ 0.27924011,  0.12622341,  0.0330395,  -0.00490162])
    # z = array([ 0.00804775,  0.00223221, -0.1456263,  -0.04297434,  0.74612441,  0.26178644, -0.01953301, -0.00243736])
    # FIXME: somewhere there's a separate script for generating the coeffs
    z = array([9.46569349e-04,  4.84698808e-03, -1.64436822e-02, -8.76479549e-02,  7.67630339e-02,  4.48107332e-01, -4.53365904e-03, -2.69288039e-04,  1.18423789e-15])
    p3 = poly1d(z)
    # print "pre", self.ip2d.u[ti]
    # self.ip2d.u[ti] = p3(tanh(self.ip2d.u[ti]) * self.olim)
    y = p3(tanh(u) * olim)
    return y
Project: drmad    Author: bigaidream-projects    | project source | file source
def make_nn_funs(layer_sizes, L2_reg):
    parser = WeightsParser()
    for i, shape in enumerate(zip(layer_sizes[:-1], layer_sizes[1:])):
        parser.add_weights(('weights', i), shape)
        parser.add_weights(('biases', i), (1, shape[1]))

    def predictions(W_vect, X):
        cur_units = X
        for i in range(len(layer_sizes) - 1):
            cur_W = parser.get(W_vect, ('weights', i))
            cur_B = parser.get(W_vect, ('biases', i))
            cur_units = np.tanh(np.dot(cur_units, cur_W) + cur_B)
        return cur_units - logsumexp(cur_units, axis=1)

    def loss(W_vect, X, T):
        log_prior = -L2_reg * np.dot(W_vect, W_vect)
        log_lik = np.sum(predictions(W_vect, X) * T) 
        return - log_prior - log_lik

    def frac_err(W_vect, X, T):
        return np.mean(np.argmax(T, axis=1) != np.argmax(predictions(W_vect, X), axis=1))

    return parser.N, predictions, loss, frac_err
Project: pokerbots-2017    Author: mhgump    | project source | file source
def forward_prop_step(self,x_t, s_t1_prev, s_t2_prev):
        # This is how we calculated the hidden state in a simple RNN. No longer!
        # s_t = T.tanh(U[:,x_t] + W.dot(s_t1_prev))

        # Word embedding layer
        x_e = self.E.dot(x_t)

        # GRU Layer 1
        z_t1 = sigmoid(self.U[0].dot(x_e) + self.W[0].dot(s_t1_prev) + self.b[0])
        r_t1 = sigmoid(self.U[1].dot(x_e) + self.W[1].dot(s_t1_prev) + self.b[1])
        c_t1 = np.tanh(self.U[2].dot(x_e) + self.W[2].dot(s_t1_prev * r_t1) + self.b[2])
        s_t1 = (np.ones(z_t1.shape) - z_t1) * c_t1 + z_t1 * s_t1_prev

        # GRU Layer 2
        z_t2 = sigmoid(self.U[3].dot(s_t1) + self.W[3].dot(s_t2_prev) + self.b[3])
        r_t2 = sigmoid(self.U[4].dot(s_t1) + self.W[4].dot(s_t2_prev) + self.b[4])
        c_t2 = np.tanh(self.U[5].dot(s_t1) + self.W[5].dot(s_t2_prev * r_t2) + self.b[5])
        s_t2 = (np.ones(z_t2.shape) - z_t2) * c_t2 + z_t2 * s_t2_prev

        # Final output calculation
        o_t = self.V.dot(s_t2) + self.c

        return [o_t, s_t1, s_t2]
Project: chainer-deconv    Author: germanRos    | project source | file source
def forward(self, inputs):
        c_prev, x = inputs
        a, i, f, o = _extract_gates(x)

        if isinstance(x, numpy.ndarray):
            self.a = numpy.tanh(a)
            self.i = _sigmoid(i)
            self.f = _sigmoid(f)
            self.o = _sigmoid(o)

            self.c = self.a * self.i + self.f * c_prev
            h = self.o * numpy.tanh(self.c)
        else:
            self.c, h = cuda.elementwise(
                'T c_prev, T a, T i_, T f, T o', 'T c, T h',
                '''
                    COMMON_ROUTINE;
                    c = aa * ai + af * c_prev;
                    h = ao * tanh(c);
                ''',
                'lstm_fwd', preamble=_preamble)(c_prev, a, i, f, o)

        return self.c, h
Project: chainer-deconv    Author: germanRos    | project source | file source
def check_forward(self, c_prev_data, x_data):
        c_prev = chainer.Variable(c_prev_data)
        x = chainer.Variable(x_data)
        c, h = functions.lstm(c_prev, x)
        self.assertEqual(c.data.dtype, self.dtype)
        self.assertEqual(h.data.dtype, self.dtype)

        # Compute expected out
        a_in = self.x[:, [0, 4]]
        i_in = self.x[:, [1, 5]]
        f_in = self.x[:, [2, 6]]
        o_in = self.x[:, [3, 7]]

        c_expect = _sigmoid(i_in) * numpy.tanh(a_in) + \
            _sigmoid(f_in) * self.c_prev
        h_expect = _sigmoid(o_in) * numpy.tanh(c_expect)

        gradient_check.assert_allclose(
            c_expect, c.data, **self.check_forward_options)
        gradient_check.assert_allclose(
            h_expect, h.data, **self.check_forward_options)
Project: tf-lstm-char-cnn    Author: mkroutikov    | project source | file source
def test(self):

        with self.test_session() as sess:

            inp = tf.constant(np.array([
                [[1.0], [2.0], [2.0], [0.0]]
            ], dtype=np.float32))

            x = tdnn(inp, [2], [1])

            result = sess.run(x, {
                'TDNN/kernel_2/w:0': np.array([[[[1.0]], [[-1.0]]]]),
                'TDNN/kernel_2/b:0': np.array([1.0]),
            })

            print(result)
            self.assertAllClose(result, [[np.tanh(3.0)]])
Project: tf-lstm-char-cnn    Author: mkroutikov    | project source | file source
def test_cnn_step(self):

        with self.test_session() as sess:

            m = self.model()

            input_cnn = sess.run(m.input_cnn, {

                'TDNN/kernel_2/w:0': np.array([[
                    [[1], [1], [1]],
                    [[1], [1], [1]],
                ]]),
                'TDNN/kernel_2/b:0': np.array([0]),

                m.input_embedded: np.array([[
                    [1,0,0], [0,0,1], [0,1,0], [0,0,0], [0,0,0],
                ]])
            })

            self.assertAllClose(input_cnn, np.array([
                [[np.tanh(2)]],
            ]))
Project: enhancement    Author: lwzswufe    | project source | file source
def bottom_data_is(self, x, s_prev=None, h_prev=None):
        # if this is the first lstm node in the network
        if s_prev is None:
            s_prev = np.zeros_like(self.state.s)
        if h_prev is None:
            h_prev = np.zeros_like(self.state.h)
        # save data for use in backprop
        self.s_prev = s_prev
        self.h_prev = h_prev

        # concatenate x(t) and h(t-1)
        xc = np.hstack((x,  h_prev))
        self.state.g = np.tanh(np.dot(self.param.wg, xc) + self.param.bg)
        self.state.i = sigmoid(np.dot(self.param.wi, xc) + self.param.bi)
        self.state.f = sigmoid(np.dot(self.param.wf, xc) + self.param.bf)
        self.state.o = sigmoid(np.dot(self.param.wo, xc) + self.param.bo)
        self.state.s = self.state.g * self.state.i + s_prev * self.state.f
        self.state.h = self.state.s * self.state.o
        self.x = x
        self.xc = xc
Project: bnn-analysis    Author: myshkov    | project source | file source
def true_f(self, x):
        return 1. * (1. + x) * np.sin(10. * np.tanh(x))
Project: pycma    Author: CMA-ES    | project source | file source
def repair_genotype(self, x, copy_if_changed=False):
        """make sure that solutions fit to the sample distribution.

        This interface is versatile and likely to change.

        In particular the frequency of ``x - self.mean`` being long in
        Mahalanobis distance is limited, currently clipping at
        ``N**0.5 + 2 * N / (N + 2)`` is implemented.
        """
        x = array(x, copy=False)
        mold = array(self.mean, copy=False)
        if 1 < 3:  # hard clip at upper_length
            upper_length = self.N**0.5 + 2 * self.N / (self.N + 2)
            # should become an Option, but how? e.g. [0, 2, 2]
            fac = self.mahalanobis_norm(x - mold) / upper_length

            if fac > 1:
                if copy_if_changed:
                    x = (x - mold) / fac + mold
                else:  # should be 25% faster:
                    x -= mold
                    x /= fac
                    x += mold
                # print self.countiter, k, fac, self.mahalanobis_norm(pop[k] - mold)
                # adapt also sigma: which are the trust-worthy/injected solutions?
            elif 11 < 3:
                return np.exp(np.tanh(((upper_length * fac)**2 / self.N - 1) / 2) / 2)
        else:
            if 'checktail' not in self.__dict__:  # hasattr(self, 'checktail')
                raise NotImplementedError
                # from check_tail_smooth import CheckTail  # for the time being
                # self.checktail = CheckTail()
                # print('untested feature checktail is on')
            fac = self.checktail.addchin(self.mahalanobis_norm(x - mold))

            if fac < 1:
                x = fac * (x - mold) + mold

        return x
Project: snake    Author: rhinech    | project source | file source
def gen_samples(self, num_samples):
        """Generate sample for ML near the snake."""

        points = []  # the sample points
        labels = []  # the labels
        whichs = []  # the corresponding node for the sample
        deri_g = []  # the partial derivative to g
        deri_T = []  # the partial derivative to T
        counter = 0
        assert num_samples % self.length == 0
        for i, (v, n) in enumerate(zip(self.vertices, self.normals())):
        for d in np.linspace(-1, 1, num_samples // self.length):
                # geometry
                r = 2 * self.widths[i] * d
                s = v + r * n
                l = array([0.5 * (1. - np.tanh(d)),
                           0.5 * (1. + np.tanh(d))])
                points.append(s)
                labels.append(l)
                whichs.append(i)
                # cal derivatives
                cosh_d = np.cosh(d)
                deri_g.append(1 / (4 * self.widths[i] * cosh_d * cosh_d))
                deri_T.append(d / (2 * self.widths[i] * cosh_d * cosh_d))
                counter += 1
                if counter == num_samples:
                    return array(points), array(labels), array(whichs), array(deri_g), array(deri_T)
Project: NumpyDL    Author: oujago    | project source | file source
def get(activation):
    if isinstance(activation, str):
        if activation in ['sigmoid', 'Sigmoid']:
            return Sigmoid()
        if activation in ['tan', 'tanh', 'Tanh']:
            return Tanh()
        if activation in ['relu', 'ReLU', 'RELU']:
            return ReLU()
        if activation in ['linear', 'Linear']:
            return Linear()
        if activation in ['softmax', 'Softmax']:
            return Softmax()
        if activation in ['elliot', 'Elliot']:
            return Elliot()
        if activation in ['symmetric_elliot', 'SymmetricElliot']:
            return SymmetricElliot()
        if activation in ['SoftPlus', 'soft_plus', 'softplus']:
            return SoftPlus()
        if activation in ['SoftSign', 'softsign', 'soft_sign']:
            return SoftSign()
        raise ValueError('Unknown activation name: {}.'.format(activation))

    elif isinstance(activation, Activation):
        return copy.deepcopy(activation)

    else:
        raise ValueError("Unknown type: {}.".format(activation.__class__.__name__))
Project: BankCardRecognizer    Author: ardiya    | project source | file source
def sigmoid(x):
    return (1 + numpy.tanh(x / 2.0)) / 2
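This is the identity sigmoid(x) = (1 + tanh(x/2)) / 2; the tanh form avoids overflow in exp(-x) for large negative x. A quick standalone check against the direct form:

import numpy

def sigmoid(x):
    return (1 + numpy.tanh(x / 2.0)) / 2

x = numpy.linspace(-10.0, 10.0, 21)
direct = 1.0 / (1.0 + numpy.exp(-x))
assert numpy.allclose(sigmoid(x), direct)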
Project: bayestsa    Author: thalesians    | project source | file source
def inversetransformparameterndarray(parameterndarray, includejumps):
    parameterndarray = npu.tondim1(parameterndarray)
    res = [
            parameterndarray[0],  # meanlogvar
            np.tanh(.5 * parameterndarray[1]), # persistence
            np.sqrt(np.exp(parameterndarray[2])), # voloflogvar
            np.tanh(.5 * parameterndarray[3]) # cor
        ]
    if includejumps:
        res.append(.5 * (np.tanh(parameterndarray[4]) + 1)) # jumpintensity
        res.append(np.sqrt(np.exp(parameterndarray[5]))) # jumpvol
    else:
        res.append(0.)
        res.append(1.)
    return np.array(res)
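The tanh calls here map unconstrained real parameters into their natural bounded ranges. A small standalone illustration of the three transforms used above (values invented):

import numpy as np

p = np.array([-2.0, 0.0, 3.0])   # illustrative unconstrained parameters
print(np.tanh(0.5 * p))          # persistence / correlation -> (-1, 1)
print(np.sqrt(np.exp(p)))        # volatility parameters -> (0, inf)
print(0.5 * (np.tanh(p) + 1))    # jump intensity -> (0, 1)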
Project: Gym_LineFollower    Author: Chachay    | project source | file source
def act(self):
        return np.tanh(np.random.randn(self.dim_action)) # random action
Project: Machine-Learning    Author: grasses    | project source | file source
def __init__(self, layers, activation = 'tanh'):
        if activation == 'logistic':
            self.activation = self.logistic
            self.activation_deriv = self.logistic_derivative
        elif activation == 'tanh':
            self.activation = self.tanh
            self.activation_deriv = self.tanh_deriv
        '''
        generate weight matrix with random float
        '''
        self.layers = layers
        self.weights = []
        for i in range(1, len(layers) - 1):
            self.weights.append((2 * np.random.random((layers[i - 1] + 1, layers[i] + 1)) - 1) * 0.25)
            self.weights.append((2 * np.random.random((layers[i] + 1, layers[i + 1])) - 1) * 0.25)
Project: Machine-Learning    Author: grasses    | project source | file source
def tanh(x):
        return np.tanh(x)
Project: Machine-Learning    Author: grasses    | project source | file source
def tanh_deriv(x):
        return 1.0 - np.tanh(x) * np.tanh(x)
Project: em_examples    Author: geoscixyz    | project source | file source
def fcn_ComputeExcitation_FEM(f,sig,mur,a):
    """Compute Excitation Factor (FEM)"""

    w = 2*np.pi*f
    mu = 4*np.pi*1e-7*mur
    alpha = a*np.sqrt(1j*w*mu*sig)

    chi = 1.5*(2*mur*(np.tanh(alpha) - alpha) + (alpha**2*np.tanh(alpha) - alpha + np.tanh(alpha)))/(mur*(np.tanh(alpha) - alpha) - (alpha**2*np.tanh(alpha) - alpha + np.tanh(alpha)))

    return chi
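A usage sketch with illustrative parameter values (a 5 cm sphere of conductivity 1e6 S/m in a 10 kHz field; none of these numbers come from the project):

# frequency [Hz], conductivity [S/m], relative permeability, radius [m]
f, sig, mur, a = 1e4, 1e6, 1.0, 0.05
chi = fcn_ComputeExcitation_FEM(f, sig, mur, a)
print(chi)  # complex excitation factor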
Project: LinearCorex    Author: gregversteeg    | project source | file source
def g(x, t=4):
    """A transformation that suppresses outliers for a standard normal."""
    xp = np.clip(x, -t, t)
    diff = np.tanh(x - xp)
    return xp + diff
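With the default t=4, inputs inside [-4, 4] pass through unchanged, while larger magnitudes are softly clamped to at most t + 1 in absolute value (since tanh is bounded by 1). For example:

import numpy as np

x = np.array([-10.0, -2.0, 0.0, 2.0, 10.0])
print(g(x))  # approx. [-5. -2.  0.  2.  5.]: |x| <= 4 unchanged, tails clamped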
Project: MachineLearningPracticePrograms    Author: Subarno    | project source | file source
def lossFun(inputs, targets, hprev):
    """
    inputs, targets are both list of integers.
    """
    xs, hs, ys, ps = {}, {}, {}, {}
    hs[-1] = np.copy(hprev)
    loss = 0
    #forward pass
    for t in range(len(inputs)):
        xs[t] = np.zeros((vocab_size,1)) #encode in 1-of-k representation
        xs[t][inputs[t]] = 1
        hs[t] = np.tanh(np.dot(Wxh, xs[t]) + np.dot(Whh, hs[t-1]) + bh)
        ys[t] = np.dot(Why, hs[t]) + by
        ps[t] = np.exp(ys[t])/np.sum(np.exp(ys[t])) #probabilities for next char
        loss += -np.log(ps[t][targets[t],0]) #softmax cross-entropy loss

    #backward pass
    dWxh, dWhh, dWhy = np.zeros_like(Wxh), np.zeros_like(Whh), np.zeros_like(Why)
    dbh, dby = np.zeros_like(bh), np.zeros_like(by)
    dhnext = np.zeros_like(hs[0])
    for t in reversed(range(len(inputs))):
        dy = np.copy(ps[t])
        dy[targets[t]] -= 1 # backprop into y. see http://cs231n.github.io/neural-networks-case-study/#grad if confused here
        dWhy += np.dot(dy, hs[t].T)
        dby += dy
        dh = np.dot(Why.T, dy) + dhnext # backprop into h
        dhraw = (1 - hs[t] * hs[t]) * dh # backprop through tanh nonlinearity
        dbh += dhraw
        dWxh += np.dot(dhraw, xs[t].T)
        dWhh += np.dot(dhraw, hs[t-1].T)
        dhnext = np.dot(Whh.T, dhraw)
    for dparam in [dWxh, dWhh, dWhy, dbh, dby]:
        np.clip(dparam, -5, 5, out=dparam) # clip to mitigate exploding gradients
    return loss, dWxh, dWhh, dWhy, dbh, dby, hs[len(inputs)-1]
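A hypothetical driver for lossFun (globals and sizes invented for illustration, mirroring the setup char-rnn style scripts use):

import numpy as np

# Hypothetical globals that lossFun() above expects; sizes are illustrative only.
vocab_size, hidden_size = 4, 6
rng = np.random.RandomState(1)
Wxh = rng.randn(hidden_size, vocab_size) * 0.01
Whh = rng.randn(hidden_size, hidden_size) * 0.01
Why = rng.randn(vocab_size, hidden_size) * 0.01
bh = np.zeros((hidden_size, 1))
by = np.zeros((vocab_size, 1))

inputs, targets = [0, 1, 2], [1, 2, 3]
hprev = np.zeros((hidden_size, 1))
loss, dWxh, dWhh, dWhy, dbh, dby, hprev = lossFun(inputs, targets, hprev)
print(loss)  # roughly len(inputs) * ln(vocab_size) at initialization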
Project: PyFunt    Author: dnlcrl    | project source | file source
def update_output(self, x):
        self.output = np.tanh(x)
        return self.output
Project: radar    Author: amoose136    | project source | file source
def test_basic_ufuncs(self):
        # Test various functions such as sin, cos.
        (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d
        assert_equal(np.cos(x), cos(xm))
        assert_equal(np.cosh(x), cosh(xm))
        assert_equal(np.sin(x), sin(xm))
        assert_equal(np.sinh(x), sinh(xm))
        assert_equal(np.tan(x), tan(xm))
        assert_equal(np.tanh(x), tanh(xm))
        assert_equal(np.sqrt(abs(x)), sqrt(xm))
        assert_equal(np.log(abs(x)), log(xm))
        assert_equal(np.log10(abs(x)), log10(xm))
        assert_equal(np.exp(x), exp(xm))
        assert_equal(np.arcsin(z), arcsin(zm))
        assert_equal(np.arccos(z), arccos(zm))
        assert_equal(np.arctan(z), arctan(zm))
        assert_equal(np.arctan2(x, y), arctan2(xm, ym))
        assert_equal(np.absolute(x), absolute(xm))
        assert_equal(np.angle(x + 1j*y), angle(xm + 1j*ym))
        assert_equal(np.angle(x + 1j*y, deg=True), angle(xm + 1j*ym, deg=True))
        assert_equal(np.equal(x, y), equal(xm, ym))
        assert_equal(np.not_equal(x, y), not_equal(xm, ym))
        assert_equal(np.less(x, y), less(xm, ym))
        assert_equal(np.greater(x, y), greater(xm, ym))
        assert_equal(np.less_equal(x, y), less_equal(xm, ym))
        assert_equal(np.greater_equal(x, y), greater_equal(xm, ym))
        assert_equal(np.conjugate(x), conjugate(xm))
Project: radar    Author: amoose136    | project source | file source
def test_testUfuncRegression(self):
        # Tests new ufuncs on MaskedArrays.
        for f in ['sqrt', 'log', 'log10', 'exp', 'conjugate',
                  'sin', 'cos', 'tan',
                  'arcsin', 'arccos', 'arctan',
                  'sinh', 'cosh', 'tanh',
                  'arcsinh',
                  'arccosh',
                  'arctanh',
                  'absolute', 'fabs', 'negative',
                  'floor', 'ceil',
                  'logical_not',
                  'add', 'subtract', 'multiply',
                  'divide', 'true_divide', 'floor_divide',
                  'remainder', 'fmod', 'hypot', 'arctan2',
                  'equal', 'not_equal', 'less_equal', 'greater_equal',
                  'less', 'greater',
                  'logical_and', 'logical_or', 'logical_xor',
                  ]:
            try:
                uf = getattr(umath, f)
            except AttributeError:
                uf = getattr(fromnumeric, f)
            mf = getattr(numpy.ma.core, f)
            args = self.d[:uf.nin]
            ur = uf(*args)
            mr = mf(*args)
            assert_equal(ur.filled(0), mr.filled(0), f)
            assert_mask_equal(ur.mask, mr.mask, err_msg=f)
Project: radar    Author: amoose136    | project source | file source
def test_testUfuncs1(self):
        # Test various functions such as sin, cos.
        (x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d
        self.assertTrue(eq(np.cos(x), cos(xm)))
        self.assertTrue(eq(np.cosh(x), cosh(xm)))
        self.assertTrue(eq(np.sin(x), sin(xm)))
        self.assertTrue(eq(np.sinh(x), sinh(xm)))
        self.assertTrue(eq(np.tan(x), tan(xm)))
        self.assertTrue(eq(np.tanh(x), tanh(xm)))
        with np.errstate(divide='ignore', invalid='ignore'):
            self.assertTrue(eq(np.sqrt(abs(x)), sqrt(xm)))
            self.assertTrue(eq(np.log(abs(x)), log(xm)))
            self.assertTrue(eq(np.log10(abs(x)), log10(xm)))
        self.assertTrue(eq(np.exp(x), exp(xm)))
        self.assertTrue(eq(np.arcsin(z), arcsin(zm)))
        self.assertTrue(eq(np.arccos(z), arccos(zm)))
        self.assertTrue(eq(np.arctan(z), arctan(zm)))
        self.assertTrue(eq(np.arctan2(x, y), arctan2(xm, ym)))
        self.assertTrue(eq(np.absolute(x), absolute(xm)))
        self.assertTrue(eq(np.equal(x, y), equal(xm, ym)))
        self.assertTrue(eq(np.not_equal(x, y), not_equal(xm, ym)))
        self.assertTrue(eq(np.less(x, y), less(xm, ym)))
        self.assertTrue(eq(np.greater(x, y), greater(xm, ym)))
        self.assertTrue(eq(np.less_equal(x, y), less_equal(xm, ym)))
        self.assertTrue(eq(np.greater_equal(x, y), greater_equal(xm, ym)))
        self.assertTrue(eq(np.conjugate(x), conjugate(xm)))
        self.assertTrue(eq(np.concatenate((x, y)), concatenate((xm, ym))))
        self.assertTrue(eq(np.concatenate((x, y)), concatenate((x, y))))
        self.assertTrue(eq(np.concatenate((x, y)), concatenate((xm, y))))
        self.assertTrue(eq(np.concatenate((x, y, x)), concatenate((x, ym, x))))
Project: radar    Author: amoose136    | project source | file source
def test_testUfuncRegression(self):
        f_invalid_ignore = [
            'sqrt', 'arctanh', 'arcsin', 'arccos',
            'arccosh', 'arctanh', 'log', 'log10', 'divide',
            'true_divide', 'floor_divide', 'remainder', 'fmod']
        for f in ['sqrt', 'log', 'log10', 'exp', 'conjugate',
                  'sin', 'cos', 'tan',
                  'arcsin', 'arccos', 'arctan',
                  'sinh', 'cosh', 'tanh',
                  'arcsinh',
                  'arccosh',
                  'arctanh',
                  'absolute', 'fabs', 'negative',
                  'floor', 'ceil',
                  'logical_not',
                  'add', 'subtract', 'multiply',
                  'divide', 'true_divide', 'floor_divide',
                  'remainder', 'fmod', 'hypot', 'arctan2',
                  'equal', 'not_equal', 'less_equal', 'greater_equal',
                  'less', 'greater',
                  'logical_and', 'logical_or', 'logical_xor']:
            try:
                uf = getattr(umath, f)
            except AttributeError:
                uf = getattr(fromnumeric, f)
            mf = getattr(np.ma, f)
            args = self.d[:uf.nin]
            with np.errstate():
                if f in f_invalid_ignore:
                    np.seterr(invalid='ignore')
                if f in ['arctanh', 'log', 'log10']:
                    np.seterr(divide='ignore')
                ur = uf(*args)
                mr = mf(*args)
            self.assertTrue(eq(ur.filled(0), mr.filled(0), f))
            self.assertTrue(eqmask(ur.mask, mr.mask))
Project: Sverchok    Author: Sverchok    | project source | file source
def tanh(x: Number = 0.0) -> Number:
    return np.tanh(x)


#  Constants times input n
Project: cortex    Author: rdevon    | project source | file source
def test_make_gru(dim_in=31, dim_h=11, dim_out=None,
                  i_net=None, a_net=None, o_net=None, c_net=None):
    print('Testing GRU formation')

    if i_net is None:
        i_net = dict(
            dim_h=17,
            n_layers=2,
            h_act='T.tanh',
            weight_scale=0.1,
        )
    if a_net is None:
        a_net = dict(
            dim_h=19,
            n_layers=2,
            h_act='T.tanh',
            weight_scale=0.1
        )
    if o_net is None:
        o_net = dict(
            dim_h=23,
            n_layers=2,
            weight_scale=0.1,
            distribution='binomial'
        )

    nets = dict(i_net=i_net, a_net=a_net, o_net=o_net, c_net=c_net)

    trng = RandomStreams(101)

    rnn = GRU.factory(dim_in=dim_in, dim_hs=[dim_h], dim_out=dim_out, **nets)
    rnn.set_tparams()
    print('GRU formed correctly')

    return rnn
Project: cortex    Author: rdevon    | project source | file source
def test_darn(dim_in=5, dim_h=3, dim_out=7, n_samples=13):
    darn = DARN(dim_in, dim_h, dim_out, 2, h_act='T.tanh', out_act='T.nnet.sigmoid')
    tparams = darn.set_tparams()

    X = T.matrix('X', dtype=floatX)
    H = T.matrix('H', dtype=floatX)
    C = darn(H)
    NLP = darn.neg_log_prob(X, C)

    f = theano.function([X, H], [C, NLP])

    x = np.random.randint(0, 2, size=(n_samples, dim_out)).astype(floatX)
    h = np.random.randint(0, 2, size=(n_samples, dim_in)).astype(floatX)

    c_t, nlp_t = f(x, h)
    print(c_t.shape)

    d_np = np.tanh(np.dot(h, darn.params['W0']) + darn.params['b0'])
    c_np = np.dot(d_np, darn.params['W1']) + darn.params['b1']

    assert np.allclose(c_t, c_np), (c_t, c_np)

    z_np = np.zeros((n_samples, dim_out)).astype(floatX) + darn.params['bar'][None, :] + c_np

    for i in range(dim_out):
        for j in range(i + 1, dim_out):
            z_np[:, i] += darn.params['War'][j, i] * x[:, j]

    p_np = sigmoid(z_np)

    p_np = np.clip(p_np, 1e-7, 1 - 1e-7)
    nlp_np = (- x * np.log(p_np) - (1 - x) * np.log(1 - p_np)).sum(axis=1)

    assert np.allclose(nlp_t, nlp_np), (nlp_t, nlp_np)

    samples, updates_s = darn.sample(C, n_samples=n_samples-1)
    f = theano.function([H], samples, updates=updates_s)
    print(f(h))