Python chainer.functions module: reshape() example source code

The following 50 code examples, extracted from open-source Python projects, illustrate how to use chainer.functions.reshape().

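Before the examples, a minimal sketch of the function itself (a toy snippet, assuming only Chainer and NumPy are installed): F.reshape returns a Variable viewing the input under a new shape, the total element count must not change, and a single -1 entry is inferred from the remaining dimensions.

import numpy as np
import chainer.functions as F

x = np.arange(6, dtype=np.float32).reshape(2, 3)
y = F.reshape(x, (3, 2))   # element count must stay the same
z = F.reshape(x, (-1, 1))  # a single -1 is inferred -> shape (6, 1)
print(y.shape, z.shape)    # (3, 2) (6, 1)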
Project: chainerrl    Author: chainer    | project source | file source
def __call__(self, x):
        h = x
        for l in self.conv_layers:
            h = self.activation(l(h))

        # Advantage
        batch_size = x.shape[0]
        ya = self.a_stream(h)
        mean = F.reshape(
            F.sum(ya, axis=1) / self.n_actions, (batch_size, 1))
        ya, mean = F.broadcast(ya, mean)
        ya -= mean

        # State value
        ys = self.v_stream(h)

        ya, ys = F.broadcast(ya, ys)
        q = ya + ys
        return action_value.DiscreteActionValue(q)
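The F.reshape to (batch_size, 1) turns the per-state mean advantage into a column so it broadcasts across the action axis; a NumPy-only sketch of the same centering step, with toy shapes:

import numpy as np

ya = np.arange(8, dtype=np.float32).reshape(2, 4)  # (batch_size, n_actions)
mean = (ya.sum(axis=1) / 4).reshape(2, 1)          # column vector, as in the F.reshape call
ya = ya - mean                                     # broadcasts across actions
print(ya.mean(axis=1))                             # ~[0. 0.]: advantages are centered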
Project: chainerrl    Author: chainer    | project source | file source
def _compute_y_and_t(self, exp_batch, gamma):

        batch_state = exp_batch['state']
        batch_size = len(batch_state)

        # Compute Q-values for current states
        qout = self.q_function(batch_state)

        batch_actions = exp_batch['action']
        batch_q = F.reshape(qout.evaluate_actions(
            batch_actions), (batch_size, 1))

        # Target values must also backprop gradients
        batch_q_target = F.reshape(
            self._compute_target_values(exp_batch, gamma), (batch_size, 1))

        return batch_q, scale_grad.scale_grad(batch_q_target, self.grad_scale)
Project: chainerrl    Author: chainer    | project source | file source
def _compute_y_and_t(self, exp_batch, gamma):
        batch_size = exp_batch['reward'].shape[0]

        # Compute Q-values for current states
        batch_state = exp_batch['state']

        qout = self.model(batch_state)

        batch_actions = exp_batch['action']
        batch_q = F.reshape(qout.evaluate_actions(
            batch_actions), (batch_size, 1))

        with chainer.no_backprop_mode():
            batch_q_target = F.reshape(
                self._compute_target_values(exp_batch, gamma),
                (batch_size, 1))

        return batch_q, batch_q_target
Project: gconv_experiments    Author: tscohen    | project source | file source
def __call__(self, x, t, train=True, finetune=False):

        # First conv layer
        h = self[0](x)

        # Residual blocks
        for i in range(1, len(self) - 2):
            h = self[i](h, train, finetune)

        # BN, relu, pool, final layer
        h = self[-2](h)
        h = F.relu(h)
        n, nc, ns, nx, ny = h.data.shape
        h = F.reshape(h, (n, nc * ns, nx, ny))
        h = F.average_pooling_2d(h, ksize=h.data.shape[2:])
        h = self[-1](h)
        h = F.reshape(h, h.data.shape[:2])

        return F.softmax_cross_entropy(h, t), F.accuracy(h, t)
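The feature maps of this group-equivariant network carry an extra group axis ns; the first F.reshape folds it into the channel axis so ordinary 2D pooling applies, and the final reshape drops the 1x1 spatial axes pooling leaves behind. A shape-only sketch with toy sizes:

import numpy as np

n, nc, ns, nx, ny = 2, 3, 4, 5, 5                  # batch, channels, group elements, spatial
h = np.random.rand(n, nc, ns, nx, ny).astype(np.float32)
h = h.reshape(n, nc * ns, nx, ny)                  # fold the group axis into channels
print(h.shape)                                     # (2, 12, 5, 5): ready for ordinary 2D pooling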
Project: gconv_experiments    Author: tscohen    | project source | file source
def __call__(self, x, t, train=True, finetune=False):

        h = x

        # First conv layer
        h = self[0](h)

        # Residual blocks
        for i in range(1, len(self) - 2):
            h = self[i](h, train, finetune)

        # BN, relu, pool, final layer
        h = self[-2](h)
        h = F.relu(h)
        h = F.average_pooling_2d(h, ksize=h.data.shape[2:])
        h = self[-1](h)
        h = F.reshape(h, h.data.shape[:2])

        return F.softmax_cross_entropy(h, t), F.accuracy(h, t)
Project: googlenet_v3    Author: nutszebra    | project source | file source
def __call__(self, x, train=True):
        h = self.conv1(x, train)
        h = self.conv2(h, train)
        h = self.conv3(h, train)
        h = F.max_pooling_2d(h, ksize=(3, 3), stride=(2, 2), pad=(1, 1))
        h = self.conv4(h, train)
        h = self.conv5(h, train)
        h = self.conv6(h, train)
        h = self.inception_f5_1(h, train)
        h = self.inception_f5_2(h, train)
        h = self.inception_f5_3(h, train)
        h = self.inception_f6_1(h, train)
        h = self.inception_f6_2(h, train)
        h = self.inception_f6_3(h, train)
        h = self.inception_f6_4(h, train)
        h = self.inception_f6_5(h, train)
        h = self.inception_f7_1(h, train)
        h = self.inception_f7_2(h, train)
        num, categories, y, x = h.data.shape
        # global average pooling
        h = F.reshape(F.average_pooling_2d(h, (y, x)), (num, categories))
        h = F.dropout(h, ratio=0.2, train=train)
        h = self.linear(h)
        return h
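The marked lines are the usual global-average-pooling idiom: average over the full (y, x) map, then F.reshape away the 1x1 spatial axes. A small check against a plain spatial mean (toy shapes, assuming Chainer is installed):

import numpy as np
import chainer.functions as F

h = np.random.rand(2, 5, 7, 7).astype(np.float32)       # (num, categories, y, x)
pooled = F.reshape(F.average_pooling_2d(h, (7, 7)), (2, 5))
assert np.allclose(pooled.data, h.mean(axis=(2, 3)), atol=1e-6)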
Project: biaffineparser    Author: chantera    | project source | file source
def compute_loss(self, y, t):
        arc_logits, label_logits = y
        true_arcs, true_labels = t.T

        b, l1, l2 = arc_logits.shape
        true_arcs = F.pad_sequence(true_arcs, padding=-1)
        if not self.model._cpu:
            true_arcs.to_gpu()
        arc_loss = F.softmax_cross_entropy(
            F.reshape(arc_logits, (b * l1, l2)),
            F.reshape(true_arcs, (b * l1,)),
            ignore_label=-1)

        b, l1, d = label_logits.shape
        true_labels = F.pad_sequence(true_labels, padding=-1)
        if not self.model._cpu:
            true_labels.to_gpu()
        label_loss = F.softmax_cross_entropy(
            F.reshape(label_logits, (b * l1, d)),
            F.reshape(true_labels, (b * l1,)),
            ignore_label=-1)

        loss = arc_loss + label_loss
        return loss
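Both loss terms use the same flattening trick: collapse the batch and length axes so softmax_cross_entropy scores one row per token, while ignore_label=-1 skips the padded positions. A toy-shape sketch:

import numpy as np
import chainer.functions as F

b, l1, l2 = 2, 3, 4                                            # batch, length, classes
logits = np.random.randn(b, l1, l2).astype(np.float32)
targets = np.array([[1, 2, -1], [0, -1, -1]], dtype=np.int32)  # -1 marks padding
loss = F.softmax_cross_entropy(
    F.reshape(logits, (b * l1, l2)),
    targets.reshape(b * l1),
    ignore_label=-1)                                           # padded tokens contribute nothing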
Project: biaffineparser    Author: chantera    | project source | file source
def compute_accuracy(self, y, t):
        arc_logits, label_logits = y
        true_arcs, true_labels = t.T

        b, l1, l2 = arc_logits.shape
        true_arcs = F.pad_sequence(true_arcs, padding=-1)
        if not self.model._cpu:
            true_arcs.to_gpu()
        arc_accuracy = F.accuracy(
            F.reshape(arc_logits, (b * l1, l2)),
            F.reshape(true_arcs, (b * l1,)),
            ignore_label=-1)

        b, l1, d = label_logits.shape
        true_labels = F.pad_sequence(true_labels, padding=-1)
        if not self.model._cpu:
            true_labels.to_gpu()
        label_accuracy = F.accuracy(
            F.reshape(label_logits, (b * l1, d)),
            F.reshape(true_labels, (b * l1,)),
            ignore_label=-1)

        accuracy = (arc_accuracy + label_accuracy) / 2
        return accuracy
Project: chainer-neural-style    Author: dsanno    | project source | file source
def nearest_neighbor_patch(x, patch, patch_norm):
    assert patch.data.shape[0] == 1, 'mini batch size of patch must be 1'
    assert patch_norm.data.shape[0] == 1, 'mini batch size of patch_norm must be 1'

    xp = cuda.get_array_module(x.data)
    z = x.data
    b, ch, h, w = z.shape
    z = z.transpose((1, 0, 2, 3)).reshape((ch, -1))
    norm = xp.expand_dims(xp.sum(z ** 2, axis=0) ** 0.5, 0)
    z = z / xp.broadcast_to(norm, z.shape)
    p = patch.data
    p_norm = patch_norm.data
    p = p.reshape((ch, -1))
    p_norm = p_norm.reshape((1, -1))
    p_normalized = p / xp.broadcast_to(p_norm, p.shape)
    correlation = z.T.dot(p_normalized)
    nearest_index = xp.argmax(correlation, axis=1)
    nearest_neighbor = p.take(nearest_index, axis=1).reshape((ch, b, h, w)).transpose((1, 0, 2, 3))
    return Variable(nearest_neighbor)
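The (ch, -1) reshapes flatten every spatial location and every patch into a column, so the nearest-neighbour search reduces to a single matrix product of L2-normalized columns (cosine similarity). A NumPy-only sketch with toy sizes (names hypothetical):

import numpy as np

ch, n_locations, n_patches = 3, 6, 4
z = np.random.rand(ch, n_locations).astype(np.float32)  # image features, one column per location
p = np.random.rand(ch, n_patches).astype(np.float32)    # style patches
z /= np.linalg.norm(z, axis=0, keepdims=True)
p_n = p / np.linalg.norm(p, axis=0, keepdims=True)
nearest = np.argmax(z.T.dot(p_n), axis=1)               # best-matching patch per location
print(nearest.shape)                                    # (6,)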
Project: chainer-neural-style    Author: dsanno    | project source | file source
def luminance_only(x, y):
    xp = cuda.get_array_module(x)
    w = xp.asarray([0.114, 0.587, 0.299], dtype=np.float32)
    x_shape = x.shape
    y_shape = y.shape

    x = x.reshape(x_shape[:2] + (-1,))
    xl = xp.zeros((x.shape[0], 1, x.shape[2]), dtype=np.float32)
    for i in six.moves.range(len(x)):
        xl[i,:] = w.dot(x[i])
    xl_mean = xp.mean(xl, axis=2, keepdims=True)
    xl_std = xp.std(xl, axis=2, keepdims=True)

    y = y.reshape(y_shape[:2] + (-1,))
    yl = xp.zeros((y.shape[0], 1, y.shape[2]), dtype=np.float32)
    for i in six.moves.range(len(y)):
        yl[i,:] = w.dot(y[i])
    yl_mean = xp.mean(yl, axis=2, keepdims=True)
    yl_std = xp.std(yl, axis=2, keepdims=True)

    xl = (xl - xl_mean) / xl_std * yl_std + yl_mean
    return xp.repeat(xl, 3, axis=1).reshape(x_shape)
Project: chainer-neural-style    Author: dsanno    | project source | file source
def match_color_histogram(x, y):
    z = np.zeros_like(x)
    shape = x[0].shape
    for i in six.moves.range(len(x)):
        a = x[i].reshape((3, -1))
        a_mean = np.mean(a, axis=1, keepdims=True)
        a_var = np.cov(a)
        d, v = np.linalg.eig(a_var)
        d += 1e-6
        a_sigma_inv = v.dot(np.diag(d ** (-0.5))).dot(v.T)

        b = y[i].reshape((3, -1))
        b_mean = np.mean(b, axis=1, keepdims=True)
        b_var = np.cov(b)
        d, v = np.linalg.eig(b_var)
        b_sigma = v.dot(np.diag(d ** 0.5)).dot(v.T)

        transform = b_sigma.dot(a_sigma_inv)
        z[i,:] = (transform.dot(a - a_mean) + b_mean).reshape(shape)
    return z
Project: depccg    Author: masashi-y    | project source | file source
def forward(self, ws, ss, ps):
        batchsize = len(ws)
        xp = chainer.cuda.get_array_module(ws[0])
        ws = map(self.emb_word, ws)
        ss = [F.reshape(self.emb_suf(s), (s.shape[0], 4 * self.afix_dim)) for s in ss]
        ps = [F.reshape(self.emb_prf(s), (s.shape[0], 4 * self.afix_dim)) for s in ps]
        xs_f = [F.dropout(F.concat([w, s, p]),
            self.dropout_ratio, train=self.train) for w, s, p in zip(ws, ss, ps)]
        xs_b = [x[::-1] for x in xs_f]
        cx_f, hx_f, cx_b, hx_b = self._init_state(xp, batchsize)
        _, _, hs_f = self.lstm_f(hx_f, cx_f, xs_f, train=self.train)
        _, _, hs_b = self.lstm_b(hx_b, cx_b, xs_b, train=self.train)
        hs_b = [x[::-1] for x in hs_b]
        # ys: [(sentence length, number of category)]
        hs = [F.concat([h_f, h_b]) for h_f, h_b in zip(hs_f, hs_b)]

        cat_ys = [self.linear_cat2(
            F.dropout(F.elu(self.linear_cat1(h)), 0.5, train=self.train)) for h in hs]

        dep_ys = [self.biaffine(
            F.elu(F.dropout(self.linear_dep(h), 0.32, train=self.train)),
            F.elu(F.dropout(self.linear_head(h), 0.32, train=self.train))) for h in hs]

        return cat_ys, dep_ys
Project: depccg    Author: masashi-y    | project source | file source
def forward(self, ws, ss, ps):
        batchsize = len(ws)
        xp = chainer.cuda.get_array_module(ws[0])
        ws = map(self.emb_word, ws)
        ss = [F.reshape(self.emb_suf(s), (s.shape[0], 4 * self.afix_dim)) for s in ss]
        ps = [F.reshape(self.emb_prf(s), (s.shape[0], 4 * self.afix_dim)) for s in ps]
        # [(sentence length, (word_dim + suf_dim + prf_dim))]
        xs_f = [F.dropout(F.concat([w, s, p]),
            self.dropout_ratio, train=self.train) for w, s, p in zip(ws, ss, ps)]
        xs_b = [x[::-1] for x in xs_f]
        cx_f, hx_f, cx_b, hx_b = self._init_state(xp, batchsize)
        _, _, hs_f = self.lstm_f(hx_f, cx_f, xs_f, train=self.train)
        _, _, hs_b = self.lstm_b(hx_b, cx_b, xs_b, train=self.train)
        hs_b = [x[::-1] for x in hs_b]
        # ys: [(sentence length, number of category)]
        ys = [self.linear2(F.relu(
                self.linear1(F.concat([h_f, h_b]))))
                    for h_f, h_b in zip(hs_f, hs_b)]
        return ys
Project: depccg    Author: masashi-y    | project source | file source
def forward(self, ws, ss, ps):
        batchsize = len(ws)
        xp = chainer.cuda.get_array_module(ws[0])
        ws = map(self.emb_word, ws)
        ss = [F.reshape(self.emb_suf(s), (s.shape[0], 4 * self.afix_dim)) for s in ss]
        ps = [F.reshape(self.emb_prf(s), (s.shape[0], 4 * self.afix_dim)) for s in ps]
        # [(sentence length, (word_dim + suf_dim + prf_dim))]
        xs_f = [F.dropout(F.concat([w, s, p]),
            self.dropout_ratio, train=self.train) for w, s, p in zip(ws, ss, ps)]
        xs_b = [x[::-1] for x in xs_f]
        cx_f, hx_f, cx_b, hx_b = self._init_state(xp, batchsize)
        _, _, hs_f = self.lstm_f(hx_f, cx_f, xs_f, train=self.train)
        _, _, hs_b = self.lstm_b(hx_b, cx_b, xs_b, train=self.train)
        hs_b = [x[::-1] for x in hs_b]
        # ys: [(sentence length, number of category)]
        ys = [self.linear2(F.relu(
                self.linear1(F.concat([h_f, h_b]))))
                    for h_f, h_b in zip(hs_f, hs_b)]
        return ys
Project: depccg    Author: masashi-y    | project source | file source
def forward(self, ws, ss, ps):
        batchsize = len(ws)
        xp = chainer.cuda.get_array_module(ws[0])
        ws = map(self.emb_word, ws)
        ss = [F.reshape(self.emb_suf(s), (s.shape[0], 4 * self.afix_dim)) for s in ss]
        ps = [F.reshape(self.emb_prf(s), (s.shape[0], 4 * self.afix_dim)) for s in ps]
        xs_f = [F.dropout(F.concat([w, s, p]),
            self.dropout_ratio, train=self.train) for w, s, p in zip(ws, ss, ps)]
        xs_b = [x[::-1] for x in xs_f]
        cx_f, hx_f, cx_b, hx_b = self._init_state(xp, batchsize)
        _, _, hs_f = self.lstm_f(hx_f, cx_f, xs_f, train=self.train)
        _, _, hs_b = self.lstm_b(hx_b, cx_b, xs_b, train=self.train)
        hs_b = [x[::-1] for x in hs_b]
        # ys: [(sentence length, number of category)]
        hs = [F.concat([h_f, h_b]) for h_f, h_b in zip(hs_f, hs_b)]

        cat_ys = [self.linear_cat2(
            F.dropout(F.elu(self.linear_cat1(h)), 0.5, train=self.train)) for h in hs]

        dep_ys = [self.biaffine(
            F.elu(F.dropout(self.linear_dep(h), 0.32, train=self.train)),
            F.elu(F.dropout(self.linear_head(h), 0.32, train=self.train))) for h in hs]

        return cat_ys, dep_ys
Project: depccg    Author: masashi-y    | project source | file source
def __call__(self, xs, ts):
        """
        Inputs:
            xs (tuple(Variable, Variable, Variable)):
                each of Variables is of dim (batchsize,)
            ts Variable:
                (batchsize)
        """
        words, suffixes, caps = xs[:,:7], xs[:, 7:14], xs[:, 14:]
        h_w = self.emb_word(words)
        h_c = self.emb_caps(caps)
        h_s = self.emb_suffix(suffixes)
        h = F.concat([h_w, h_c, h_s], 2)
        batchsize, ntokens, hidden = h.data.shape
        h = F.reshape(h, (batchsize, ntokens * hidden))
        ys = self.linear(h)

        loss = F.softmax_cross_entropy(ys, ts)
        acc = F.accuracy(ys, ts)

        chainer.report({
            "loss": loss,
            "accuracy": acc
            }, self)
        return loss
Project: depccg    Author: masashi-y    | project source | file source
def forward(self, ws, ss, ps):
        batchsize, length = ws.shape
        xp = chainer.cuda.get_array_module(ws[0])
        ws = self.emb_word(ws) # (batch, length, word_dim)
        ss = F.reshape(self.emb_suf(ss), (batchsize, length, -1))
        ps = F.reshape(self.emb_prf(ps), (batchsize, length, -1))
        hs = F.transpose(F.concat([ws, ss, ps], 2), (1, 0, 2))
        hs = F.dropout(hs, self.dropout_ratio, train=self.train)
        hs = F.split_axis(hs, length, 0)
        hs_f = []
        hs_b = []
        self._init_state()
        for h_in_f, h_in_b in zip(hs, reversed(hs)):
            h_f = self.lstm_f2(self.lstm_f1(F.squeeze(h_in_f, 0)))
            hs_f.append(h_f)
            h_b = self.lstm_b2(self.lstm_b1(F.squeeze(h_in_b, 0)))
            hs_b.append(h_b)

        ys = [self.linear2(F.relu(self.linear1(F.concat([h_f, h_b]))))
                for h_f, h_b in zip(hs_f, reversed(hs_b))]
        return ys
Project: cnn-text-classification    Author: marevol    | project source | file source
def __call__(self, x, train=True):
        hlist = []
        h_0 = self['embed'](x)
        if not self.non_static:
            h_0 = Variable(h_0.data)
        h_1 = F.reshape(h_0, (h_0.shape[0], 1, h_0.shape[1], h_0.shape[2]))
        for filter_h in self.filter_sizes:
            pool_size = (self.doc_length - filter_h + 1, 1)
            h = F.max_pooling_2d(F.relu(self['conv' + str(filter_h)](h_1)), pool_size)
            hlist.append(h)
        h = F.concat(hlist)
        pos = 0
        while pos < len(self.hidden_units) - 1:
            h = F.dropout(F.relu(self['l' + str(pos)](h)))
            pos += 1
        y = F.relu(self['l' + str(pos)](h))
        return y
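The F.reshape on h_0 only inserts a singleton channel axis, turning (batch, doc_length, embed_dim) into the NCHW layout the convolutions expect; a one-line sketch with toy shapes:

import numpy as np
import chainer.functions as F

h_0 = np.random.rand(8, 50, 300).astype(np.float32)  # (batch, doc_length, embed_dim)
h_1 = F.reshape(h_0, (8, 1, 50, 300))                # add a channel axis for conv2d
# F.expand_dims(h_0, 1) would do the same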
Project: chainer-speech-recognition    Author: musyoku    | project source | file source
def __call__(self, x, split_into_variables=True):
        batchsize = x.shape[0]
        seq_length = x.shape[3]

        out_data = super(AcousticModel, self).__call__(x)
        assert out_data.shape[3] == seq_length

        # For CTC training, split the RNN output along the time axis into a list of per-timestep Variables
        if split_into_variables:
            out_data = F.swapaxes(out_data, 1, 3)
            out_data = F.reshape(out_data, (batchsize, -1))
            out_data = F.split_axis(out_data, seq_length, axis=1)
        else:
            out_data = F.swapaxes(out_data, 1, 3)
            out_data = F.squeeze(out_data, axis=2)

        return out_data
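The split_into_variables branch reflects that Chainer's CTC loss (F.connectionist_temporal_classification) consumes a list of per-timestep Variables: the (batch, channel, 1, time) output is time-swapped, flattened, and cut back into seq_length pieces. A toy-shape sketch:

import numpy as np
import chainer.functions as F

out = np.random.rand(2, 5, 1, 7).astype(np.float32)  # (batch, channel, 1, time)
out = F.swapaxes(out, 1, 3)                          # (batch, time, 1, channel)
out = F.reshape(out, (2, -1))                        # (batch, time * channel)
steps = F.split_axis(out, 7, axis=1)                 # 7 Variables, each (batch, channel)
print(len(steps), steps[0].shape)                    # 7 (2, 5)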
Project: chainer-speech-recognition    Author: musyoku    | project source | file source
def __call__(self, x, split_into_variables=True):
        batchsize = x.shape[0]
        seq_length = x.shape[3]

        out_data = super(AcousticModel, self).__call__(x)
        assert out_data.shape[3] == seq_length

        # For CTC training, split the RNN output along the time axis into a list of per-timestep Variables
        if split_into_variables:
            out_data = F.swapaxes(out_data, 1, 3)
            out_data = F.reshape(out_data, (batchsize, -1))
            out_data = F.split_axis(out_data, seq_length, axis=1)
        else:
            out_data = F.swapaxes(out_data, 1, 3)
            out_data = F.squeeze(out_data, axis=2)

        return out_data
Project: chainer-qrnn    Author: musyoku    | project source | file source
def __call__(self, X, return_last=False):
        batchsize = X.shape[0]
        seq_length = X.shape[1]
        embedding = self.embed(X)
        embedding = F.swapaxes(embedding, 1, 2)

        out_data = self._forward_layer(0, embedding)
        in_data = [out_data]

        for layer_index in range(1, self.num_layers):
            out_data = self._forward_layer(layer_index, F.concat(in_data) if self.densely_connected else in_data[-1])   # dense conv
            in_data.append(out_data)

        out_data = F.concat(in_data) if self.densely_connected else out_data    # dense conv

        if return_last:
            out_data = out_data[:, :, -1, None]

        if self.using_dropout:
            out_data = F.dropout(out_data, ratio=self.dropout)

        out_data = self.fc(out_data)
        out_data = F.reshape(F.swapaxes(out_data, 1, 2), (-1, self.vocab_size))

        return out_data
Project: chainer-dfi    Author: dsanno    | project source | file source
def mean_feature(net, paths, image_size, base_feature, top_num, batch_size, clip_rect=None):
    xp = net.xp
    image_num = len(paths)
    features = []
    for i in six.moves.range(0, image_num, batch_size):
        x = [preprocess_image(Image.open(path).convert('RGB'), image_size, clip_rect) for path in paths[i:i + batch_size]]
        x = xp.asarray(np.concatenate(x, axis=0))
        y = feature(net, x)
        features.append([cuda.to_cpu(layer.data) for layer in y])
    if image_num > top_num:
        last_features = np.concatenate([f[-1] for f in features], axis=0)
        last_features = last_features.reshape((last_features.shape[0], -1))
        base_feature = cuda.to_cpu(base_feature).reshape((1, -1,))
        diff = np.sum((last_features - base_feature) ** 2, axis=1)

        nearest_indices = np.argsort(diff)[:top_num]
        nearests = [np.concatenate(xs, axis=0)[nearest_indices] for xs in zip(*features)]
    else:
        nearests = [np.concatenate(xs, axis=0) for xs in zip(*features)]

    return [xp.asarray(np.mean(f, axis=0, keepdims=True)) for f in nearests]
Project: chainer_nmt    Author: odashi    | project source | file source
def _context(self, p, fb_mat, fbe_mat):
    batch_size, source_length, _ = fb_mat.data.shape
    # {pe,e}_mat: shape = [batch * srclen, atten]
    pe_mat = F.reshape(
        F.broadcast_to(
            F.expand_dims(self.p_e(p), 1),
            [batch_size, source_length, self.atten_size]),
        [batch_size * source_length, self.atten_size])
    e_mat = F.tanh(fbe_mat + pe_mat)
    # a_mat: shape = [batch, srclen]
    a_mat = F.softmax(F.reshape(self.e_a(e_mat), [batch_size, source_length]))
    # q: shape = [batch, 2 * hidden]
    q = F.reshape(
        F.batch_matmul(a_mat, fb_mat, transa=True),
        [batch_size, 2 * self.hidden_size])

    return q
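The expand_dims/broadcast_to/reshape chain tiles the projected decoder state once per source position, so it can be summed with the flattened encoder features before the tanh; a shape-only sketch with toy sizes:

import numpy as np
import chainer.functions as F

batch, srclen, atten = 2, 3, 4
pe = np.random.rand(batch, atten).astype(np.float32)  # stands in for self.p_e(p)
pe_mat = F.reshape(
    F.broadcast_to(F.expand_dims(pe, 1), (batch, srclen, atten)),
    (batch * srclen, atten))                          # one copy per source word
print(pe_mat.shape)                                   # (6, 4)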
Project: DeepPoseComparison    Author: ynaka81    | project source | file source
def predict(self, x):
        """ Predict 2D pose from image. """
        # layer1
        h = F.relu(self.conv1(x))
        h = F.max_pooling_2d(h, 3, stride=2)
        # layer2
        h = F.relu(self.conv2(h))
        h = F.max_pooling_2d(h, 3, stride=2)
        # layer3-5
        h = F.relu(self.conv3(h))
        h = F.relu(self.conv4(h))
        h = F.relu(self.conv5(h))
        h = F.max_pooling_2d(h, 3, stride=2)
        # layer6-8
        h = F.dropout(F.relu(self.fc6(h)), train=self.train)
        h = F.dropout(F.relu(self.fc7(h)), train=self.train)
        h = self.fc8(h)
        return F.reshape(h, (-1, self.Nj, 2))
Project: vsmlib    Author: undertherain    | project source | file source
def __call__(self, x, context):

        x = F.broadcast_to(x[:, None], (context.shape[0], context.shape[1]))
        x = F.reshape(x, (context.shape[0] * context.shape[1],))

        if args.subword == 'rnn':
            context = context.reshape((context.shape[0] * context.shape[1]))
            e = self.rnn.charRNN(context)

        if args.subword == 'none':
            e = self.embed(context)
            e = F.reshape(e, (e.shape[0] * e.shape[1], e.shape[2]))

        loss = self.loss_func(e, x)
        reporter.report({'loss': loss}, self)
        return loss
Project: nn_parsers    Author: odashi    | project source | file source
def forward(self, data):
    ep_list = [self.p_embed(d[0], d[1]) for d in data]
    ec_list = [self.c_embed(d[0], d[1]) for d in data]
    er_list = [self.r_embed(d[0], d[1]) for d in data]
    p_list = self.p_encode(ep_list)
    c_list = self.c_encode(ec_list)
    r_list = self.r_encode(er_list)

    P = functions.reshape(
      functions.concat(p_list, 0),
      (1, len(data), self.hidden_size))
    C = functions.reshape(
      functions.concat(c_list, 0),
      (1, len(data), self.hidden_size))
    R = functions.concat(r_list, 0)

    parent_scores = functions.reshape(
      functions.batch_matmul(C, P, transb=True),
      (len(data), len(data)))
    root_scores = functions.reshape(
      self.r_scorer(R),
      (1, len(data)))

    return parent_scores, root_scores
Project: ddnn    Author: kunglab    | project source | file source
def avg_pool_max_pool(self, hs):
        num_output = len(hs[0]) 
        houts = []
        i = 0
        shape = hs[0][i].shape
        h = F.dstack([F.reshape(h[i],(shape[0], -1)) for h in hs])
        x = 1.0*F.sum(h,2)/h.shape[2]
        x = F.reshape(x, shape)
        houts.append(x)

        for i in range(1,num_output):
            shape = hs[0][i].shape
            h = F.dstack([F.reshape(h[i],(shape[0], -1)) for h in hs])
            x = 1.0*F.max(h,2)
            x = F.reshape(x, shape)
            houts.append(x)
        return houts
Project: ddnn    Author: kunglab    | project source | file source
def max_pool_avg_pool(self, hs):
        num_output = len(hs[0]) 
        houts = []
        i = 0
        shape = hs[0][i].shape
        h = F.dstack([F.reshape(h[i],(shape[0], -1)) for h in hs])
        x = 1.0*F.max(h,2)
        x = F.reshape(x, shape)
        houts.append(x)

        for i in range(1,num_output):
            shape = hs[0][i].shape
            h = F.dstack([F.reshape(h[i],(shape[0], -1)) for h in hs])
            x = 1.0*F.sum(h,2)/h.shape[2]
            x = F.reshape(x, shape)
            houts.append(x)
        return houts
Project: soft-dtw    Author: mblondel    | project source | file source
def __call__(self, x, t):
        y = self.predictor(x)

        if self.loss == "euclidean":
            return F.mean_squared_error(y, t)

        elif self.loss == "sdtw":
            loss = 0
            for i in range(y.shape[0]):
                y_i = F.reshape(y[i], (-1,1))
                t_i = F.reshape(t[i], (-1,1))
                loss += SoftDTWLoss(self.gamma)(y_i, t_i)
            return loss

        else:
            raise ValueError("Unknown loss")
Project: googlenet_v2    Author: nutszebra    | project source | file source
def __call__(self, x, train=True):
        h = self.conv1(x, train)
        h = F.max_pooling_2d(h, ksize=(3, 3), stride=(2, 2), pad=(1, 1))
        h = self.conv2_1x1(h, train)
        h = self.conv2_3x3(h, train)
        h = F.max_pooling_2d(h, ksize=(3, 3), stride=(2, 2), pad=(1, 1))
        h = self.inception3a(h, train)
        h = self.inception3b(h, train)
        h = self.inception3c(h, train)
        h = self.inception4a(h, train)
        h = self.inception4b(h, train)
        h = self.inception4c(h, train)
        h = self.inception4d(h, train)
        h = self.inception4e(h, train)
        h = self.inception5a(h, train)
        h = self.inception5b(h, train)
        num, categories, y, x = h.data.shape
        # global average pooling
        h = F.reshape(F.average_pooling_2d(h, (y, x)), (num, categories))
        h = self.linear(h)
        return h
Project: seq2seq_temporal_attention    Author: aistairc    | project source | file source
def __call__(self, a_list, state, batch_size, xp):
        e_list = []
        sum_e = xp.zeros((batch_size, 1), dtype=xp.float32)
        for a in a_list:
            w = reshape(batch_matmul(state['h2'], a, transa=True), (batch_size, 1))
            w.data = xp.clip(w.data, -40, 40)
            e = exp(w)
            e_list.append(e)
            sum_e = sum_e + e

        context = xp.zeros((batch_size, self.hidden_size), dtype=xp.float32)

        for a, e in zip(a_list, e_list):
            e /= sum_e
            context = context + reshape(batch_matmul(a, e), (batch_size, self.hidden_size))
        return context, e_list, sum_e
Project: teras    Author: chantera    | project source | file source
def __call__(self, x, hs):
        batch, dim = x.shape
        alphas = 0
        _sum = 0
        for h in F.transpose_sequence(hs[:batch]):
            size = h.shape[0]
            if size < batch:
                h = F.vstack([h, variable.Variable(
                    self.xp.zeros((batch - size, h.shape[1]), dtype='f'))])
            score = self._score_func(x, h)
            e = F.exp(score)
            _sum += e
            alphas += batch_matmul(h, e)
        c = F.reshape(batch_matmul(F.reshape(alphas, (batch, dim)),
                                   (1 / _sum)), (batch, dim))
        return c
Project: teras    Author: chantera    | project source | file source
def __call__(self, chars):
        if not isinstance(chars, (tuple, list)):
            chars = [chars]
        char_ids, boundaries = self._create_sequence(chars)
        x = self.embed(self.xp.array(char_ids))
        x = F.dropout(x, self._dropout)
        length, dim = x.shape
        C = self.conv(F.reshape(x, (1, 1, length, dim)))
        # C.shape -> (1, out_size, length, 1)
        C = F.split_axis(F.transpose(F.reshape(C, (self.out_size, length))),
                         boundaries, axis=0)
        ys = F.max(F.pad_sequence(
            [matrix for i, matrix in enumerate(C) if i % 2 == 1],
            padding=-np.inf), axis=1)  # max over time pooling
        # assert len(chars) == ys.shape[0]
        return ys
Project: teras    Author: chantera    | project source | file source
def __call__(self, x1, x2):
        xp = self.xp
        out_size = self.out_size
        batch_size, len1, dim1 = x1.shape
        if not self.nobias[0]:
            x1 = F.concat((x1, xp.ones((batch_size, len1, 1),
                                       dtype=xp.float32)), axis=2)
            dim1 += 1
        len2, dim2 = x2.shape[1:]
        if not self.nobias[1]:
            x2 = F.concat((x2, xp.ones((batch_size, len2, 1),
                                       dtype=xp.float32)), axis=2)
            dim2 += 1
        x1_reshaped = F.reshape(x1, (batch_size * len1, dim1))
        W_reshaped = F.reshape(F.transpose(self.W, (0, 2, 1)),
                               (dim1, out_size * dim2))
        affine = F.reshape(F.matmul(x1_reshaped, W_reshaped),
                           (batch_size, len1 * out_size, dim2))
        biaffine = F.transpose(
            F.reshape(batch_matmul(affine, x2, transb=True),
                      (batch_size, len1, out_size, len2)),
            (0, 1, 3, 2))
        if not self.nobias[2]:
            biaffine += F.broadcast_to(self.b, biaffine.shape)
        return biaffine
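All the reshapes here implement one bilinear form. Assuming the weight is stored as (dim1, dim2, out_size), which is the layout the transpose/reshape chain implies, the whole block is equivalent to a single einsum; a NumPy replay with toy sizes:

import numpy as np

batch, len1, len2, dim1, dim2, out = 2, 3, 4, 5, 6, 7
x1 = np.random.rand(batch, len1, dim1).astype(np.float32)
x2 = np.random.rand(batch, len2, dim2).astype(np.float32)
W = np.random.rand(dim1, dim2, out).astype(np.float32)  # assumed layout of self.W

# reference: the bilinear form the reshape/matmul chain computes
ref = np.einsum('bik,klo,bjl->bijo', x1, W, x2)         # (batch, len1, len2, out)

# replay of the chain in plain numpy
Wr = W.transpose(0, 2, 1).reshape(dim1, out * dim2)
aff = x1.reshape(batch * len1, dim1).dot(Wr).reshape(batch, len1 * out, dim2)
bia = np.einsum('bnl,bjl->bnj', aff, x2)
bia = bia.reshape(batch, len1, out, len2).transpose(0, 1, 3, 2)
assert np.allclose(bia, ref, atol=1e-4)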
Project: mlpnlp-nmt    Author: mlpnlp    | project source | file source
def prepareDecoder(self, encInfo):
        self.model.decLSTM.reset_state()
        if self.attn_mode == 0:
            aList = None
        elif self.attn_mode == 1:
            aList = encInfo.attnList
        elif self.attn_mode == 2:
            aList = self.model.attnM(
                chaFunc.reshape(encInfo.attnList,
                                (encInfo.cMBSize * encInfo.encLen, self.hDim)))
            # TODO
        else:
            assert 0, "ERROR"
        xp = cuda.get_array_module(encInfo.lstmVars[0].data)
        finalHS = chainer.Variable(
            xp.zeros(
                encInfo.lstmVars[0].data.shape,
                dtype=xp.float32))  # the initial input_feed is a zero vector
        return aList, finalHS

    ############################
Project: chainer_frmqn    Author: okdshin    | project source | file source
def calc_loss(self, state, state_dash, actions, rewards, done_list):
        assert(state.shape == state_dash.shape)
        s = state.reshape((state.shape[0], reduce(lambda x, y: x*y, state.shape[1:]))).astype(np.float32)
        s_dash = state_dash.reshape((state.shape[0], reduce(lambda x, y: x*y, state.shape[1:]))).astype(np.float32)
        q = self.model.q_function(s)

        q_dash = self.model_target.q_function(s_dash)  # Q(s',*)
        max_q_dash = np.asarray(list(map(np.max, q_dash.data)), dtype=np.float32) # max_a Q(s',a)

        target = q.data.copy()
        for i in range(self.replay_batch_size):
            assert(self.replay_batch_size == len(done_list))
            r = np.sign(rewards[i]) if self.clipping else rewards[i]
            if done_list[i]:
                discounted_sum = r
            else:
                discounted_sum = r + self.gamma * max_q_dash[i]
            assert(self.replay_batch_size == len(actions))
            target[i, actions[i]] = discounted_sum

        loss = F.sum(F.huber_loss(Variable(target), q, delta=1.0)) #/ self.replay_batch_size
        return loss, q
Project: chainer-gan-improvements    Author: hvy    | project source | file source
def __call__(self, x):
        minibatch_size = x.shape[0]
        activation = F.reshape(self.t(x), (-1, self.n_kernels, self.kernel_dim))
        activation_ex = F.expand_dims(activation, 3)
        activation_ex_t = F.expand_dims(F.transpose(activation, (1, 2, 0)), 0)
        activation_ex, activation_ex_t = F.broadcast(activation_ex, activation_ex_t)
        diff = activation_ex - activation_ex_t

        xp = chainer.cuda.get_array_module(x.data)
        eps = F.expand_dims(xp.eye(minibatch_size, dtype=xp.float32), 1)
        eps = F.broadcast_to(eps, (minibatch_size, self.n_kernels, minibatch_size))
        sum_diff = F.sum(abs(diff), axis=2)
        sum_diff = F.broadcast_to(sum_diff, eps.shape)
        abs_diff = sum_diff + eps

        minibatch_features = F.sum(F.exp(-abs_diff), 2)
        return F.concat((x, minibatch_features), axis=1)
Project: chainer-glu    Author: musyoku    | project source | file source
def __call__(self, X, return_last=False):
        batchsize = X.shape[0]
        seq_length = X.shape[1]
        embedding = self.embed(X)
        embedding = F.swapaxes(embedding, 1, 2)
        residual_input = embedding if self.ndim_h == self.ndim_embedding else 0

        out_data = self._forward_layer(0, embedding)
        for layer_index in range(1, self.num_blocks * self.num_layers_per_block):
            out_data = self._forward_layer(layer_index, out_data)
            if (layer_index + 1) % self.num_layers_per_block == 0:
                if self.using_dropout:
                    out_data = F.dropout(out_data, ratio=self.dropout)
                out_data += residual_input
                residual_input = out_data

        if return_last:
            out_data = out_data[:, :, -1, None]

        out_data = self.dense(out_data)
        out_data = F.reshape(F.swapaxes(out_data, 1, 2), (-1, self.vocab_size))

        return out_data
Project: unrolled-gan    Author: musyoku    | project source | file source
def __call__(self, x):
        xp = chainer.cuda.get_array_module(x.data)
        batchsize = x.shape[0]
        if self.train_weights == False and self.initial_T is not None:
            self.T.W.data = self.initial_T

        M = F.reshape(self.T(x), (-1, self.num_kernels, self.ndim_kernel))
        M = F.expand_dims(M, 3)
        M_T = F.transpose(M, (3, 1, 2, 0))
        M, M_T = F.broadcast(M, M_T)

        norm = F.sum(abs(M - M_T), axis=2)
        eraser = F.broadcast_to(xp.eye(batchsize, dtype=x.dtype).reshape((batchsize, 1, batchsize)), norm.shape)
        c_b = F.exp(-(norm + 1e6 * eraser))
        o_b = F.sum(c_b, axis=2)

        if self.train_weights == False:
            self.initial_T = self.T.W.data

        return F.concat((x, o_b), axis=1)
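This is the minibatch-discrimination feature from Improved GAN training: pairwise L1 distances between the reshaped kernel activations, with the 1e6 * eraser term zeroing each sample's self-pair out of the exponential sum. A NumPy-only sketch of the same computation, up to axis order, with toy sizes:

import numpy as np

n, k, d = 4, 5, 3                                   # batchsize, num_kernels, ndim_kernel
M = np.random.rand(n, k, d).astype(np.float32)      # T(x) after the reshape
diff = np.abs(M[:, None] - M[None, :]).sum(axis=3)  # (n, n, k) pairwise L1 distances
o_b = np.exp(-diff).sum(axis=1) - 1.0               # subtract exp(0) for the self-pair
print(o_b.shape)                                    # (n, k): concatenated onto x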
Project: wavenet    Author: musyoku    | project source | file source
def cross_entropy(self, raw_network_output, target_signal_data):
        if isinstance(target_signal_data, Variable):
            raise Exception("target_signal_data cannot be Variable")

        raw_network_output = self.to_variable(raw_network_output)
        target_width = target_signal_data.shape[1]
        batchsize = raw_network_output.data.shape[0]

        if raw_network_output.data.shape[3] != target_width:
            raise Exception("raw_network_output.width != target.width")

        # (batchsize * time_step,) <- (batchsize, time_step)
        target_signal_data = target_signal_data.reshape((-1,))
        target_signal = self.to_variable(target_signal_data)

        # (batchsize * time_step, channels) <- (batchsize, channels, 1, time_step)
        raw_network_output = F.transpose(raw_network_output, (0, 3, 2, 1))
        raw_network_output = F.reshape(raw_network_output, (batchsize * target_width, -1))

        loss = F.softmax_cross_entropy(raw_network_output, target_signal)
        return loss
Project: googlenet    Author: nutszebra    | project source | file source
def __call__(self, x, train=True):
        h = F.relu(self.conv1(x))
        h = F.max_pooling_2d(h, ksize=(3, 3), stride=(2, 2), pad=(1, 1))
        h = F.relu(self.conv2_1x1(h))
        h = F.relu(self.conv2_3x3(h))
        h = F.max_pooling_2d(h, ksize=(3, 3), stride=(2, 2), pad=(1, 1))
        h = self.inception3a(h)
        h = self.inception3b(h)
        h = F.max_pooling_2d(h, ksize=(3, 3), stride=(2, 2), pad=(1, 1))
        h = self.inception4a(h)
        h = self.inception4b(h)
        h = self.inception4c(h)
        h = self.inception4d(h)
        h = self.inception4e(h)
        h = F.max_pooling_2d(h, ksize=(3, 3), stride=(2, 2), pad=(1, 1))
        h = self.inception5a(h)
        h = F.relu(self.inception5b(h))
        num, categories, y, x = h.data.shape
        # global average pooling
        h = F.reshape(F.average_pooling_2d(h, (y, x)), (num, categories))
        h = F.dropout(h, ratio=0.4, train=train)
        h = self.linear(h)
        return h
Project: chainer-cf-nade    Author: dsanno    | project source | file source
def __call__(self, h, train=True):
        """
        in_type:
            h: float32
        in_shape:
            h: (batch_size, hidden_num)
        out_type: float32
        out_shape: (batch_size, rating_num, predicted_item_num)
        """

        xp = cuda.get_array_module(h.data)
        h = self.p(h)
        if hasattr(self, 'q'):
            h = self.q(h)
        h = F.reshape(h, (-1, self.rating_num, self.item_num, 1))
        w = chainer.Variable(xp.asarray(np.tri(self.rating_num, dtype=np.float32).reshape(self.rating_num, self.rating_num, 1, 1)), volatile=h.volatile)
        h = F.convolution_2d(h, w)
        return F.reshape(h, (-1, self.rating_num, self.item_num))
Project: chainer-gan-experiments    Author: Aixile    | project source | file source
def __call__(self, x, test=False, retain_forward=False):
        h = self.c_first(x, test=test, retain_forward=retain_forward)
        for i in range(self.down_layers-1):
            h = getattr(self, 'c'+str(i))(h, test=test, retain_forward=retain_forward)
        if not self.conv_as_last:
            _b, _ch, _w, _h = h.data.shape
            self.last_shape=(_b, _ch, _w, _h)
            h = F.reshape(h, (_b, _ch*_w*_h))
        h = self.c_last(h, test=test, retain_forward=retain_forward)
        return h
Project: chainer-gan-experiments    Author: Aixile    | project source | file source
def differentiable_backward(self, g):
        g = self.c_last.differentiable_backward(g)
        if not self.conv_as_last:
            _b, _ch, _w, _h = self.last_shape
            g = F.reshape(g, (_b, _ch, _w, _h))
        for i in reversed(range(self.down_layers-1)):
            g = getattr(self, 'c'+str(i)).differentiable_backward(g)
        g = self.c_first.differentiable_backward(g)
        return g
Project: chainer-gan-experiments    Author: Aixile    | project source | file source
def __call__(self, x, test=False, retain_forward=False):
        h = self.c_first(x, test=test, retain_forward=retain_forward)
        for i in range(self.down_layers-1):
            h = getattr(self, 'c'+str(i))(h, test=test, retain_forward=retain_forward)
        _b, _ch, _w, _h = h.data.shape
        self.last_shape=(_b, _ch, _w, _h)
        h = F.reshape(h, (_b, _ch*_w*_h))
        h0 = self.c_last_0(h, test=test, retain_forward=retain_forward)
        h1 = self.c_last_1_0(h, test=test, retain_forward=retain_forward)
        #h1 = self.c_last_1_1(h1, test=test, retain_forward=retain_forward)
        #h1 = self.c_last_1_2(h1, test=test, retain_forward=retain_forward)
        return h0, h1
Project: chainer-gan-experiments    Author: Aixile    | project source | file source
def differentiable_backward(self, g):
        g = self.c_last_0.differentiable_backward(g)
        _b, _ch, _w, _h = self.last_shape
        g = F.reshape(g, (_b, _ch, _w, _h))
        for i in reversed(range(self.down_layers-1)):
            g = getattr(self, 'c'+str(i)).differentiable_backward(g)
        g = self.c_first.differentiable_backward(g)
        return g
Project: chainer-gan-experiments    Author: Aixile    | project source | file source
def __call__(self, z, test=False):
        h = self.c_first(z, test=test)
        h = F.reshape(h, (h.data.shape[0], self.base_size, 4, 4))
        for i in range(self.up_layers):
            h = getattr(self, 'c'+str(i))(h, test=test)
        return h
Project: chainer-gan-experiments    Author: Aixile    | project source | file source
def differentiable_backward(self, g):
        g = self.c_last.differentiable_backward(g)
        _b, _ch, _w, _h = self.last_shape
        g = F.reshape(g, (_b, _ch, _w, _h))
        for i in reversed(range(self.down_layers-1)):
            g = getattr(self, 'c'+str(i)).differentiable_backward(g)
        g = self.c_first.differentiable_backward(g)
        return g
Project: chainer-spatial-transformer-networks    Author: hvy    | project source | file source
def affine_matrix(self, x):
        h = F.max_pooling_2d(x, 2, 2)
        h = F.relu(self.conv1(h))
        h = F.max_pooling_2d(h, 2, 2)
        h = F.relu(self.conv2(h))
        h = F.max_pooling_2d(h, 2, 2)
        theta = F.reshape(self.fc(h), (x.shape[0], 2, 3))
        return theta
Project: chainer-object-detection    Author: dsanno    | project source | file source
def reorg(input, stride=2):
    batch_size, input_channel, input_height, input_width = input.data.shape
    output_height, output_width, output_channel = int(input_height/stride), int(input_width/stride), input_channel*stride*stride
    output = F.transpose(F.reshape(input, (batch_size, input_channel, output_height, stride, output_width, stride)), (0, 1, 2, 4, 3, 5))
    output = F.transpose(F.reshape(output, (batch_size, input_channel, output_height, output_width, -1)), (0, 4, 1, 2, 3))
    output = F.reshape(output, (batch_size, output_channel, output_height, output_width))
    return output
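reorg is the YOLOv2 space-to-depth layer: every stride x stride block of pixels moves into the channel axis, halving each spatial side for stride=2. Replaying the same reshape/transpose chain in NumPy on a tiny input:

import numpy as np

x = np.arange(16, dtype=np.float32).reshape(1, 1, 4, 4)  # (batch, channel, height, width)
b, c, h, w = x.shape
s = 2
out = x.reshape(b, c, h // s, s, w // s, s).transpose(0, 1, 2, 4, 3, 5)
out = out.reshape(b, c, h // s, w // s, -1).transpose(0, 4, 1, 2, 3)
out = out.reshape(b, c * s * s, h // s, w // s)
print(out.shape)  # (1, 4, 2, 2): each 2x2 block became 4 channels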