Python chainer.optimizers module: RMSpropGraves() example source code

We extracted the following 12 code examples from open-source Python projects to illustrate how chainer.optimizers.RMSpropGraves() is used.
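For reference, here is a minimal, self-contained usage sketch that follows the setup pattern shared by the samples below. The two-layer model and the toy data are illustrative assumptions, not code from any listed project; it targets the Chainer v1-era API used throughout this page (on Chainer versions older than 1.15, replace cleargrads() with zerograds()):

import chainer
import chainer.functions as F
import chainer.links as L
import numpy as np
from chainer import optimizers

class MLP(chainer.Chain):
    """A tiny two-layer network, purely for demonstration."""
    def __init__(self, n_in=4, n_hidden=16, n_out=2):
        super(MLP, self).__init__(
            l1=L.Linear(n_in, n_hidden),
            l2=L.Linear(n_hidden, n_out),
        )

    def __call__(self, x):
        return self.l2(F.relu(self.l1(x)))

model = MLP()

# RMSpropGraves (Graves, 2013) is RMSprop with a centered second-moment
# estimate plus a classical momentum term; the values below are the
# DQN-style settings several samples on this page use.
optimizer = optimizers.RMSpropGraves(lr=0.00025, alpha=0.95, momentum=0.95, eps=0.0001)
optimizer.setup(model)
optimizer.add_hook(chainer.optimizer.GradientClipping(10.0))  # optional hook

# One update step on a random toy batch.
x = np.random.rand(8, 4).astype(np.float32)
t = np.random.randint(0, 2, size=8).astype(np.int32)
model.cleargrads()                                # reset gradients
loss = F.softmax_cross_entropy(model(x), t)
loss.backward()                                   # accumulate gradients
optimizer.update()                                # apply the update rule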

Project: ImageCaptioning    Author: rkuga
def __init__(self, gpu, batchsize, data_dir, dataset, net, mode, epochs, save_every, size, **kwargs):
        super(Network, self).__init__(epochs, save_every)
        print("building ...")
        self.input_height = size
        self.input_width = size
        self.net = net
        self.mode = mode
        self.dataset = dataset
        self.train_data, self.test_data = self.get_dataset(data_dir, dataset)
        print('input_channel ==> %d using %s dataset' % (self.in_channel, self.dataset))

        self.enc = GoogLeNet()
        self.dec = Decoder(self.in_size)

        self.xp = cuda.cupy
        cuda.get_device(gpu).use()

        self.enc.to_gpu()
        self.dec.to_gpu()

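        # RMSpropGraves with its default hyperparameters; only the decoder gets an optimizer here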
        self.o_dec = optimizers.RMSpropGraves()
        self.o_dec.setup(self.dec)

        self.batchsize=batchsize
Project: ROCStory_skipthought_baseline    Author: soskek
def setup_optimizer(self):
        optimizer = optimizers.RMSpropGraves(
            lr=self.args.start_lr, alpha=0.95, momentum=0.9, eps=1e-08)
        optimizer.setup(self)
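        # hooks run at each update: clip the global gradient norm, then apply L2 weight decay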
        optimizer.add_hook(chainer.optimizer.GradientClipping(self.args.grad_clip))
        optimizer.add_hook(chainer.optimizer.WeightDecay(self.args.weight_decay))
        return optimizer
Project: chainer-deconv    Author: germanRos
def create(self):
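        # the single positional argument is the learning rate (lr=0.1)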
        return optimizers.RMSpropGraves(0.1)
Project: stock_dqn_f    Author: wdy06
def reset_optimizer(self):
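        # rebuild the optimizer from scratch, discarding its accumulated gradient statistics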
        self.optimizer = optimizers.RMSpropGraves(lr=0.00025, alpha=0.95, momentum=0.95, eps=0.0001)
        self.optimizer.setup(self.model)
Project: stock_dqn_f    Author: wdy06
def __init__(self, gpu_id, state_dimention, batchsize, historysize, enable_controller, arch):
        self.gpu_id = gpu_id
        self.num_of_actions = len(enable_controller)
        self.enable_controller = enable_controller  # Default setting : "Pong"
        self.arch = arch
        self.replay_size = batchsize
        self.data_size = historysize

        self.state_dimention = state_dimention
        print("Initializing DQN...")
        #   Initialization of Chainer 1.1.0 or older.
        #        print "CUDA init"
        #        cuda.init()

        print("Model Building")
        #self.model = dnn_6_new.Q_DNN(self.state_dimention,200,self.num_of_actions)
        self.model = self.set_model(self.arch, self.state_dimention, 200, self.num_of_actions)
        self.model.to_gpu(self.gpu_id)


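        # target network: a frozen copy of the online model, used to compute stable TD targets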
        self.model_target = copy.deepcopy(self.model)

        print "Initizlizing Optimizer"
        self.optimizer = optimizers.RMSpropGraves(lr=0.00025, alpha=0.95, momentum=0.95, eps=0.0001)
        self.optimizer.setup(self.model)

        # History Data :  D=[s, a, r, s_dash, end_episode_flag]
        self.D = [np.zeros((self.data_size, 1, self.state_dimention), dtype=np.float32),
                  np.zeros(self.data_size, dtype=np.int8),
                  np.zeros((self.data_size, 1), dtype=np.float32),
                  np.zeros((self.data_size, 1, self.state_dimention), dtype=np.float32),
                  np.zeros((self.data_size, 1), dtype=np.bool)]
Project: stock_dqn    Author: wdy06
def __init__(self, gpu_id, state_dimention, batchsize, historysize, enable_controller=[1, -1, 0], targetFlag=False):
        self.gpu_id = gpu_id
        self.num_of_actions = len(enable_controller)
        self.enable_controller = enable_controller  # Default setting : "Pong"
        self.replay_size = batchsize
        self.data_size = historysize

        self.state_dimention = state_dimention
        self.targetFlag = targetFlag

        print("Initializing DQN...")
        #   Initialization of Chainer 1.1.0 or older.
        #        print "CUDA init"
        #        cuda.init()

        print("Model Building")
        self.model = dnn_6.Q_DNN(self.state_dimention, 200, self.num_of_actions)
        self.model.to_gpu(self.gpu_id)


        self.model_target = copy.deepcopy(self.model)

        print "Initizlizing Optimizer"
        self.optimizer = optimizers.RMSpropGraves(lr=0.00025, alpha=0.95, momentum=0.95, eps=0.0001)
        self.optimizer.setup(self.model)

        # History Data :  D=[s, a, r, s_dash, end_episode_flag]
        self.D = [np.zeros((self.data_size, 1, self.state_dimention), dtype=np.float32),
                  np.zeros(self.data_size, dtype=np.int8),
                  np.zeros((self.data_size, 1), dtype=np.float32),
                  np.zeros((self.data_size, 1, self.state_dimention), dtype=np.float32),
                  np.zeros((self.data_size, 1), dtype=np.bool)]
Project: deel    Author: uei
def __init__(self, use_gpu, enable_controller, dim):
        self.use_gpu = use_gpu
        self.num_of_actions = len(enable_controller)
        self.enable_controller = enable_controller
        self.dim = dim

        print("Initializing Q-Network...")

        hidden_dim = 256
        self.model = FunctionSet(
            l4=F.Linear(self.dim*self.hist_size, hidden_dim, wscale=np.sqrt(2)),
            q_value=F.Linear(hidden_dim, self.num_of_actions,
                             initialW=np.zeros((self.num_of_actions, hidden_dim),
                                               dtype=np.float32))
        )
        if self.use_gpu >= 0:
            self.model.to_gpu()

        self.model_target = copy.deepcopy(self.model)

        self.optimizer = optimizers.RMSpropGraves(lr=0.00025, alpha=0.95, momentum=0.95, eps=0.0001)
        self.optimizer.setup(self.model.collect_parameters())

        # History Data :  D=[s, a, r, s_dash, end_episode_flag]
        self.d = [np.zeros((self.data_size, self.hist_size, self.dim), dtype=np.uint8),
                  np.zeros(self.data_size, dtype=np.uint8),
                  np.zeros((self.data_size, 1), dtype=np.int8),
                  np.zeros((self.data_size, self.hist_size, self.dim), dtype=np.uint8),
                  np.zeros((self.data_size, 1), dtype=np.bool)]
Project: der-network    Author: soskek
def setup_optimizer(self):
        optimizer = optimizers.RMSpropGraves(
            lr=self.args.start_lr, alpha=0.95, momentum=0.9, eps=1e-08)
        optimizer.setup(self)
        optimizer.add_hook(chainer.optimizer.GradientClipping(self.args.grad_clip))
        return optimizer
Project: chainer_frmqn    Author: okdshin
def __init__(self, state_shape, action_num, image_num_per_state,
            model,
            gamma=0.99, # discount factor
            replay_batch_size=32,
            replay_memory_size=5*10**4,
            target_model_update_freq=1,
            max_step=50,
            lr=0.00025,
            clipping=False # if True, ignore reward intensity
            ):
        print("initializing DQN...")
        self.action_num = action_num
        self.image_num_per_state = image_num_per_state
        self.gamma = gamma
        self.replay_batch_size = replay_batch_size
        self.replay_memory_size = replay_memory_size
        self.target_model_update_freq = target_model_update_freq
        self.max_step = max_step
        self.clipping = clipping

        print("Initializing Model...")
        self.model = model
        self.model_target = copy.deepcopy(self.model)

        print("Initializing Optimizer")
        self.optimizer = optimizers.RMSpropGraves(lr=lr, alpha=0.95, momentum=0.0, eps=0.01)
        self.optimizer.setup(self.model)
        self.optimizer.add_hook(chainer.optimizer.GradientClipping(20))

        print("Initializing Replay Buffer...")
        self.dataset = dataset.DataSet(
                max_size=replay_memory_size, max_step=max_step, frame_shape=state_shape, frame_dtype=np.uint8)

        self.xp = model.xp
        self.state_shape = state_shape
Project: NeuralStyleTransfer    Author: Francis-Hsu
def optimize_rmsprop(self, init_img, lr=0.1, alpha=0.95, momentum=0.9, eps=1e-4,
                         iterations=2000, save=50, filename='iter', str_contrast=False):
        chainer_rms = optimizers.RMSpropGraves(lr=lr, alpha=alpha, momentum=momentum, eps=eps)
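        # per-parameter state used by RMSpropGraves' update rule:
        # n (avg of squared grads), g (avg of grads), delta (momentum buffer)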
        state = {'n': xp.zeros_like(init_img.data), 'g': xp.zeros_like(init_img.data),
                 'delta': xp.zeros_like(init_img.data)}
        out_img = Variable(xp.zeros_like(init_img.data), volatile=True)

        time_start = time.time()
        for epoch in range(iterations):
            loss = self.loss_total(init_img)
            loss.backward()
            loss.unchain_backward()

            # normalize the gradient; note this actually sums the squared
            # elements (a squared L2 norm), despite the grad_l1_norm name
            grad_l1_norm = xp.sum(xp.absolute(init_img.grad * init_img.grad))
            init_img.grad /= grad_l1_norm

            if gpu_flag:
                chainer_rms.update_one_gpu(init_img, state)
            else:
                chainer_rms.update_one_cpu(init_img, state)

            init_img.zerograd()

            # save image every 'save' iteration
            if save != 0 and (epoch + 1) % save == 0:
                if self.preserve_color:
                    init_img_lum = separate_lum_chr(init_img)[0]
                    if gpu_flag:
                        init_img_lum.to_gpu()
                    out_img.copydata(init_img_lum + self.content_img_chr)
                else:
                    out_img.copydata(init_img)
                save_image(out_img, filename + '_' + str(epoch + 1) + '.png', contrast=str_contrast)
                print("Image Saved at Iteration %.0f, Time Used: %.4f, Total Loss: %.4f" %
                      ((epoch + 1), (time.time() - time_start), loss.data))


Project: chainer_pong    Author: icoxfog417
def __init__(self, 
                    agent, 
                    memory_size=10**4,
                    replay_size=32,
                    gamma=0.99,
                    initial_exploration=10**4,
                    target_update_freq=10**4,
                    learning_rate=0.00025,
                    epsilon_decay=1e-6,
                    minimum_epsilon=0.1):
        self.agent = agent
        self.target = Q(self.agent.q.n_history, self.agent.q.n_action, on_gpu=self.agent.q.on_gpu)

        self.memory_size = memory_size
        self.replay_size = replay_size
        self.gamma = gamma
        self.initial_exploration = initial_exploration
        self.target_update_freq = target_update_freq
        self.learning_rate = learning_rate
        self.epsilon_decay = epsilon_decay
        self.minimum_epsilon = minimum_epsilon
        self._step = 0

        # prepare memory for replay
        n_hist = self.agent.q.n_history
        size = self.agent.q.SIZE
        self.memory = [
            np.zeros((memory_size, n_hist, size, size), dtype=np.float32),
            np.zeros(memory_size, dtype=np.uint8),
            np.zeros((memory_size, 1), dtype=np.float32),
            np.zeros((memory_size, n_hist, size, size), dtype=np.float32),
            np.zeros((memory_size, 1), dtype=np.bool)
        ]
        self.memory_text = [
            "state", "action", "reward", "next_state", "episode_end"
        ]

        # prepare optimizer
        self.optimizer = optimizers.RMSpropGraves(lr=learning_rate, alpha=0.95, momentum=0.95, eps=0.01)
        self.optimizer.setup(self.agent.q)
        self._loss = 9
        self._qv = 0
Project: doubleDQN    Author: masataka46
def __init__(self, enable_controller=[0, 1, 3, 4]):
        self.num_of_actions = len(enable_controller)
        self.enable_controller = enable_controller  # Default setting : "Breakout"

        print "Initializing DDQN..."
#   Initialization of Chainer 1.1.0 or older.
#        print "CUDA init"
#        cuda.init()

        print "Model Building"
        self.model = FunctionSet(
            l1=F.Convolution2D(4, 32, ksize=8, stride=4, nobias=False, wscale=np.sqrt(2)),
            l2=F.Convolution2D(32, 64, ksize=4, stride=2, nobias=False, wscale=np.sqrt(2)),
            l3=F.Convolution2D(64, 64, ksize=3, stride=1, nobias=False, wscale=np.sqrt(2)),
            l4=F.Linear(3136, 512, wscale=np.sqrt(2)),
            q_value=F.Linear(512, self.num_of_actions,
                             initialW=np.zeros((self.num_of_actions, 512),
                                               dtype=np.float32))
        ).to_gpu()

        if args.resumemodel:
            # load saved model
            serializers.load_npz(args.resumemodel, self.model)
            print "load model from resume.model"


        self.model_target = copy.deepcopy(self.model)

        print "Initizlizing Optimizer"
        self.optimizer = optimizers.RMSpropGraves(lr=0.00025, alpha=0.95, momentum=0.95, eps=0.0001)
        self.optimizer.setup(self.model.collect_parameters())

        # History Data :  D=[s, a, r, s_dash, end_episode_flag]
        if args.resumeD1 and args.resumeD2:
            # load saved D1 and D2
            npz_tmp1 = np.load(args.resumeD1)
            npz_tmp2 = np.load(args.resumeD2)
            self.D = [npz_tmp1['D0'],
                      npz_tmp1['D1'],
                      npz_tmp1['D2'],
                      npz_tmp2['D3'],
                      npz_tmp2['D4']]
            npz_tmp1.close()
            npz_tmp2.close()
            print "loaded stored D1 and D2"
        else:
            self.D = [np.zeros((self.data_size, 4, 84, 84), dtype=np.uint8),
                      np.zeros(self.data_size, dtype=np.uint8),
                      np.zeros((self.data_size, 1), dtype=np.int8),
                      np.zeros((self.data_size, 4, 84, 84), dtype=np.uint8),
                      np.zeros((self.data_size, 1), dtype=np.bool)]
            print "initialize D data"