Python keras.callbacks module: LearningRateScheduler() example source code

The following 20 code examples, extracted from open-source Python projects, illustrate how to use keras.callbacks.LearningRateScheduler().
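
Before the project examples, a minimal orientation sketch (the schedule function and its constants here are illustrative, not taken from any project below): LearningRateScheduler wraps a callable that maps the 0-based epoch index to a learning rate, and Keras invokes it at the start of every epoch.

from keras.callbacks import LearningRateScheduler

def halve_every_10_epochs(epoch):
    # 0-based epoch index in, learning rate out.
    return 0.01 * (0.5 ** (epoch // 10))

scheduler = LearningRateScheduler(halve_every_10_epochs)
# model.fit(X, y, callbacks=[scheduler], ...)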

Project: keras-contrib | Author: farizrahman4u
def get_callbacks(self, model_prefix='Model'):
        """
        Creates a list of callbacks that can be used during training to create a
        snapshot ensemble of the model.

        Args:
            model_prefix: prefix for the filename of the weights.

        Returns: list of 3 callbacks [ModelCheckpoint, LearningRateScheduler,
                 SnapshotModelCheckpoint] which can be provided to the 'fit' function
        """
        if not os.path.exists('weights/'):
            os.makedirs('weights/')

        callback_list = [ModelCheckpoint('weights/%s-Best.h5' % model_prefix, monitor='val_acc',
                                         save_best_only=True, save_weights_only=True),
                         LearningRateScheduler(schedule=self._cosine_anneal_schedule),
                         SnapshotModelCheckpoint(self.T, self.M, fn_prefix='weights/%s' % model_prefix)]

        return callback_list
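Note: self._cosine_anneal_schedule is not shown in this excerpt. In the snapshot-ensemble setup it is a cosine annealing schedule restarted every T/M epochs; the reconstruction below is a sketch under that assumption, with self.alpha_zero standing in for the initial learning rate (names and constants may differ from the project's actual code).

import numpy as np

def _cosine_anneal_schedule(self, t):
    # One cosine annealing cycle per T/M epochs; M restarts over T total epochs.
    cos_inner = np.pi * (t % (self.T // self.M)) / (self.T // self.M)
    return float(self.alpha_zero / 2 * (np.cos(cos_inner) + 1))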
Project: keras | Author: GeekLiB
def test_LearningRateScheduler():
    (X_train, y_train), (X_test, y_test) = get_test_data(nb_train=train_samples,
                                                         nb_test=test_samples,
                                                         input_shape=(input_dim,),
                                                         classification=True,
                                                         nb_class=nb_class)
    y_test = np_utils.to_categorical(y_test)
    y_train = np_utils.to_categorical(y_train)
    model = Sequential()
    model.add(Dense(nb_hidden, input_dim=input_dim, activation='relu'))
    model.add(Dense(nb_class, activation='softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer='sgd',
                  metrics=['accuracy'])

    cbks = [callbacks.LearningRateScheduler(lambda x: 1. / (1. + x))]
    model.fit(X_train, y_train, batch_size=batch_size,
              validation_data=(X_test, y_test), callbacks=cbks, nb_epoch=5)
    # After 5 epochs the schedule was last called with epoch index 4, so lr == 1/5 == 0.2.
    assert abs(float(K.get_value(model.optimizer.lr)) - 0.2) < K.epsilon()
Project: enhance | Author: cdiazbas
def train(self, n_iterations):
        print("Training network...")        

        # Recover losses from previous run
        if (self.option == 'continue'):
            with open("{0}_{1}_loss.json".format(self.root, self.depth), 'r') as f:
                losses = json.load(f)
        else:
            losses = []

        self.checkpointer = ModelCheckpoint(filepath="{0}_{1}_weights.hdf5".format(self.root, self.depth), verbose=1, save_best_only=True)
        self.history = LossHistory(self.root, self.depth, losses, {'name': '{0}_{1}'.format(self.root, self.depth), 'init_t': time.asctime()})

        self.reduce_lr = LearningRateScheduler(self.learning_rate)

        self.metrics = self.model.fit_generator(self.training_generator(), self.batchs_per_epoch_training, epochs=n_iterations, 
            callbacks=[self.checkpointer, self.history, self.reduce_lr], validation_data=self.validation_generator(), validation_steps=self.batchs_per_epoch_validation)

        self.history.finalize()
Project: keras-customized | Author: ambrite
def test_LearningRateScheduler():
    (X_train, y_train), (X_test, y_test) = get_test_data(nb_train=train_samples,
                                                         nb_test=test_samples,
                                                         input_shape=(input_dim,),
                                                         classification=True,
                                                         nb_class=nb_class)
    y_test = np_utils.to_categorical(y_test)
    y_train = np_utils.to_categorical(y_train)
    model = Sequential()
    model.add(Dense(nb_hidden, input_dim=input_dim, activation='relu'))
    model.add(Dense(nb_class, activation='softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer='sgd',
                  metrics=['accuracy'])

    cbks = [callbacks.LearningRateScheduler(lambda x: 1. / (1. + x))]
    model.fit(X_train, y_train, batch_size=batch_size,
              validation_data=(X_test, y_test), callbacks=cbks, nb_epoch=5)
    # After 5 epochs the schedule was last called with epoch index 4, so lr == 1/5 == 0.2.
    assert abs(float(K.get_value(model.optimizer.lr)) - 0.2) < K.epsilon()
Project: keras | Author: NVIDIA
def test_LearningRateScheduler():
    (X_train, y_train), (X_test, y_test) = get_test_data(nb_train=train_samples,
                                                         nb_test=test_samples,
                                                         input_shape=(input_dim,),
                                                         classification=True,
                                                         nb_class=nb_class)
    y_test = np_utils.to_categorical(y_test)
    y_train = np_utils.to_categorical(y_train)
    model = Sequential()
    model.add(Dense(nb_hidden, input_dim=input_dim, activation='relu'))
    model.add(Dense(nb_class, activation='softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer='sgd',
                  metrics=['accuracy'])

    cbks = [callbacks.LearningRateScheduler(lambda x: 1. / (1. + x))]
    model.fit(X_train, y_train, batch_size=batch_size,
              validation_data=(X_test, y_test), callbacks=cbks, nb_epoch=5)
    # After 5 epochs the schedule was last called with epoch index 4, so lr == 1/5 == 0.2.
    assert abs(float(K.get_value(model.optimizer.lr)) - 0.2) < K.epsilon()
Project: dem | Author: hengyuan-hu
def train_with_data_augmentation(self, batch_size, num_epoch, lr_schedule):
        datagen = ImageDataGenerator(
            width_shift_range=0.125, # randomly shift images horizontally, fraction
            height_shift_range=0.125, # randomly shift images vertically, fraction
            horizontal_flip=True)

        opt = keras.optimizers.SGD(lr=lr_schedule(0), momentum=0.9, nesterov=True)
        callback_list = [LearningRateScheduler(lr_schedule)]
        self.ae.compile(optimizer=opt, loss='mse')
        assert False, 'seems that y is not augmented.'
        # history = self.ae.fit_generator(
        #     datagen.flow(
        #         self.dataset.train_xs,
        #         self.dataset.train_xs,
        #         batch_size=batch_size),
        #     nb_epoch=num_epoch,
        #     validation_data=(self.dataset.test_xs, self.dataset.test_xs),
        #     shuffle=True, callbacks=callback_list)
        # With the fit call commented out, `history` is undefined below; the
        # assert above keeps this method from being run as-is.
        self.history = history.history
Project: latplan | Author: guicho271828
def linear_schedule(self, schedules, rate):
        schedules = np.array([-1]+schedules) * self.parameters['full_epoch']
        # use a float array so the fractional learning rates below are not truncated
        ratios = np.ones_like(schedules, dtype=float)
        print(schedules, ratios)
        for i in range(len(ratios)):
            ratios[i] = self.parameters['lr'] * (rate**i)
        def fn(epoch):
            for i,s in enumerate(schedules):
                if epoch < s:
                    return float(ratios[i-1])
            return float(ratios[-1])
        return LearningRateScheduler(fn)
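For illustration (parameter values assumed): with full_epoch=100, lr=0.1, rate=0.5 and schedules=[0.5, 0.8], the returned callback holds the rate at 0.1 until epoch 50, at 0.05 until epoch 80, and at 0.025 thereafter, i.e. a piecewise-constant decay at the given fractions of the full run.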
Project: TC-Lung_nodules_detection | Author: Shicoder
def train(model_name, fold_count, train_full_set=False, load_weights_path=None, ndsb3_holdout=0, manual_labels=True):
    batch_size = 16
    train_files, holdout_files = get_train_holdout_files(train_percentage=80, ndsb3_holdout=ndsb3_holdout, manual_labels=manual_labels, full_luna_set=train_full_set, fold_count=fold_count)

    # train_files = train_files[:100]
    # holdout_files = train_files[:10]
    train_gen = data_generator(batch_size, train_files, True)
    print('train_gen:', train_gen)  # the generator object itself, not a length
    holdout_gen = data_generator(batch_size, holdout_files, False)
    for i in range(0, 10):
        tmp = next(holdout_gen)
        cube_img = tmp[0][0].reshape(CUBE_SIZE, CUBE_SIZE, CUBE_SIZE, 1)
        cube_img = cube_img[:, :, :, 0]
        cube_img *= 255.
        cube_img += MEAN_PIXEL_VALUE
        # helpers.save_cube_img("c:/tmp/img_" + str(i) + ".png", cube_img, 4, 8)
        # print(tmp)

    learnrate_scheduler = LearningRateScheduler(step_decay)
    model = get_net(load_weight_path=load_weights_path)
    holdout_txt = "_h" + str(ndsb3_holdout) if manual_labels else ""
    if train_full_set:
        holdout_txt = "_fs" + holdout_txt
    checkpoint = ModelCheckpoint("workdir/model_" + model_name + "_" + holdout_txt + "_e" + "{epoch:02d}-{val_loss:.4f}.hd5", monitor='val_loss', verbose=1, save_best_only=not train_full_set, save_weights_only=False, mode='auto', period=1)
    checkpoint_fixed_name = ModelCheckpoint("workdir/model_" + model_name + "_" + holdout_txt + "_best.hd5", monitor='val_loss', verbose=1, save_best_only=True, save_weights_only=False, mode='auto', period=1)
    model.fit_generator(train_gen, len(train_files) / 1, 12, validation_data=holdout_gen, nb_val_samples=len(holdout_files) / 1, callbacks=[checkpoint, checkpoint_fixed_name, learnrate_scheduler])
    model.save("workdir/model_" + model_name + "_" + holdout_txt + "_end.hd5")
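step_decay is defined elsewhere in this project. A hedged reconstruction of its typical shape (the constants here are illustrative and may differ from the project's actual values):

def step_decay(epoch):
    # Illustrative only: a high rate for a short warm-up, then a 10x drop.
    return 0.001 if epoch <= 5 else 0.0001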
Project: Kutils | Author: ishank26
def on_epoch_end(self, epoch, logs={}):
        loss = logs.get('loss')  # look the loss up by key, not by dict position
        print "loss: ", loss
        old_lr = self.model.optimizer.lr.get_value()  # current lr
        new_lr = old_lr * np.exp(loss)  # scale lr by exp(loss)
        k.set_value(self.model.optimizer.lr, new_lr)


# decaylr=LearningRateScheduler(decay_sch)


# checkpoint=ModelCheckpoint("weights/adam_noep{0}_batch{1}_seq_{2}.hdf5".format(\
# no_epochs,batch, seq_length), monitor='loss', verbose=0,
# save_best_only=True, save_weights_only=False, mode='min')
Project: textgenrnn | Author: minimaxir
def train_on_texts(self, texts, batch_size=128, num_epochs=50, verbose=1):

        # Encode chars as X and y.
        X = []
        y = []

        for text in texts:
            subset_x, subset_y = textgenrnn_encode_training(text,
                                                            self.META_TOKEN)
            for i in range(len(subset_x)):
                X.append(subset_x[i])
                y.append(subset_y[i])

        X = np.array(X)
        y = np.array(y)

        X = self.tokenizer.texts_to_sequences(X)
        X = sequence.pad_sequences(X, maxlen=40)
        y = textgenrnn_encode_cat(y, self.vocab)

        base_lr = 2e-3

        # scheduler function must be defined inline.
        def lr_linear_decay(epoch):
            return (base_lr * (1 - (epoch / num_epochs)))

        self.model.fit(X, y, batch_size=batch_size, epochs=num_epochs,
                       callbacks=[LearningRateScheduler(lr_linear_decay)],
                       verbose=verbose)
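Defining the schedule inside train_on_texts lets it close over base_lr and num_epochs: the rate decays linearly from 2e-3, reaching base_lr/num_epochs at the final (0-based) epoch index.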
Project: EUSIPCO2017 | Author: Veleslavia
def train(self):
        model = self.model_module.build_model(IRMAS_N_CLASSES)

        early_stopping = EarlyStopping(monitor='val_loss', patience=EARLY_STOPPING_EPOCH)
        save_clb = ModelCheckpoint(
            "{weights_basepath}/{model_path}/".format(
                weights_basepath=MODEL_WEIGHT_BASEPATH,
                model_path=self.model_module.BASE_NAME) +
            "epoch.{epoch:02d}-val_loss.{val_loss:.3f}-fbeta.{val_fbeta_score:.3f}"+"-{key}.hdf5".format(
                key=self.model_module.MODEL_KEY),
            monitor='val_loss',
            save_best_only=True)
        lrs = LearningRateScheduler(lambda epoch_n: self.init_lr / (2**(epoch_n//SGD_LR_REDUCE)))
        model.summary()
        model.compile(optimizer=self.optimizer,
                      loss='categorical_crossentropy',
                      metrics=['accuracy', fbeta_score])

        history = model.fit_generator(self._batch_generator(self.X_train, self.y_train),
                                      samples_per_epoch=self.model_module.SAMPLES_PER_EPOCH,
                                      nb_epoch=MAX_EPOCH_NUM,
                                      verbose=2,
                                      callbacks=[save_clb, early_stopping, lrs],
                                      validation_data=self._batch_generator(self.X_val, self.y_val),
                                      nb_val_samples=self.model_module.SAMPLES_PER_VALIDATION,
                                      class_weight=None,
                                      nb_worker=1)

        pickle.dump(history.history, open('{history_basepath}/{model_path}/history_{model_key}.pkl'.format(
            history_basepath=MODEL_HISTORY_BASEPATH,
            model_path=self.model_module.BASE_NAME,
            model_key=self.model_module.MODEL_KEY),
            'wb'))  # pickle requires a binary-mode file
Project: tying-wv-and-wc | Author: icoxfog417
def get_lr_scheduler(self):
        def scheduler(epoch):
            epoch_interval = K.get_value(self.epoch_interval)
            if epoch != 0 and (epoch + 1) % epoch_interval == 0:
                lr = K.get_value(self.lr)
                decay = K.get_value(self.decay)
                K.set_value(self.lr, lr * decay)
                if self.verbose:
                    print(self.get_config())
            return K.get_value(self.lr)

        return LearningRateScheduler(scheduler)
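Unlike the stateless schedules above, this one mutates backend state: every epoch_interval epochs it multiplies the stored self.lr by self.decay and returns the updated value, so the decay compounds over the course of training.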
Project: dem | Author: hengyuan-hu
def train(self, batch_size, num_epoch, lr_schedule):
        opt = keras.optimizers.SGD(lr=lr_schedule(0), momentum=0.9, nesterov=True)
        callback_list = [LearningRateScheduler(lr_schedule)]
        self.ae.compile(optimizer=opt, loss='mse')
        history = self.ae.fit(
            self.dataset.train_xs, self.dataset.train_xs,
            nb_epoch=num_epoch,
            batch_size=batch_size,
            validation_data=(self.dataset.test_xs, self.dataset.test_xs),
            shuffle=True, callbacks=callback_list)
        self.history = history.history
Project: kaggle_ndsb2017 | Author: juliandewit
def train(model_name, fold_count, train_full_set=False, load_weights_path=None, ndsb3_holdout=0, manual_labels=True):
    batch_size = 16
    train_files, holdout_files = get_train_holdout_files(train_percentage=80, ndsb3_holdout=ndsb3_holdout, manual_labels=manual_labels, full_luna_set=train_full_set, fold_count=fold_count)

    # train_files = train_files[:100]
    # holdout_files = train_files[:10]
    train_gen = data_generator(batch_size, train_files, True)
    holdout_gen = data_generator(batch_size, holdout_files, False)
    for i in range(0, 10):
        tmp = next(holdout_gen)
        cube_img = tmp[0][0].reshape(CUBE_SIZE, CUBE_SIZE, CUBE_SIZE, 1)
        cube_img = cube_img[:, :, :, 0]
        cube_img *= 255.
        cube_img += MEAN_PIXEL_VALUE
        # helpers.save_cube_img("c:/tmp/img_" + str(i) + ".png", cube_img, 4, 8)
        # print(tmp)

    learnrate_scheduler = LearningRateScheduler(step_decay)
    model = get_net(load_weight_path=load_weights_path)
    holdout_txt = "_h" + str(ndsb3_holdout) if manual_labels else ""
    if train_full_set:
        holdout_txt = "_fs" + holdout_txt
    checkpoint = ModelCheckpoint("workdir/model_" + model_name + "_" + holdout_txt + "_e" + "{epoch:02d}-{val_loss:.4f}.hd5", monitor='val_loss', verbose=1, save_best_only=not train_full_set, save_weights_only=False, mode='auto', period=1)
    checkpoint_fixed_name = ModelCheckpoint("workdir/model_" + model_name + "_" + holdout_txt + "_best.hd5", monitor='val_loss', verbose=1, save_best_only=True, save_weights_only=False, mode='auto', period=1)
    model.fit_generator(train_gen, len(train_files) / 1, 12, validation_data=holdout_gen, nb_val_samples=len(holdout_files) / 1, callbacks=[checkpoint, checkpoint_fixed_name, learnrate_scheduler])
    model.save("workdir/model_" + model_name + "_" + holdout_txt + "_end.hd5")
Project: loss-correction | Author: giorgiop
def lr_scheduler(self):

        def scheduler(epoch):
            if epoch > 80:
                return 0.001
            elif epoch > 40:
                return 0.01
            else:
                return 0.1

        print('LR scheduler')
        self.scheduler = LearningRateScheduler(scheduler)
Project: loss-correction | Author: giorgiop
def lr_scheduler(self):

        def scheduler(epoch):
            if epoch > 120:
                return 0.001
            elif epoch > 80:
                return 0.01
            else:
                return 0.1

        print('LR scheduler')
        self.scheduler = LearningRateScheduler(scheduler)
Project: FaceDetection | Author: youssefhb
def resume_training_from_snapshot(weights_file,callbacks_list,initialEpoch=0):

    model = create_model()
    sgd = SGD(lr=0.01, decay=0.0004, momentum=0.9, nesterov=True)

    # learning schedule callback; append it so it is actually passed to fit_generator
    lrate = LearningRateScheduler(step_decay)
    callbacks_list = callbacks_list + [lrate]

    print "Loads weights from a snapshot"
    # Loads parameters from a snapshot
    model.load_weights(weights_file)

    print "compliling the model"
    model.compile(optimizer=sgd,loss=class_mode+'_crossentropy',metrics=['accuracy'])

    model.summary()

    train_generator,validation_generator = generateData()


    # initial_epoch: Epoch at which to start training (useful for resuming a previous training run)
    # steps_per_epoch: Total number of steps (batches of samples) to yield from generator 
    # before declaring one epoch finished and starting the next epoch. 
    # It should typically be equal to the number of unique samples of your dataset divided by the batch size.
    history = model.fit_generator(train_generator,
                        steps_per_epoch=614,
                        validation_data=validation_generator,
                        validation_steps=20,
                        epochs=1000,callbacks=callbacks_list,
                        verbose=1,initial_epoch=initialEpoch)

    saveModel(model)


# =================================================
#                       M A I N
# =================================================
Project: latplan | Author: guicho271828
def _build(self,input_shape):
        _encoder = self.build_encoder(input_shape)
        _decoder = self.build_decoder(input_shape)
        self.gs = self.build_gs()
        self.gs2 = self.build_gs()

        x = Input(shape=input_shape)
        z = Sequential([flatten, *_encoder, self.gs])(x)
        y = Sequential(_decoder)(flatten(z))

        z2 = Input(shape=(self.parameters['N'], self.parameters['M']))
        y2 = Sequential(_decoder)(flatten(z2))
        w2 = Sequential([*_encoder, self.gs2])(flatten(y2))

        data_dim = np.prod(input_shape)
        def rec(x, y):
            #return K.mean(K.binary_crossentropy(x,y))
            return bce(K.reshape(x,(K.shape(x)[0],data_dim,)),
                       K.reshape(y,(K.shape(x)[0],data_dim,)))

        def loss(x, y):
            return rec(x,y) + self.gs.loss()

        self.callbacks.append(LambdaCallback(on_epoch_end=self.gs.cool))
        self.callbacks.append(LambdaCallback(on_epoch_end=self.gs2.cool))
        self.custom_log_functions['tau'] = lambda: K.get_value(self.gs.tau)
        self.loss = loss
        self.metrics.append(rec)
        self.encoder     = Model(x, z)
        self.decoder     = Model(z2, y2)
        self.autoencoder = Model(x, y)
        self.autodecoder = Model(z2, w2)
        self.net = self.autoencoder
        y2_downsample = Sequential([
            Reshape((*input_shape,1)),
            MaxPooling2D((2,2))
            ])(y2)
        shape = K.int_shape(y2_downsample)[1:3]
        self.decoder_downsample = Model(z2, Reshape(shape)(y2_downsample))
        self.features = Model(x, Sequential([flatten, *_encoder[:-2]])(x))
        if 'lr_epoch' in self.parameters:
            ratio = self.parameters['lr_epoch']
        else:
            ratio = 0.5
        self.callbacks.append(
            LearningRateScheduler(lambda epoch: self.parameters['lr'] if epoch < self.parameters['full_epoch'] * ratio else self.parameters['lr']*0.1))
        self.custom_log_functions['lr'] = lambda: K.get_value(self.net.optimizer.lr)
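The inline lambda above is a two-phase schedule: the full self.parameters['lr'] for the first ratio*full_epoch epochs (ratio defaults to 0.5 when 'lr_epoch' is absent), then one tenth of that rate for the remainder of training.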
Project: segmentation-visualization-training | Author: tkwoo
def train_unet(self):

        img_size = self.flag.image_size
        batch_size = self.flag.batch_size
        epochs = self.flag.total_epoch

        datagen_args = dict(featurewise_center=False,  # set input mean to 0 over the dataset
                samplewise_center=False,  # set each sample mean to 0
                featurewise_std_normalization=False,  # divide inputs by std of the dataset
                samplewise_std_normalization=False,  # divide each input by its std
                zca_whitening=False,  # apply ZCA whitening
                rotation_range=5,  # randomly rotate images in the range (degrees, 0 to 180)
                width_shift_range=0.05,  # randomly shift images horizontally (fraction of total width)
                height_shift_range=0.05,  # randomly shift images vertically (fraction of total height)
                # fill_mode='constant',
                # cval=0.,
                horizontal_flip=False,  # randomly flip images
                vertical_flip=False)  # randomly flip images

        image_datagen = ImageDataGenerator(**datagen_args)
        mask_datagen = ImageDataGenerator(**datagen_args)

        seed = random.randrange(1, 1000)
        image_generator = image_datagen.flow_from_directory(
                    os.path.join(self.flag.data_path, 'train/IMAGE'),
                    class_mode=None, seed=seed, batch_size=batch_size, color_mode='grayscale')
        mask_generator = mask_datagen.flow_from_directory(
                    os.path.join(self.flag.data_path, 'train/GT'),
                    class_mode=None, seed=seed, batch_size=batch_size, color_mode='grayscale')
        config = tf.ConfigProto()
        # config.gpu_options.per_process_gpu_memory_fraction = 0.9
        config.gpu_options.allow_growth = True
        set_session(tf.Session(config=config))

        model = get_unet(self.flag)
        if self.flag.pretrained_weight_path is not None:
            model.load_weights(self.flag.pretrained_weight_path)

        if not os.path.exists(os.path.join(self.flag.ckpt_dir, self.flag.ckpt_name)):
            mkdir_p(os.path.join(self.flag.ckpt_dir, self.flag.ckpt_name))
        model_json = model.to_json()
        with open(os.path.join(self.flag.ckpt_dir, self.flag.ckpt_name, 'model.json'), 'w') as json_file:
            json_file.write(model_json)
        vis = callbacks.trainCheck(self.flag)
        model_checkpoint = ModelCheckpoint(
                    os.path.join(self.flag.ckpt_dir, self.flag.ckpt_name,'weights.{epoch:03d}.h5'), 
                    period=self.flag.total_epoch//10+1)
        learning_rate = LearningRateScheduler(self.lr_step_decay)
        model.fit_generator(
            self.train_generator(image_generator, mask_generator),
            steps_per_epoch= image_generator.n // batch_size,
            epochs=epochs,
            callbacks=[model_checkpoint, learning_rate, vis]
        )
Project: Ultras-Sound-Nerve-Segmentation---Kaggle | Author: Simoncarbo
def train_segment(train_imgs, train_masks, train_index,train_i,val_i,factor,factor_val):
    def dice_coef(y_true, y_pred):
        intersection = K.sum(K.sum(y_true * y_pred,axis = -1),axis = -1)
        sum_pred = K.sum(K.sum(y_pred,axis = -1),axis = -1)
        sum_true = K.sum(K.sum(y_true,axis = -1),axis = -1)

        weighting = K.greater_equal(sum_true,1)*factor+1
        return -K.mean(weighting*(2. * intersection  + smooth) / (sum_true + sum_pred + smooth))
    def dice_coef_wval(y_true, y_pred):
        intersection = K.sum(K.sum(y_true * y_pred,axis = -1),axis = -1)
        sum_pred = K.sum(K.sum(y_pred,axis = -1),axis = -1)
        sum_true = K.sum(K.sum(y_true,axis = -1),axis = -1)

        weighting = K.greater_equal(sum_true,1)*factor_val+1
        return -K.mean(weighting*(2. * intersection  + smooth) / (sum_true + sum_pred + smooth))

    model = models.segment()

    model.compile(optimizer =Adam(lr=1e-2), loss = dice_coef,metrics = [dice_coef_wval,dice_tresh,pres_acc])

    augmentation_ratio, data_generator = dm.data_generator_segment(nb_rows_small, nb_cols_small,nb_rows_mask_small, nb_cols_mask_small)

    def schedule(epoch):
        if epoch<=5:
            return 1e-2
        elif epoch<=10:
            return 5e-3
        elif epoch<=25:
            return 2e-3
        elif epoch<=40:
            return 1e-3
        else:
            return 5e-4
    lr_schedule = LearningRateScheduler(schedule)
    modelCheck = ModelCheckpoint('Saved/model2_weights_epoch_{epoch:02d}.hdf5', verbose=0, save_best_only=False)

    print('training starts...')
    epoch_history = model.fit_generator(\
    data_generator(train_imgs[train_i], train_masks[train_i], train_index[train_i],batch_size = len(np.unique(train_index[train_i,0]))), \
    samples_per_epoch = augmentation_ratio*len(train_i),nb_epoch = 50, callbacks = [lr_schedule,modelCheck], \
    validation_data = (train_imgs[val_i],train_masks[val_i]),max_q_size=10)

    return model, epoch_history

#==============================================================================
# Data importation and processing
#==============================================================================