Python caffe module: proto() code examples

We collected the following code examples from open-source Python projects to illustrate how caffe.proto is used.
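
Most of the examples below follow the same two-step pattern: instantiate a message class from caffe.proto.caffe_pb2 (BlobProto, Datum, NetParameter, SolverParameter, ...), then fill it either from a serialized binary file with ParseFromString or from .prototxt text with text_format.Merge. A minimal sketch of both directions, with placeholder file names:

from caffe.proto import caffe_pb2
from google.protobuf import text_format

# Binary protobuf (e.g. a mean blob): parse the raw bytes.
blob = caffe_pb2.BlobProto()
with open('mean.binaryproto', 'rb') as f:  # placeholder path
    blob.ParseFromString(f.read())

# Text protobuf (e.g. a network definition): merge the prototxt text.
net = caffe_pb2.NetParameter()
with open('train_val.prototxt') as f:  # placeholder path
    text_format.Merge(f.read(), net)
print(net.name, len(net.layer))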

Project: nimo    Author: wolfram2012
def protoBlobFileToND(proto_file):
    # BlobProto is binary-serialized protobuf, so read the file in binary mode.
    with open(proto_file, "rb") as f:
        data = f.read()

    if caffe_flag:
        mean_blob = caffe.proto.caffe_pb2.BlobProto()
    else:
        mean_blob = caffe_parse.caffe_pb2.BlobProto()

    mean_blob.ParseFromString(data)
    img_mean_np = np.array(mean_blob.data)
    img_mean_np = img_mean_np.reshape(
        mean_blob.channels, mean_blob.height, mean_blob.width
    )
    # Swap channels from Caffe BGR to RGB by reversing the channel axis
    # (copy, since the reversed slice is only a view).
    img_mean_np = img_mean_np[::-1, :, :].copy()
    return mx.nd.array(img_mean_np)
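
A usage sketch; the .binaryproto file name is a placeholder:

mean_nd = protoBlobFileToND('mean.binaryproto')  # placeholder mean file
print(mean_nd.shape)  # (channels, height, width)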
Project: mxnet-ssd    Author: zhreshold
def protoBlobFileToND(proto_file):
    # BlobProto is binary-serialized protobuf, so read the file in binary mode.
    with open(proto_file, "rb") as f:
        data = f.read()

    if caffe_flag:
        mean_blob = caffe.proto.caffe_pb2.BlobProto()
    else:
        mean_blob = caffe_parse.caffe_pb2.BlobProto()

    mean_blob.ParseFromString(data)
    img_mean_np = np.array(mean_blob.data)
    img_mean_np = img_mean_np.reshape(
        mean_blob.channels, mean_blob.height, mean_blob.width
    )
    # Swap channels from Caffe BGR to RGB by reversing the channel axis
    # (copy, since the reversed slice is only a view).
    img_mean_np = img_mean_np[::-1, :, :].copy()
    return mx.nd.array(img_mean_np)
Project: Triplet_Loss_SBIR    Author: TuBui
def LeNet(lmdb, batch_size):
  # our version of LeNet: a series of linear and simple nonlinear transformations
  n = caffe.NetSpec()

  n.data, n.label = L.Data(batch_size=batch_size, backend=P.Data.LMDB, source=lmdb,
                           transform_param=dict(scale=1./255), ntop=2)

  n.conv1 = L.Convolution(n.data, kernel_size=5, num_output=20, weight_filler=dict(type='xavier'))
  n.pool1 = L.Pooling(n.conv1, kernel_size=2, stride=2, pool=P.Pooling.MAX)
  n.conv2 = L.Convolution(n.pool1, kernel_size=5, num_output=50, weight_filler=dict(type='xavier'))
  n.pool2 = L.Pooling(n.conv2, kernel_size=2, stride=2, pool=P.Pooling.MAX)
  n.fc1 =   L.InnerProduct(n.pool2, num_output=500, weight_filler=dict(type='xavier'))
  n.relu1 = L.ReLU(n.fc1, in_place=True)
  n.score = L.InnerProduct(n.relu1, num_output=10, weight_filler=dict(type='xavier'))
  n.loss =  L.SoftmaxWithLoss(n.score, n.label)

  proto = n.to_proto()
  proto.name = 'LeNet'
  return proto
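
Since to_proto() returns a NetParameter message, str(proto) is valid prototxt text that Caffe can load. A usage sketch with placeholder paths:

with open('lenet_train.prototxt', 'w') as f:     # placeholder output path
    f.write(str(LeNet('mnist_train_lmdb', 64)))  # placeholder LMDB path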
Project: additions_mxnet    Author: eldercrow
def protoBlobFileToND(proto_file):
    # BlobProto is binary-serialized protobuf, so read the file in binary mode.
    with open(proto_file, "rb") as f:
        data = f.read()

    if caffe_flag:
        mean_blob = caffe.proto.caffe_pb2.BlobProto()
    else:
        mean_blob = caffe_parse.caffe_pb2.BlobProto()

    mean_blob.ParseFromString(data)
    img_mean_np = np.array(mean_blob.data)
    img_mean_np = img_mean_np.reshape(
        mean_blob.channels, mean_blob.height, mean_blob.width
    )
    # Swap channels from Caffe BGR to RGB by reversing the channel axis
    # (copy, since the reversed slice is only a view).
    img_mean_np = img_mean_np[::-1, :, :].copy()
    return mx.nd.array(img_mean_np)
Project: CaffeSVD    Author: wkcn
def read_db(db_name):
    lmdb_env = lmdb.open(db_name)
    lmdb_txn = lmdb_env.begin()
    lmdb_cursor = lmdb_txn.cursor()
    datum = caffe.proto.caffe_pb2.Datum()

    X = []
    y = []
    cnts = {}
    for key, value in lmdb_cursor:
        datum.ParseFromString(value)
        label = datum.label
        data = caffe.io.datum_to_array(datum)
        #data = data.swapaxes(0, 2).swapaxes(0, 1)
        X.append(data)
        y.append(label)
        if label not in cnts:
            cnts[label] = 0
        cnts[label] += 1
        #plt.imshow(data)
        #plt.show()
    return X, np.array(y), cnts
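
A usage sketch; 'train_lmdb' is a placeholder database directory:

X, y, cnts = read_db('train_lmdb')
print('%d images, %d classes' % (len(X), len(cnts)))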
Project: CaffeSVD    Author: wkcn
def read_db(db_name):
    lmdb_env = lmdb.open(db_name)
    lmdb_txn = lmdb_env.begin()
    lmdb_cursor = lmdb_txn.cursor()
    datum = caffe.proto.caffe_pb2.Datum()

    X = []
    y = []
    for key, value in lmdb_cursor:
        datum.ParseFromString(value)
        label = datum.label
        data = caffe.io.datum_to_array(datum)
        #data = data.swapaxes(0, 2).swapaxes(0, 1)
        X.append(data)
        y.append(label)
        #plt.imshow(data)
        #plt.show()
    return X, np.array(y)
Project: nimo    Author: wolfram2012
def read_proto_solver_file(file_path):
    if caffe_flag:
        solver_config = caffe.proto.caffe_pb2.NetParameter()
    else:
        solver_config = caffe_parse.caffe_pb2.NetParameter()
    return read_proto_file(file_path, solver_config)
Project: sesame-paste-noodle    Author: aissehust
def getMeanImage(mean_path):
    # The following code is from https://github.com/BVLC/caffe/issues/290
    blob = caffe.proto.caffe_pb2.BlobProto()
    with open(mean_path, 'rb') as f:
        blob.ParseFromString(f.read())
    arr = np.array(caffe.io.blobproto_to_array(blob))
    out = arr[0]
    # The output .npy path is taken from the command line (second argument).
    np.save(sys.argv[2], out)
Project: score-zeroshot    Author: pedro-morgado
def generate_solver_proto(solver_fn, model_fn, trainOpts):
        from caffe.proto import caffe_pb2
        solver = caffe_pb2.SolverParameter()
        solver.net = model_fn

        if trainOpts.num_lr_decays > 0:
            solver.lr_policy = 'step'
            solver.gamma = trainOpts.lr_decay_factor
            solver.stepsize = int(trainOpts.iters/(trainOpts.num_lr_decays+1))
        else:
            solver.lr_policy = 'fixed'
        solver.base_lr = trainOpts.init_lr
        solver.max_iter = trainOpts.iters
        solver.display = 20
        solver.momentum = 0.9
        solver.weight_decay = trainOpts.paramReg

        solver.test_state.add()
        solver.test_state.add()
        solver.test_state[0].stage.append('TestRecognition')
        solver.test_state[1].stage.append('TestZeroShot')
        solver.test_iter.extend([20, 20])
        solver.test_interval = 100

        solver.snapshot = 5000
        solver.snapshot_prefix = os.path.splitext(model_fn)[0]

        with open(solver_fn, 'w') as f:
            f.write(str(solver))
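
str() on a SolverParameter message yields its text (prototxt) serialization, so the file written above can be loaded directly with caffe.get_solver. A hedged usage sketch: the paths are placeholders, and the SimpleNamespace merely mimics the fields of the project's trainOpts object:

from types import SimpleNamespace

# Placeholder options covering every field generate_solver_proto reads.
opts = SimpleNamespace(num_lr_decays=2, lr_decay_factor=0.1, iters=30000,
                       init_lr=0.01, paramReg=0.0005)
generate_solver_proto('solver.prototxt', 'train_val.prototxt', opts)
solver = caffe.get_solver('solver.prototxt')  # assumes pycaffe is importable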
Project: score-zeroshot    Author: pedro-morgado
def prep_for_deploy(self, batch_size, source_net=False, target_net=False, deploy_fn='deploy.proto', caffemodel_fn='score.caffemodel', gpu_id=0):
        caffe.set_mode_gpu()
        caffe.set_device(gpu_id)

        self.generate_deploy_proto(deploy_fn, batch_size, source_net=source_net, target_net=target_net)
        self.deploy = caffe.Net(deploy_fn, caffe.TEST, weights=caffemodel_fn)

        self._set_semantics(self.deploy, source=False, init_cw=False)
        self._set_semantics(self.deploy, source=True, init_cw=False)
Project: mxnet-ssd    Author: zhreshold
def read_proto_solver_file(file_path):
    if caffe_flag:
        solver_config = caffe.proto.caffe_pb2.NetParameter()
    else:
        solver_config = caffe_parse.caffe_pb2.NetParameter()
    return read_proto_file(file_path, solver_config)
Project: deepwater-nae    Author: h2oai
def solver_graph(self):
        proto = caffe_pb2.SolverParameter()
        proto.type = self.cmd.solver_type
        if self.device is not None:
            proto.solver_mode = caffe_pb2.SolverParameter.SolverMode.Value(
                'GPU')
            proto.device_id = self.device
        else:
            proto.solver_mode = caffe_pb2.SolverParameter.SolverMode.Value(
                'CPU')
        proto.lr_policy = 'fixed'
        proto.base_lr = self.cmd.learning_rate
        proto.momentum = self.cmd.momentum
        proto.max_iter = int(2e9)
        proto.random_seed = self.cmd.random_seed + self.rank
        print('Setting seed ', proto.random_seed, file=sys.stderr)
        proto.display = 1

        batch = int(self.cmd.input_shape[0] / self.size)
        if self.cmd.graph:
            dirname = os.path.dirname(os.path.realpath(__file__))
            proto.net = dirname + '/' + self.cmd.graph + '.prototxt'
        else:
            proto.train_net_param.MergeFrom(self.net_def(caffe.TRAIN))
            proto.test_net_param.add().MergeFrom(self.net_def(caffe.TEST))

        proto.test_iter.append(1)
        proto.test_interval = 999999999  # cannot disable or set to 0
        proto.test_initialization = False
        return proto
Project: deep_share    Author: luyongxi
def __init__(self, solver_prototxt=None, path=None, base_lr=0.01, lr_policy="step", 
        gamma=0.1, stepsize=20000, momentum=0.9, weight_decay=0.0005,
        regularization_type="L2", clip_gradients=None):

        assert (path is not None) or (solver_prototxt is not None),\
            'Need to specify either path or solver_prototxt.'

        self._solver = caffe_pb2.SolverParameter()

        if solver_prototxt is not None:
            self._solver_prototxt = solver_prototxt
            with open(solver_prototxt, 'rt') as f:
                pb2.text_format.Merge(f.read(), self._solver)                                   
        elif path is not None:
            self._solver_prototxt = osp.join(path, 'solver.prototxt')
            # update proto object
            self._solver.net = osp.join(path, 'train_val.prototxt')
            self._solver.base_lr = base_lr
            self._solver.lr_policy = lr_policy
            self._solver.gamma = gamma
            self._solver.stepsize = stepsize
            self._solver.momentum = momentum
            self._solver.weight_decay = weight_decay
            self._solver.regularization_type = regularization_type
            # caffe solver snapshotting is disabled
            self._solver.snapshot = 0
            # shut down caffe display
            self._solver.display = 0
            # shut down caffe validation
            self._solver.test_iter.append(0)
            self._solver.test_interval = 1000
            if clip_gradients is not None:
                self._solver.clip_gradients = clip_gradients
Project: Land_Use_CNN    Author: BUPTLdy
def caffenet(data, label=None, train=True, num_classes=1000,
             classifier_name='fc8', learn_all=False):
    """Returns a NetSpec specifying CaffeNet, following the original proto text
       specification (./models/bvlc_reference_caffenet/train_val.prototxt)."""
    n = caffe.NetSpec()
    n.data = data
    param = learned_param if learn_all else frozen_param
    n.conv1, n.relu1 = conv_relu(n.data, 11, 96, stride=4, param=param)
    n.pool1 = max_pool(n.relu1, 3, stride=2)
    n.norm1 = L.LRN(n.pool1, local_size=5, alpha=1e-4, beta=0.75)
    n.conv2, n.relu2 = conv_relu(n.norm1, 5, 256, pad=2, group=2, param=param)
    n.pool2 = max_pool(n.relu2, 3, stride=2)
    n.norm2 = L.LRN(n.pool2, local_size=5, alpha=1e-4, beta=0.75)
    n.conv3, n.relu3 = conv_relu(n.norm2, 3, 384, pad=1, param=param)
    n.conv4, n.relu4 = conv_relu(n.relu3, 3, 384, pad=1, group=2, param=param)
    n.conv5, n.relu5 = conv_relu(n.relu4, 3, 256, pad=1, group=2, param=param)
    n.pool5 = max_pool(n.relu5, 3, stride=2)
    n.fc6, n.relu6 = fc_relu(n.pool5, 4096, param=param)
    if train:
        n.drop6 = fc7input = L.Dropout(n.relu6, in_place=True)
    else:
        fc7input = n.relu6
    n.fc7, n.relu7 = fc_relu(fc7input, 4096, param=param)
    if train:
        n.drop7 = fc8input = L.Dropout(n.relu7, in_place=True)
    else:
        fc8input = n.relu7
    # always learn fc8 (param=learned_param)
    fc8 = L.InnerProduct(fc8input, num_output=num_classes, param=learned_param)
    # give fc8 the name specified by argument `classifier_name`
    n.__setattr__(classifier_name, fc8)
    if not train:
        n.probs = L.Softmax(fc8)
    if label is not None:
        n.label = label
        n.loss = L.SoftmaxWithLoss(fc8, n.label)
        n.acc = L.Accuracy(fc8, n.label)
    # write the net to a temporary file and return its filename
    with open('/home/ldy/workspace/caffe/models/finetune_UCMerced_LandUse/deploy.prototxt','w') as f:
        f.write(str(n.to_proto()))
        return f.name
Project: Triplet_Loss_SBIR    Author: TuBui
def SketchTriplet_anchor(out_dim):
  n = caffe.NetSpec()
  n.data_a              = L.Input(name='data',
                                  shape=dict(dim=[1,1,225,225]))
  n.conv1_a, n.relu1_a  = conv_relu_triplet_dep(n.data_a, 15, 64, stride = 3)
  n.pool1_a = pooling(n.relu1_a, 3, stride=2)

  n.conv2_a, n.relu2_a  = conv_relu_triplet_dep(n.pool1_a, 5, 128)
  n.pool2_a = pooling(n.relu2_a, 3, stride=2)

  n.conv3_a, n.relu3_a  = conv_relu_triplet_dep(n.pool2_a, 3, 256)

  n.conv4_a, n.relu4_a  = conv_relu_triplet_dep(n.relu3_a, 3, 256)

  n.conv5_a, n.relu5_a  = conv_relu_triplet_dep(n.relu4_a, 3, 256)
  n.pool5_a = pooling(n.relu5_a, 3, stride=2)

  n.fc6_a, n.relu6_a    = fc_relu_triplet_dep(n.pool5_a, 512)

  n.fc7_a, n.relu7_a    = fc_relu_triplet_dep(n.relu6_a, 512)

  #n.fc8_a, n.feat_a     = fc_norm_triplet_dep(n.relu7_a, out_dim)
  n.feat_a     = fc_triplet_dep(n.relu7_a, out_dim)
  proto = n.to_proto()
  proto.name = 'SketchTriplet'
  return proto
Project: Triplet_Loss_SBIR    Author: TuBui
def SketchTriplet_pos(out_dim):
  n = caffe.NetSpec()
  n.data_p              = L.Input(name='data',
                                  shape=dict(dim=[1,1,225,225]))
  n.conv1_p, n.relu1_p  = conv_relu_triplet_dep(n.data_p, 15, 64, stride = 3)
  n.pool1_p = pooling(n.relu1_p, 3, stride=2)

  n.conv2_p, n.relu2_p  = conv_relu_triplet_dep(n.pool1_p, 5, 128)
  n.pool2_p = pooling(n.relu2_p, 3, stride=2)

  n.conv3_p, n.relu3_p  = conv_relu_triplet_dep(n.pool2_p, 3, 256)

  n.conv4_p, n.relu4_p  = conv_relu_triplet_dep(n.relu3_p, 3, 256)

  n.conv5_p, n.relu5_p  = conv_relu_triplet_dep(n.relu4_p, 3, 256)
  n.pool5_p = pooling(n.relu5_p, 3, stride=2)

  n.fc6_p, n.relu6_p    = fc_relu_triplet_dep(n.pool5_p, 512)

  n.fc7_p, n.relu7_p    = fc_relu_triplet_dep(n.relu6_p, 512)

  #n.fc8_p, n.feat_p     = fc_norm_triplet_dep(n.relu7_p, out_dim)
  n.feat_p     = fc_triplet_dep(n.relu7_p, out_dim)
  proto = n.to_proto()
  proto.name = 'SketchTriplet'
  return proto
Project: Triplet_Loss_SBIR    Author: TuBui
def SketchTriplet_anchor(out_dim):
  n = caffe.NetSpec()
  n.data_a              = L.Input(name='data',
                                  shape=dict(dim=[1,1,225,225]))
  n.conv1_a, n.relu1_a  = conv_relu_triplet_dep(n.data_a, 15, 64, stride = 3)
  n.pool1_a = pooling(n.relu1_a, 3, stride=2)

  n.conv2_a, n.relu2_a  = conv_relu_triplet_dep(n.pool1_a, 5, 128)
  n.pool2_a = pooling(n.relu2_a, 3, stride=2)

  n.conv3_a, n.relu3_a  = conv_relu_triplet_dep(n.pool2_a, 3, 256)

  n.conv4_a, n.relu4_a  = conv_relu_triplet_dep(n.relu3_a, 3, 256)

  n.conv5_a, n.relu5_a  = conv_relu_triplet_dep(n.relu4_a, 3, 256)
  n.pool5_a = pooling(n.relu5_a, 3, stride=2)

  n.fc6_a, n.relu6_a    = fc_relu_triplet_dep(n.pool5_a, 512)

  n.fc7_a, n.relu7_a    = fc_relu_triplet_dep(n.relu6_a, 512)

  #n.fc8_a, n.feat_a     = fc_norm_triplet_dep(n.relu7_a, out_dim)
  n.feat_a     = fc_triplet_dep(n.relu7_a, out_dim)
  n.norm_a = L.Normalize(n.feat_a,in_place=True)

  proto = n.to_proto()
  proto.name = 'SketchTriplet'
  return proto
Project: Triplet_Loss_SBIR    Author: TuBui
def siamese_anchor(out_dim=100):
  n = caffe.NetSpec()
  n.data_a              = L.Input(name='data',
                                  shape=dict(dim=[1,1,225,225]))
  n.conv1_a, n.relu1_a  = conv_relu_triplet_dep(n.data_a, 15, 64, stride = 3)
  n.pool1_a = pooling(n.relu1_a, 3, stride=2)

  n.conv2_a, n.relu2_a  = conv_relu_triplet_dep(n.pool1_a, 5, 128)
  n.pool2_a = pooling(n.relu2_a, 3, stride=2)

  n.conv3_a, n.relu3_a  = conv_relu_triplet_dep(n.pool2_a, 3, 256)

  n.conv4_a, n.relu4_a  = conv_relu_triplet_dep(n.relu3_a, 3, 256)

  n.conv5_a, n.relu5_a  = conv_relu_triplet_dep(n.relu4_a, 3, 256)
  n.pool5_a = pooling(n.relu5_a, 3, stride=2)

  n.fc6_a, n.relu6_a    = fc_relu_triplet_dep(n.pool5_a, 512)

  n.fc7_a, n.relu7_a    = fc_relu_triplet_dep(n.relu6_a, 512)

  #n.fc8_a, n.feat_a     = fc_norm_triplet_dep(n.relu7_a, out_dim)
  n.feat_a     = fc_triplet_dep(n.relu7_a, out_dim)
  #n.norm_a = L.Normalize(n.feat_a,in_place=True)

  proto = n.to_proto()
  proto.name = 'SketchTriplet'
  return proto
Project: Triplet_Loss_SBIR    Author: TuBui
def siamese_pos(out_dim=100):
  n = caffe.NetSpec()
  n.data_p              = L.Input(name='data',
                                  shape=dict(dim=[1,1,225,225]))
  n.conv1_p, n.relu1_p  = conv_relu_triplet_dep(n.data_p, 15, 64, stride = 3)
  n.pool1_p = pooling(n.relu1_p, 3, stride=2)

  n.conv2_p, n.relu2_p  = conv_relu_triplet_dep(n.pool1_p, 5, 128)
  n.pool2_p = pooling(n.relu2_p, 3, stride=2)

  n.conv3_p, n.relu3_p  = conv_relu_triplet_dep(n.pool2_p, 3, 256)

  n.conv4_p, n.relu4_p  = conv_relu_triplet_dep(n.relu3_p, 3, 256)

  n.conv5_p, n.relu5_p  = conv_relu_triplet_dep(n.relu4_p, 3, 256)
  n.pool5_p = pooling(n.relu5_p, 3, stride=2)

  n.fc6_p, n.relu6_p    = fc_relu_triplet_dep(n.pool5_p, 512)

  n.fc7_p, n.relu7_p    = fc_relu_triplet_dep(n.relu6_p, 512)

  #n.fc8_p, n.feat_p     = fc_norm_triplet_dep(n.relu7_p, out_dim)
  n.feat_p     = fc_triplet_dep(n.relu7_p, out_dim)
  #n.norm_p = L.Normalize(n.feat_p,in_place=True)

  proto = n.to_proto()
  proto.name = 'SketchTriplet'
  return proto
Project: mxnet_tk1    Author: starimpact
def readProtoSolverFile(filepath):
    if caffe_flag:
        solver_config = caffe.proto.caffe_pb2.NetParameter()
    else:
        solver_config = caffe_parse.caffe_pb2.NetParameter()
    return readProtoFile(filepath, solver_config)
Project: additions_mxnet    Author: eldercrow
def read_proto_solver_file(file_path):
    if caffe_flag:
        solver_config = caffe.proto.caffe_pb2.NetParameter()
    else:
        solver_config = caffe_parse.caffe_pb2.NetParameter()
    return read_proto_file(file_path, solver_config)
Project: have-fun-with-machine-learning    Author: humphd
def setup_caffe(caffe_root):
    # Load Caffe's Python interface from the specified path
    sys.path.insert(0, os.path.join(caffe_root, 'python'))
    global caffe
    global caffe_pb2
    import caffe
    from caffe.proto import caffe_pb2

    # Set Caffe to use CPU mode so this will work on as many machines as possible.
    caffe.set_mode_cpu()
Project: ternarynet    Author: czhu95
def get_caffe_pb():
    dir = get_dataset_path('caffe')
    caffe_pb_file = os.path.join(dir, 'caffe_pb2.py')
    if not os.path.isfile(caffe_pb_file):
        download(CAFFE_PROTO_URL, dir)
        ret = os.system('cd {} && protoc caffe.proto --python_out .'.format(dir))
        assert ret == 0, \
                "caffe proto compilation failed! Did you install protoc?"
    import imp
    return imp.load_source('caffepb', caffe_pb_file)
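
The returned module exposes the generated protobuf classes, so it can stand in for caffe.proto.caffe_pb2 when Caffe itself is not installed. A usage sketch:

caffepb = get_caffe_pb()
datum = caffepb.Datum()  # same message class as caffe.proto.caffe_pb2.Datum
datum.channels, datum.height, datum.width = 3, 32, 32
datum.label = 1
raw = datum.SerializeToString()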
Project: score-zeroshot    Author: pedro-morgado
def _score_proto(self, xFeat, source_net=False, target_net=False, mult=1., deploy=False):
        from caffe.proto import caffe_pb2
        ns = self.netspec
        w_params = {'lr_mult': mult, 'decay_mult': mult}

        # Compute semantic space
        name = 'SCoRe/sem/fc1'
        layer_params = dict(weight_filler=FC_W_INIT, param=[w_params]) if not deploy else {}
        x = ns[name] = L.InnerProduct(xFeat, name=name, num_output=sum(self.code_dim), bias_term=False, **layer_params)

        # Note: In the case of completely binary semantics (Attributes), the two layers (codewords + selector) are collapsed into 'SCoRe/obj/fc'.
        # Otherwise, semantic state scores are first computed in SCoRe/sem/fc2 and then grouped into class scores by a selector in SCoRe/obj/fc.
        # The selector is always kept fixed; the codewords are learned whenever code_coeff < inf.
        xSem = 'SCoRe/sem/fc1' if self.semantics == ATTRIBUTES else 'SCoRe/sem/fc2'
        xObj = 'SCoRe/obj/fc'
        lCW = xObj + '/params' if self.semantics == ATTRIBUTES else xSem + '/params'
        if self.semantics != ATTRIBUTES:
            w_params = {'name': xSem+'/params',
                        'share_mode': caffe_pb2.ParamSpec.STRICT,
                        'lr_mult': mult if self.code_coeff < np.inf else 0.0,       # Lock weights if code_coeff is inf
                        'decay_mult': mult if self.code_coeff < np.inf else 0.0}
            layer_params = dict(weight_filler=FC_W_INIT, param=[w_params]) if not deploy else {}
            ns[xSem] = L.InnerProduct(x, name=xSem, num_output=sum(self.num_states), bias_term=False, **layer_params)

        # Compute object scores
        if source_net:
            w_params = {'name': xObj+'/params',
                        'share_mode': caffe_pb2.ParamSpec.STRICT,
                        'lr_mult': mult if self.code_coeff < np.inf and self.semantics == ATTRIBUTES else 0.0,     # If Attributes, then codewords are used in this layer
                        'decay_mult': mult if self.code_coeff < np.inf and self.semantics == ATTRIBUTES else 0.0}  # Lock weights if code_coeff is inf
            layer_params = dict(weight_filler=FC_W_INIT, param=[w_params],
                                include=dict(not_stage='TestZeroShot')) if not deploy else {}
            ns[xObj] = L.InnerProduct(ns[xSem], name=xObj, num_output=len(self.train_classes), bias_term=False, **layer_params)

        if target_net:
            name = xObj+'_target'
            w_params = {'name': name+'/params', 'share_mode': caffe_pb2.ParamSpec.STRICT,
                        'lr_mult': 0.0, 'decay_mult': 0.0}
            layer_params = dict(weight_filler=FC_W_INIT, param=[w_params],
                                include=dict(phase=caffe.TEST, stage='TestZeroShot')) if not deploy else {}

            # NetSpec cannot handle two layers with same top blob defined for different phases/stages.
            # Workaround: Set in_place=True with no inputs, then define bottom and top fields manually.
            ns[name] = L.InnerProduct(name=name, bottom=[xSem], ntop=1, top=[xObj], in_place=True,
                                      num_output=len(self.test_classes), bias_term=False, **layer_params)
        return xObj, xSem, lCW
Project: DeepLearning    Author: corecai163
def caffenet(data, label=None, train=True, num_classes=1000,
             classifier_name='fc8', learn_all=False):
    """Returns a NetSpec specifying CaffeNet, following the original proto text
       specification (./models/bvlc_reference_caffenet/train_val.prototxt)."""
    n = caffe.NetSpec()
    n.data = data
    param = learned_param if learn_all else frozen_param
    n.conv1, n.relu1 = conv_relu(n.data, 11, 96, stride=4, param=param)
    n.pool1 = max_pool(n.relu1, 3, stride=2)
    n.norm1 = L.LRN(n.pool1, local_size=5, alpha=1e-4, beta=0.75)
    n.conv2, n.relu2 = conv_relu(n.norm1, 5, 256, pad=2, group=2, param=param)
    n.pool2 = max_pool(n.relu2, 3, stride=2)
    n.norm2 = L.LRN(n.pool2, local_size=5, alpha=1e-4, beta=0.75)
    n.conv3, n.relu3 = conv_relu(n.norm2, 3, 384, pad=1, param=param)
    n.conv4, n.relu4 = conv_relu(n.relu3, 3, 384, pad=1, group=2, param=param)
    n.conv5, n.relu5 = conv_relu(n.relu4, 3, 256, pad=1, group=2, param=param)
    n.pool5 = max_pool(n.relu5, 3, stride=2)
    n.fc6, n.relu6 = fc_relu(n.pool5, 4096, param=param)
    if train:
        n.drop6 = fc7input = L.Dropout(n.relu6, in_place=True)
    else:
        fc7input = n.relu6
    n.fc7, n.relu7 = fc_relu(fc7input, 4096, param=param)
    if train:
        n.drop7 = fc8input = L.Dropout(n.relu7, in_place=True)
    else:
        fc8input = n.relu7
    # always learn fc8 (param=learned_param)
    fc8 = L.InnerProduct(fc8input, num_output=num_classes, param=learned_param)
    # give fc8 the name specified by argument `classifier_name`
    n.__setattr__(classifier_name, fc8)
    if not train:
        n.probs = L.Softmax(fc8)
    if label is not None:
        n.label = label
        n.loss = L.SoftmaxWithLoss(fc8, n.label)
        n.acc = L.Accuracy(fc8, n.label)
    # write the net to a temporary file and return its filename
    with tempfile.NamedTemporaryFile(mode='w', delete=False) as f:
        f.write(str(n.to_proto()))
        return f.name


Project: Triplet_Loss_SBIR    Author: TuBui
def SketchANet(data_params, num_class = 20, val_mode = 0):
  """ our version of Sketch-A-Net
  data_params: batch_size, source, shape, scale, rot
  val_mode: 0 if this is train net, 1 if test net, 2 if deploy net
  """
  n = caffe.NetSpec()
  if val_mode == 2:
    n.data        = L.Input(name='data',
                           shape=dict(dim=[1,1,225,225]))
  else:
    n.data, n.label = L.Python(module = 'data_layer', layer = 'DataLayer',
                               ntop = 2, phase = val_mode,
                               param_str = str(data_params))
#==============================================================================
#     n.data, n.label = L.Data(batch_size=batch_size, backend=P.Data.LMDB, source=lmdb,
#                              transform_param=dict(scale=1./255), ntop=2)
#==============================================================================

  n.conv1, n.relu1 = conv_relu(n.data, 15, 64, stride = 3)
  n.pool1 = pooling(n.relu1,3, stride = 2)

  n.conv2, n.relu2 = conv_relu(n.pool1, 5, 128)
  n.pool2 = pooling(n.relu2,3, stride = 2)

  n.conv3, n.relu3 = conv_relu(n.pool2, 3, 256, pad = 1)
  n.conv4, n.relu4 = conv_relu(n.relu3, 3, 256, 1, 1)

  n.conv5, n.relu5 = conv_relu(n.relu4, 3, 256, pad=1)
  n.pool5 = pooling(n.relu5,3, stride = 2)

  n.fc6, n.relu6 = fc_relu(n.pool5, 512)
  if val_mode != 2:
    n.drop6 = L.Dropout(n.relu6, dropout_ratio = 0.55, in_place = True)

    n.fc7, n.relu7 = fc_relu(n.drop6, 512)
    n.drop7 = L.Dropout(n.relu7, dropout_ratio = 0.55, in_place = True)

    n.fc8 = fullconnect(n.drop7, num_class)
    n.loss = L.SoftmaxWithLoss(n.fc8, n.label)
  else: #deploy mode
    n.fc7, n.relu7 = fc_relu(n.relu6, 512)
    n.fc8 = fullconnect(n.relu7, num_class)

  if val_mode==1:
    n.accuracy = L.Accuracy(n.fc8, n.label, phase = val_mode)

  proto = n.to_proto()
  proto.name = 'SketchANet'
  return proto
Project: Triplet_Loss_SBIR    Author: TuBui
def pretrain_sketch(data_params, num_class = 20, mode = 'train',learn_all=True):
  """ our version of Sketch-A-Net
  data_params: batch_size, source, shape, scale, rot
  mode: 'train', 'test' (validation) or 'deploy'
  """
  param = learned_param if learn_all else frozen_param
  n = caffe.NetSpec()
  if mode == 'deploy':
    n.data        = L.Input(name='data',
                           shape=dict(dim=[1,1,225,225]))
  else:
    n.data, n.label = L.Python(module = 'data_layer', layer = 'DataLayer',
                               ntop = 2, phase = Train_Mode[mode],
                               param_str = str(data_params))

  n.conv1_a, n.relu1_a = conv_relu(n.data,15,64,3,param=param,name_prefix='conv1_a')
  n.pool1_a = pooling(n.relu1_a,3, 2)

  n.conv2_a, n.relu2_a = conv_relu(n.pool1_a,5,128,param=param,name_prefix='conv2_a')
  n.pool2_a = pooling(n.relu2_a,3,2)

  n.conv3_a, n.relu3_a = conv_relu(n.pool2_a,3,256,param=param,name_prefix='conv3_a')
  n.conv4_s, n.relu4_s = conv_relu(n.relu3_a,3,256,param=param,name_prefix='conv4_s')

  n.conv5_s, n.relu5_s = conv_relu(n.relu4_s,3,256,param=param,name_prefix='conv5_s')
  n.pool5_s = pooling(n.relu5_s,3,2)

  n.fc6_s, n.relu6_s = fc_relu(n.pool5_s, 512,param=param,name_prefix='fc6_s')

  if mode == 'train':
    n.drop6_s = fc7input = L.Dropout(n.relu6_s, dropout_ratio=0.55,in_place=True)
  else:
    fc7input = n.relu6_s
  n.fc7_s, n.relu7_s = fc_relu(fc7input, 512, param=param,name_prefix='fc7_s')
  if mode == 'train':
    n.drop7_s = fc8input = L.Dropout(n.relu7_s, dropout_ratio = 0.55,in_place=True)
  else:
    fc8input = n.relu7_s
  #n.feat8_r_s = fullconnect(fc8input, 100,param=learned_param,name_prefix='fc8_r_s')
  n.feat8_s = fullconnect(fc8input, num_class,param=learned_param,name_prefix='fc8_s')

  if mode != 'deploy':
    n.loss = L.SoftmaxWithLoss(n.feat8_s, n.label)

  if mode=='test': #validation
    n.accuracy = L.Accuracy(n.feat8_s, n.label, phase = Train_Mode[mode])

  proto = n.to_proto()
  proto.name = 'SketchANet'
  return proto
Project: Triplet_Loss_SBIR    Author: TuBui
def pretrain_image(data_params, num_class = 20, mode = 'train',learn_all=True):
  """ our version of Sketch-A-Net
  data_params: batch_size, source, shape, scale, rot
  mode: 'train', 'test' (validation) or 'deploy'
  """
  param = learned_param if learn_all else frozen_param
  n = caffe.NetSpec()
  if mode == 'deploy':
    n.data        = L.Input(name='data',
                           shape=dict(dim=[1,1,225,225]))
  else:
    n.data, n.label = L.Python(module = 'data_layer', layer = 'DataLayer',
                               ntop = 2, phase = Train_Mode[mode],
                               param_str = str(data_params))

  n.conv1_p, n.relu1_p = conv_relu(n.data,15,64,3,param=param,name_prefix='conv1_p')
  n.pool1_p = pooling(n.relu1_p,3, 2)

  n.conv2_p, n.relu2_p = conv_relu(n.pool1_p,5,128,param=param,name_prefix='conv2_p')
  n.pool2_p = pooling(n.relu2_p,3,2)

  n.conv3_p, n.relu3_p = conv_relu(n.pool2_p,3,256,param=param,name_prefix='conv3_p')
  n.conv4, n.relu4 = conv_relu(n.relu3_p,3,256,param=param,name_prefix='conv4')

  n.conv5, n.relu5 = conv_relu(n.relu4,3,256,param=param,name_prefix='conv5')
  n.pool5 = pooling(n.relu5,3,2)

  n.fc6, n.relu6 = fc_relu(n.pool5, 512,param=param,name_prefix='fc6')

  if mode == 'train':
    n.drop6 = fc7input = L.Dropout(n.relu6, dropout_ratio=0.55,in_place=True)
  else:
    fc7input = n.relu6
  n.fc7, n.relu7 = fc_relu(fc7input, 512, param=param,name_prefix='fc7')
  if mode == 'train':
    n.drop7 = fc8input = L.Dropout(n.relu7, dropout_ratio = 0.55,in_place=True)
  else:
    fc8input = n.relu7
  #n.feat8_r = fullconnect(fc8input, 100,param=learned_param,name_prefix='fc8_r')
  n.feat8 = fullconnect(fc8input, num_class,param=learned_param,name_prefix='fc8')

  if mode != 'deploy':
    n.loss = L.SoftmaxWithLoss(n.feat8, n.label)

  if mode=='test': #validation
    n.accuracy = L.Accuracy(n.feat8, n.label, phase = Train_Mode[mode])

  proto = n.to_proto()
  proto.name = 'SketchANet'
  return proto