Python caffe.proto.caffe_pb2 module: NetParameter() example source code

The following 41 code examples, extracted from open-source Python projects, illustrate how to use caffe.proto.caffe_pb2.NetParameter().
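
Most of the examples below fill a NetParameter in one of two ways: merging a text prototxt with text_format, or parsing a binary .caffemodel with ParseFromString. A minimal sketch of both patterns (the file names here are placeholders, not from any of the projects):

from caffe.proto import caffe_pb2
from google.protobuf import text_format

# Text prototxt -> NetParameter
net = caffe_pb2.NetParameter()
with open('deploy.prototxt') as f:
    text_format.Merge(f.read(), net)

# Binary caffemodel -> NetParameter
weights = caffe_pb2.NetParameter()
with open('model.caffemodel', 'rb') as f:
    weights.ParseFromString(f.read())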

Project: facade-segmentation | Author: jfemiani
def make_testable(train_model_path):
    # load the train net prototxt as a protobuf message
    with open(train_model_path) as f:
        train_str = f.read()
    train_net = caffe_pb2.NetParameter()
    text_format.Merge(train_str, train_net)

    # add the mean, var top blobs to all BN layers
    for layer in train_net.layer:
        if layer.type == "BN" and len(layer.top) == 1:
            layer.top.append(layer.top[0] + "-mean")
            layer.top.append(layer.top[0] + "-var")

    # remove the test data layer if present
    if train_net.layer[1].name == "data" and train_net.layer[1].include:
        train_net.layer.remove(train_net.layer[1])
        if train_net.layer[0].include:
            # remove the 'include {phase: TRAIN}' layer param
            train_net.layer[0].include.remove(train_net.layer[0].include[0])
    return train_net
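
make_testable returns the modified NetParameter rather than writing it out; a usage sketch (the file names are placeholder assumptions) serializes it back to a prototxt:

from google.protobuf import text_format

test_net = make_testable('train_segnet.prototxt')
with open('test_segnet.prototxt', 'w') as f:
    f.write(text_format.MessageToString(test_net))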
Project: CityHorizon | Author: CityStreetWander
def make_testable(train_model_path):
    # load the train net prototxt as a protobuf message
    with open(train_model_path) as f:
        train_str = f.read()
    train_net = caffe_pb2.NetParameter()
    text_format.Merge(train_str, train_net)

    # add the mean, var top blobs to all BN layers
    for layer in train_net.layer:
        if layer.type == "BN" and len(layer.top) == 1:
            layer.top.append(layer.top[0] + "-mean")
            layer.top.append(layer.top[0] + "-var")

    # remove the test data layer if present
    if train_net.layer[1].name == "data" and train_net.layer[1].include:
        train_net.layer.remove(train_net.layer[1])
        if train_net.layer[0].include:
            # remove the 'include {phase: TRAIN}' layer param
            train_net.layer[0].include.remove(train_net.layer[0].include[0])
    return train_net
Project: cv4ag | Author: worldbank
def make_testable(train_model_path):
    # load the train net prototxt as a protobuf message
    with open(train_model_path) as f:
        train_str = f.read()
    train_net = caffe_pb2.NetParameter()
    text_format.Merge(train_str, train_net)

    # add the mean, var top blobs to all BN layers
    for layer in train_net.layer:
        if layer.type == "BN" and len(layer.top) == 1:
            layer.top.append(layer.top[0] + "-mean")
            layer.top.append(layer.top[0] + "-var")

    # remove the test data layer if present
    if train_net.layer[1].name == "data" and train_net.layer[1].include:
        train_net.layer.remove(train_net.layer[1])
        if train_net.layer[0].include:
            # remove the 'include {phase: TRAIN}' layer param
            train_net.layer[0].include.remove(train_net.layer[0].include[0])
    return train_net
Project: deep_share | Author: luyongxi
def _load_src_params_plain(self, pretrained_model):
        """ Load parameters from the source model
            All parameters are saved in a dictionary where
            the keys are the original layer names
        """
        # load pretrained model
        with open(pretrained_model, 'rb') as f:
            binary_content = f.read()

        model = caffe_pb2.NetParameter()
        model.ParseFromString(binary_content)
        layers = model.layer

        src_params = {}

        for lc in layers:
            name = lc.name
            src_params[name] = [np.reshape(np.array(lc.blobs[i].data), lc.blobs[i].shape.dim) for i in xrange(len(lc.blobs))]
            # if len(lc.blobs) >= 2:
                # src_params[name] = [np.reshape(np.array(lc.blobs[0].data), lc.blobs[0].shape.dim), 
                #     np.reshape(np.array(lc.blobs[1].data), lc.blobs[1].shape.dim)]

        return src_params
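
The same blob-extraction pattern works outside the class; a standalone sketch (the helper name and path are hypothetical) that maps layer names to NumPy weight arrays:

import numpy as np
from caffe.proto import caffe_pb2

def load_params(caffemodel_path):  # hypothetical standalone helper
    # Parse the binary model, then reshape each stored blob by its dims.
    model = caffe_pb2.NetParameter()
    with open(caffemodel_path, 'rb') as f:
        model.ParseFromString(f.read())
    return {l.name: [np.array(b.data).reshape(tuple(b.shape.dim)) for b in l.blobs]
            for l in model.layer}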
Project: ENet | Author: TimoSaemann
def make_testable(train_model_path):
    # load the train net prototxt as a protobuf message
    with open(train_model_path) as f:
        train_str = f.read()
    train_net = caffe_pb2.NetParameter()
    text_format.Merge(train_str, train_net)

    # add the mean, var top blobs to all BN layers
    for layer in train_net.layer:
        if layer.type == "BN" and len(layer.top) == 1:
            layer.top.append(layer.top[0] + "-mean")
            layer.top.append(layer.top[0] + "-var")

    # remove the test data layer if present
    if train_net.layer[1].name == "data" and train_net.layer[1].include:
        train_net.layer.remove(train_net.layer[1])
        if train_net.layer[0].include:
            # remove the 'include {phase: TRAIN}' layer param
            train_net.layer[0].include.remove(train_net.layer[0].include[0])
    return train_net
Project: DepthSegnet | Author: hari-sikchi
def make_testable(train_model_path):
    # load the train net prototxt as a protobuf message
    print "hello"
    with open(train_model_path) as f:
        train_str = f.read()
    train_net = caffe_pb2.NetParameter()
    text_format.Merge(train_str, train_net)

    # add the mean, var top blobs to all BN layers
    for layer in train_net.layer:
        print(len(layer.top))
        if layer.type == "BN" and len(layer.top) == 1:
            layer.top.append(layer.top[0] + "-mean")
            layer.top.append(layer.top[0] + "-var")

    # remove the test data layer if present
    if train_net.layer[1].name == "data" and train_net.layer[1].include:
        train_net.layer.remove(train_net.layer[1])
        if train_net.layer[0].include:
            # remove the 'include {phase: TRAIN}' layer param
            train_net.layer[0].include.remove(train_net.layer[0].include[0])
    return train_net
Project: DepthSegnet | Author: hari-sikchi
def make_testable(train_model_path):
    # load the train net prototxt as a protobuf message
    with open(train_model_path) as f:
        train_str = f.read()
    train_net = caffe_pb2.NetParameter()
    text_format.Merge(train_str, train_net)

    # add the mean, var top blobs to all BN layers
    for layer in train_net.layer:
        #print layer.type 
        #print type(layer.top)
        if layer.type == "BN" and len(layer.top) == 1:
            layer.top.append(layer.top[0] + "-mean")
            layer.top.append(layer.top[0] + "-var")

    # remove the test data layer if present
    if train_net.layer[1].name == "data" and train_net.layer[1].include:
        train_net.layer.remove(train_net.layer[1])
        if train_net.layer[0].include:
            # remove the 'include {phase: TRAIN}' layer param
            train_net.layer[0].include.remove(train_net.layer[0].include[0])
    return train_net
Project: DL8803 | Author: NanditaDamaraju
def make_testable(train_model_path):
    # load the train net prototxt as a protobuf message
    with open(train_model_path) as f:
        train_str = f.read()
    train_net = caffe_pb2.NetParameter()
    text_format.Merge(train_str, train_net)

    # add the mean, var top blobs to all BN layers
    for layer in train_net.layer:
        if layer.type == "BN" and len(layer.top) == 1:
            layer.top.append(layer.top[0] + "-mean")
            layer.top.append(layer.top[0] + "-var")

    # remove the test data layer if present
    if train_net.layer[1].name == "data" and train_net.layer[1].include:
        train_net.layer.remove(train_net.layer[1])
        if train_net.layer[0].include:
            # remove the 'include {phase: TRAIN}' layer param
            train_net.layer[0].include.remove(train_net.layer[0].include[0])
    return train_net
Project: DL8803 | Author: NanditaDamaraju
def make_testable(train_model_path):
    # load the train net prototxt as a protobuf message
    with open(train_model_path) as f:
        train_str = f.read()
    train_net = caffe_pb2.NetParameter()
    text_format.Merge(train_str, train_net)

    # add the mean, var top blobs to all BN layers
    for layer in train_net.layer:
        if layer.type == "BN" and len(layer.top) == 1:
            layer.top.append(layer.top[0] + "-mean")
            layer.top.append(layer.top[0] + "-var")

    # remove the test data layer if present
    if train_net.layer[1].name == "data" and train_net.layer[1].include:
        train_net.layer.remove(train_net.layer[1])
        if train_net.layer[0].include:
            # remove the 'include {phase: TRAIN}' layer param
            train_net.layer[0].include.remove(train_net.layer[0].include[0])
    return train_net
Project: DAVIS-2016-Chanllege-Solution | Author: tangyuhao
def load(self, filename, bgr_to_rgb=True):
        """Load weights from a .caffemodel file and initialize counters.

        Params:
          filename: caffemodel file.
        """
        print('Loading Caffe file:', filename)
        caffemodel_params = caffe_pb2.NetParameter()
        with open(filename, 'rb') as f:
            caffemodel_str = f.read()
        caffemodel_params.ParseFromString(caffemodel_str)
        self.caffe_layers = caffemodel_params.layer

        # Layers collection.
        self.layers['convolution'] = [i for i, l in enumerate(self.caffe_layers)
                                      if l.type == 'Convolution']
        self.layers['l2_normalization'] = [i for i, l in enumerate(self.caffe_layers)
                                           if l.type == 'Normalize']
        # BGR to RGB conversion: try to find the first convolution with 3
        # input channels and swap its parameters accordingly.
        if bgr_to_rgb:
            self.bgr_to_rgb = 1
Project: pytorch2caffe | Author: longcw
def parse_caffemodel(caffemodel):
    model = caffe_pb2.NetParameter()
    print 'Loading caffemodel: ', caffemodel
    with open(caffemodel, 'rb') as fp:
        model.ParseFromString(fp.read())

    return model
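
A quick inspection sketch built on it (the model path is a placeholder):

model = parse_caffemodel('vgg16.caffemodel')
for layer in model.layer:
    print(layer.name, layer.type, len(layer.blobs))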
Project: MergeConvBN | Author: pby5
def get_netparameter(model):
  with open(model) as f:
    net = cp.NetParameter()
    pb.text_format.Parse(f.read(), net)
    return net
Project: emu | Author: mlosch
def _load_layer_types(prototxt):
        # Read prototxt with caffe protobuf definitions
        layers = caffe_pb2.NetParameter()
        with open(prototxt, 'r') as f:
            text_format.Merge(str(f.read()), layers)

        # Assign layer parameters to type dictionary
        types = OrderedDict()
        for i in range(len(layers.layer)):
            types[layers.layer[i].name] = layers.layer[i].type

        return types
Project: pytorch-caffe-darknet-convert | Author: marvis
def parse_caffemodel(caffemodel):
    model = caffe_pb2.NetParameter()
    print 'Loading caffemodel: ', caffemodel
    with open(caffemodel, 'rb') as fp:
        model.ParseFromString(fp.read())

    return model
Project: Merge_bn_Caffe | Author: NHZlX
def get_netparameter(self, model):
        with open(model) as f:
            net = cp.NetParameter()
            pb.text_format.Parse(f.read(), net)
            return net
Project: CityHorizon | Author: CityStreetWander
def drop_absorber_weights(model, net):
    # load the prototxt file as a protobuf message
    with open(model) as f:
        str2 = f.read()
    msg = caffe_pb2.NetParameter()
    text_format.Merge(str2, msg)

    # iterate over all layers of the network
    for i, layer in enumerate(msg.layer):

        if not layer.type == 'Python':
            continue

        conv_layer = msg.layer[i - 2].name  # conv layers are always two layers behind dropout

        # get some necessary sizes
        kernel_size = 1
        shape_of_kernel_blob = net.params[conv_layer][0].data.shape
        number_of_feature_maps = list(shape_of_kernel_blob[0:1])
        shape_of_kernel_blob = list(shape_of_kernel_blob[1:4])
        for x in shape_of_kernel_blob:
            kernel_size *= x

        weight = copy_double(net.params[conv_layer][0].data)
        bias = copy_double(net.params[conv_layer][1].data)

        # get p from dropout layer
        python_param_str = eval(msg.layer[i].python_param.param_str)
        p = float(python_param_str['p'])
        scale = 1/(1-p)

        # manipulate the weights and biases over all feature maps:
        for j in xrange(number_of_feature_maps[0]):
            net.params[conv_layer][0].data[j] = weight[j] * scale
            net.params[conv_layer][1].data[j] = bias[j] * scale

    return net
Project: CityHorizon | Author: CityStreetWander
def bn_absorber_prototxt(model):

    # load the prototxt file as a protobuf message
    with open(model) as k:
        str1 = k.read()
    msg1 = caffe_pb2.NetParameter()
    text_format.Merge(str1, msg1)

    # search for bn layer and remove them
    for i, l in enumerate(msg1.layer):
        if l.type == "BN":
            if msg1.layer[i].name == 'bn0_1':
                continue
            if msg1.layer[i - 1].type == 'Deconvolution':
                continue
            msg1.layer.remove(l)
            msg1.layer[i].bottom.append(msg1.layer[i-1].top[0])

            if len(msg1.layer[i].bottom) == 2:
                msg1.layer[i].bottom.remove(msg1.layer[i].bottom[0])
            elif len(msg1.layer[i].bottom) == 3:
                if 'bn' in msg1.layer[i].bottom[0]:  # remove only the bottom blob with 'bn' in its name
                    msg1.layer[i].bottom.remove(msg1.layer[i].bottom[0])
                elif 'bn' in msg1.layer[i].bottom[1]:
                    msg1.layer[i].bottom.remove(msg1.layer[i].bottom[1])
                else:
                    raise Exception("no bottom blob with name 'bn' present in {} layer".format(msg1.layer[i]))

            else:
                raise Exception("bn absorber does not support more than 2 input blobs for layer {}"
                                .format(msg1.layer[i]))

            if msg1.layer[i].type == 'Upsample':
                temp = msg1.layer[i].bottom[0]
                msg1.layer[i].bottom[0] = msg1.layer[i].bottom[1]
                msg1.layer[i].bottom[1] = temp
                # l.bottom.append(l.top[0]) #msg1.layer[i-1].top

    return msg1
Project: CityHorizon | Author: CityStreetWander
def add_bias_to_conv(model, weights, out_dir):
    # load the prototxt file as a protobuf message
    with open(model) as n:
        str1 = n.read()
    msg2 = caffe_pb2.NetParameter()
    text_format.Merge(str1, msg2)

    for l2 in msg2.layer:
        if l2.type == "Convolution":
            if l2.convolution_param.bias_term is False:
                l2.convolution_param.bias_term = True
                l2.convolution_param.bias_filler.type = 'constant'
                l2.convolution_param.bias_filler.value = 0.0  # actually default value

    model_temp = os.path.join(out_dir, "model_temp.prototxt")
    print "Saving temp model..."
    with open(model_temp, 'w') as m:
        m.write(text_format.MessageToString(msg2))

    net_src = caffe.Net(model, weights, caffe.TEST)
    net_des = caffe.Net(model_temp, caffe.TEST)

    for l3 in net_src.params.keys():
        for i in range(len(net_src.params[l3])):

            net_des.params[l3][i].data[:] = net_src.params[l3][i].data[:]

    # save weights with bias
    weights_temp = os.path.join(out_dir, "weights_temp.caffemodel")
    print "Saving temp weights..."
    net_des.save(weights_temp)

    return model_temp, weights_temp
Project: DeepNet | Author: hok205
def draw_network(model, image_path):
    """ Draw a network and save the graph in the specified image path

        Args:
            model (str): path to the prototxt file (model definition)
            image_path (str): path where to save the image
    """

    net = caffe_pb2.NetParameter()
    text_format.Merge(open(model).read(), net)
    caffe.draw.draw_net_to_file(net, image_path, 'BT')


Project: pre-resnet-gen-caffe | Author: Cysu
def create_model(depth):
    model = caffe_pb2.NetParameter()
    model.name = 'ResNet_{}'.format(depth)
    configs = {
        50: [3, 4, 6, 3],
        101: [3, 4, 23, 3],
        152: [3, 8, 36, 3],
        200: [3, 24, 36, 3],
    }
    num = configs[depth]
    layers = []
    layers.append(Data('data', ['data', 'label'],
                       'examples/imagenet/ilsvrc12_train_lmdb', 32, 'train'))
    layers.append(Data('data', ['data', 'label'],
                       'examples/imagenet/ilsvrc12_val_lmdb', 25, 'test'))
    layers.append(Conv('conv1', 'data', 64, 7, 2, 3))
    layers.extend(Act('conv1', layers[-1].top[0]))
    layers.append(Pool('pool1', layers[-1].top[0], 'max', 3, 2, 0))
    layers.extend(ResLayer('conv2', layers[-1].top[0], num[0], 64, 1, 'first'))
    layers.extend(ResLayer('conv3', layers[-1].top[0], num[1], 128, 2))
    layers.extend(ResLayer('conv4', layers[-1].top[0], num[2], 256, 2))
    layers.extend(ResLayer('conv5', layers[-1].top[0], num[3], 512, 2))
    layers.extend(Act('conv5', layers[-1].top[0]))
    layers.append(Pool('pool5', layers[-1].top[0], 'ave', 7, 1, 0))
    layers.append(Linear('fc', layers[-1].top[0], 1000))
    layers.append(Loss('loss', ['fc', 'label']))
    layers.append(Accuracy('accuracy_top1', ['fc', 'label'], 1))
    layers.append(Accuracy('accuracy_top5', ['fc', 'label'], 5))
    model.layer.extend(layers)
    return model
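
A usage sketch that writes the generated ResNet-50 definition to disk (the output path is an assumption, and the project's layer helpers such as Data and Conv must be importable):

from google.protobuf import text_format

model = create_model(50)
with open('resnet_50_trainval.prototxt', 'w') as f:
    f.write(text_format.MessageToString(model))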
Project: resnet-cifar10-caffe | Author: yihui-he
def __init__(self, name="network"):
        self.net = caffe_pb2.NetParameter()
        self.net.name = name
        self.bottom = None
        self.cur = None
        self.this = None
Project: channel-pruning | Author: yihui-he
def __init__(self, name="network", pt=None):
        self.net = caffe_pb2.NetParameter()
        if pt is None:
            self.net.name = name
        else:
            with open(pt, 'rt') as f:
                pb2.text_format.Merge(f.read(), self.net)
        self.bottom = None
        self.cur = None
        self.this = None

        self._layer = None
        self._bottom = None
Project: ENet | Author: TimoSaemann
def drop_absorber_weights(model, net):
    # load the prototxt file as a protobuf message
    with open(model) as f:
        str2 = f.read()
    msg = caffe_pb2.NetParameter()
    text_format.Merge(str2, msg)

    # iterate over all layers of the network
    for i, layer in enumerate(msg.layer):

        if not layer.type == 'Python':
            continue

        conv_layer = msg.layer[i - 2].name  # conv layers are always two layers behind dropout

        # get some necessary sizes
        kernel_size = 1
        shape_of_kernel_blob = net.params[conv_layer][0].data.shape
        number_of_feature_maps = list(shape_of_kernel_blob[0:1])
        shape_of_kernel_blob = list(shape_of_kernel_blob[1:4])
        for x in shape_of_kernel_blob:
            kernel_size *= x

        weight = copy_double(net.params[conv_layer][0].data)
        bias = copy_double(net.params[conv_layer][1].data)

        # get p from dropout layer
        python_param_str = eval(msg.layer[i].python_param.param_str)
        p = float(python_param_str['p'])
        scale = 1/(1-p)

        # manipulate the weights and biases over all feature maps:
        for j in xrange(number_of_feature_maps[0]):
            net.params[conv_layer][0].data[j] = weight[j] * scale
            net.params[conv_layer][1].data[j] = bias[j] * scale

    return net
Project: ENet | Author: TimoSaemann
def bn_absorber_prototxt(model):

    # load the prototxt file as a protobuf message
    with open(model) as k:
        str1 = k.read()
    msg1 = caffe_pb2.NetParameter()
    text_format.Merge(str1, msg1)

    # search for bn layer and remove them
    for i, l in enumerate(msg1.layer):
        if l.type == "BN":
            if msg1.layer[i].name == 'bn0_1':
                continue
            if msg1.layer[i - 1].type == 'Deconvolution':
                continue
            msg1.layer.remove(l)
            msg1.layer[i].bottom.append(msg1.layer[i-1].top[0])

            if len(msg1.layer[i].bottom) == 2:
                msg1.layer[i].bottom.remove(msg1.layer[i].bottom[0])
            elif len(msg1.layer[i].bottom) == 3:
                if 'bn' in msg1.layer[i].bottom[0]:  # remove only the bottom blob with 'bn' in its name
                    msg1.layer[i].bottom.remove(msg1.layer[i].bottom[0])
                elif 'bn' in msg1.layer[i].bottom[1]:
                    msg1.layer[i].bottom.remove(msg1.layer[i].bottom[1])
                else:
                    raise Exception("no bottom blob with name 'bn' present in {} layer".format(msg1.layer[i]))

            else:
                raise Exception("bn absorber does not support more than 2 input blobs for layer {}"
                                .format(msg1.layer[i]))

            if msg1.layer[i].type == 'Upsample':
                temp = msg1.layer[i].bottom[0]
                msg1.layer[i].bottom[0] = msg1.layer[i].bottom[1]
                msg1.layer[i].bottom[1] = temp
                # l.bottom.append(l.top[0]) #msg1.layer[i-1].top

    return msg1
Project: ENet | Author: TimoSaemann
def add_bias_to_conv(model, weights, out_dir):
    # load the prototxt file as a protobuf message
    with open(model) as n:
        str1 = n.read()
    msg2 = caffe_pb2.NetParameter()
    text_format.Merge(str1, msg2)

    for l2 in msg2.layer:
        if l2.type == "Convolution":
            if l2.convolution_param.bias_term is False:
                l2.convolution_param.bias_term = True
                l2.convolution_param.bias_filler.type = 'constant'
                l2.convolution_param.bias_filler.value = 0.0  # actually default value

    model_temp = os.path.join(out_dir, "model_temp.prototxt")
    print "Saving temp model..."
    with open(model_temp, 'w') as m:
        m.write(text_format.MessageToString(msg2))

    net_src = caffe.Net(model, weights, caffe.TEST)
    net_des = caffe.Net(model_temp, caffe.TEST)

    for l3 in net_src.params.keys():
        for i in range(len(net_src.params[l3])):

            net_des.params[l3][i].data[:] = net_src.params[l3][i].data[:]

    # save weights with bias
    weights_temp = os.path.join(out_dir, "weights_temp.caffemodel")
    print "Saving temp weights..."
    net_des.save(weights_temp)

    return model_temp, weights_temp
Project: Triplet_Loss_SBIR | Author: TuBui
def draw_net(net_proto_file, out_img_file, style = 'TB'):
  """
  Draw a CNN network into an image.
  IN:  net_proto_file   net definition file
  IN:  style            'TB' for top->bottom, 'LR' for left->right
  OUT: out_img_file     output image
  """
  net = caffe_pb2.NetParameter()
  text_format.Merge(open(net_proto_file).read(), net)
  if not net.name:
    net.name = 'cnn_net'
  print('\nDrawing net to %s' % out_img_file)
  caffe.draw.draw_net_to_file(net, out_img_file, style)
Project: Triplet_Loss_SBIR | Author: TuBui
def draw_net(net_proto_file, out_img_file, style = 'TB'):
  """
  Draw a CNN network into an image.
  IN:  net_proto_file   net definition file
  IN:  style            'TB' for top->bottom, 'LR' for left->right
  OUT: out_img_file     output image
  """
  net = caffe_pb2.NetParameter()
  text_format.Merge(open(net_proto_file).read(), net)
  if not net.name:
    net.name = 'cnn_net'
  print('\nDrawing net to %s' % out_img_file)
  caffe.draw.draw_net_to_file(net, out_img_file, style)
Project: Triplet_Loss_SBIR | Author: TuBui
def draw_net(net_proto_file, out_img_file, style = 'TB'):
  """
  Draw a CNN network into an image.
  IN:  net_proto_file   net definition file
  IN:  style            'TB' for top->bottom, 'LR' for left->right
  OUT: out_img_file     output image
  """
  net = caffe_pb2.NetParameter()
  text_format.Merge(open(net_proto_file).read(), net)
  if not net.name:
    net.name = 'cnn_net'
  print('\nDrawing net to %s' % out_img_file)
  caffe.draw.draw_net_to_file(net, out_img_file, style)
Project: DepthSegnet | Author: hari-sikchi
def bn_absorber_prototxt(model):

    # load the prototxt file as a protobuf message
    with open(model) as k:
        str1 = k.read()
    msg1 = caffe_pb2.NetParameter()
    text_format.Merge(str1, msg1)

    # search for bn layer and remove them
    for l in msg1.layer:
        if l.type == "BN":
            msg1.layer.remove(l)

    return msg1
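
Note that calling remove() on a repeated field while iterating over it skips the element that follows each removal, so back-to-back BN layers would not all be stripped. A safer variant (a sketch, not the project's code) iterates over a snapshot:

for l in list(msg1.layer):   # snapshot, so removals cannot skip elements
    if l.type == "BN":
        msg1.layer.remove(l)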
Project: seglink | Author: dengdan
def __init__(self):
        print('Loading Caffe file:', caffemodel_path)
        caffemodel_params = caffe_pb2.NetParameter()
        with open(caffemodel_path, 'rb') as f:
            caffemodel_str = f.read()
        caffemodel_params.ParseFromString(caffemodel_str)
        caffe_layers = caffemodel_params.layer
        self.layers = []
        self.counter = 0
        self.bgr_to_rgb = False
        for layer in caffe_layers:
            if layer.type == 'Convolution':
                self.layers.append(layer)
Project: tv-ad-classification | Author: Abhinav95
def read_net(filename):
    net = caffe_pb2.NetParameter()
    with open(filename) as f:
        protobuf.text_format.Parse(f.read(), net)
    return net
Project: pytorch-caffe-darknet-convert | Author: marvis
def generate_nbn_caffemodel(input_prototxt, input_caffemodel, output_prototxt, output_caffemodel, eps = 0.00001):
    input_network = caffe.Net(input_prototxt, input_caffemodel, caffe.TEST)

    f = open(input_caffemodel, 'rb')
    tmp_model = caffe_pb2.NetParameter()
    tmp_model.ParseFromString(f.read())
    f.close()
    layers = tmp_model.layer

    output_network = caffe.Net(output_prototxt, caffe.TEST)

    skip_types = ("Input", "Eltwise", "Scale", "BatchNorm", "ImageData", "ReLU",
                  "Pooling", "Split", "Concat", "Flatten", "SoftmaxWithLoss")
    for i in range(len(layers)):
        if layers[i].type in skip_types:
            continue
        elif layers[i].type == "Convolution":
            if not (layers[i+2].type == "Scale" and layers[i+1].type == "BatchNorm"):
                continue
            bn_conv = layers[i+1].name
            scale_conv = layers[i+2].name
            conv_w = input_network.params[layers[i].name][0].data[...]
            print layers[i].name, layers[i+1].name, layers[i+2].name, layers[i+2].scale_param.bias_term, layers[i].convolution_param.bias_term, conv_w.shape
            if layers[i].convolution_param.bias_term:
                # original conv
                conv_b = input_network.params[layers[i].name][1].data[...]
            else:
                conv_b = np.zeros((conv_w.shape[0],), dtype=np.float32)  # bias accumulator should be float, not uint8

            # original batchnormal
            scale = input_network.params[bn_conv][2].data[...]
            mean = input_network.params[bn_conv][0].data[...]
            var = input_network.params[bn_conv][1].data[...]

            # original scale
            scale_w = input_network.params[scale_conv][0].data[...]
            scale_b = input_network.params[scale_conv][1].data[...]
            #print "scale_w:", scale_w

            # calculate
            var = np.sqrt(var/scale+eps)
            conv_b = conv_b-mean/scale
            conv_b = conv_b/var
            var = scale_w/var
            conv_b = scale_w*conv_b
            conv_b = conv_b + scale_b

            for j in range(len(var)):
                output_network.params[layers[i].name][0].data[j] = var[j]*conv_w[j]
            output_network.params[layers[i].name][1].data[...] = conv_b
        else:
            output_network.params[layers[i].name][0].data[...] = input_network.params[layers[i].name][0].data[...]
            output_network.params[layers[i].name][1].data[...] = input_network.params[layers[i].name][1].data[...]    

    output_network.save(output_caffemodel)
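
The folding above relies on BatchNorm plus Scale being a single affine map per channel: with Caffe's stored scale factor s, the new weights are w' = g * w / sqrt(var/s + eps) and the new bias is b' = g * (b - mean/s) / sqrt(var/s + eps) + beta, where g and beta come from the Scale layer. A small NumPy sanity check of that identity (arbitrary values, no Caffe needed):

import numpy as np

np.random.seed(0)
x = np.random.randn(8)                 # pre-BN activations of one channel
mean, var, s, eps = 0.5, 2.0, 1.0, 1e-5
g, beta = 1.7, -0.3                    # Scale layer parameters

bn_out = g * (x - mean/s) / np.sqrt(var/s + eps) + beta
factor = g / np.sqrt(var/s + eps)
folded = factor * x + (beta - factor * mean/s)
assert np.allclose(bn_out, folded)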
Project: CityHorizon | Author: CityStreetWander
def bn_absorber_weights(model, weights):

    # load the prototxt file as a protobuf message
    with open(model) as f:
        str2 = f.read()
    msg = caffe_pb2.NetParameter()
    text_format.Merge(str2, msg)

    # load net
    net = caffe.Net(model, weights, caffe.TEST)

    # iterate over all layers of the network
    for i, layer in enumerate(msg.layer):

        if not layer.type == 'BN':
            continue

        # check the special case that the bn layer comes right after concat layer
        if msg.layer[i].name == 'bn0_1':
            continue

        if msg.layer[i - 1].type == 'Deconvolution':  # do not merge into deconv layer
            continue

        bn_layer = msg.layer[i].name
        conv_layer = msg.layer[i - 1].name

        # get some necessary sizes
        kernel_size = 1
        shape_of_kernel_blob = net.params[conv_layer][0].data.shape
        number_of_feature_maps = list(shape_of_kernel_blob[0:1])
        shape_of_kernel_blob = list(shape_of_kernel_blob[1:4])
        for x in shape_of_kernel_blob:
            kernel_size *= x

        weight = copy_double(net.params[conv_layer][0].data)
        bias = copy_double(net.params[conv_layer][1].data)

        # receive new_gamma and new_beta which was already calculated by the compute_bn_statistics.py script
        new_gamma = net.params[bn_layer][0].data[...]
        new_beta = net.params[bn_layer][1].data[...]

        # manipulate the weights and biases over all feature maps:
        # weight_new = weight * gamma_new
        # bias_new = bias * gamma_new + beta_new
        # for more information see https://github.com/alexgkendall/caffe-segnet/issues/109
        for j in xrange(number_of_feature_maps[0]):

            net.params[conv_layer][0].data[j] = weight[j] * np.repeat(new_gamma.item(j), kernel_size).reshape(
                net.params[conv_layer][0].data[j].shape)
            net.params[conv_layer][1].data[j] = bias[j] * new_gamma.item(j) + new_beta.item(j)

        # set the no longer needed bn params to zero
        net.params[bn_layer][0].data[:] = 0
        net.params[bn_layer][1].data[:] = 0

    return net
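
A usage sketch (both paths are placeholders); the returned caffe.Net can be saved directly:

net = bn_absorber_weights('segnet_inference.prototxt', 'segnet.caffemodel')
net.save('segnet_bn_absorbed.caffemodel')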
Project: deep_share | Author: luyongxi
def convertBN(inmodel, outmodel):
    """ subsume all the BN layers inside inmode to normal layers in the out model """

    # load files
    print 'Loading caffemodel: {}'.format(inmodel)
    with open(inmodel, 'rb') as f:
        binary_content = f.read()

    protobuf = caffe_pb2.NetParameter()
    protobuf.ParseFromString(binary_content)
    layers = protobuf.layer

    _eps = 1e-5
    for layer in layers:
        if layer.type == 'BatchNorm':
            # the layer to be modified. 
            layer_c = [l for l in layers if l.name == layer.name[3:]][0]
            # the parameters fo the computational layer
            w = np.reshape(np.array(layer_c.blobs[0].data), layer_c.blobs[0].shape.dim) 
            b = np.reshape(np.array(layer_c.blobs[1].data), layer_c.blobs[1].shape.dim)
            # load the BN parameters
            factor = 0 if np.array(layer.blobs[2].data) == 0 else 1./np.array(layer.blobs[2].data)
            mean = np.array(layer.blobs[0].data) * factor
            var = np.array(layer.blobs[1].data) * factor

            # display information
            print 'Modifying layer {} based on information from {}'.format(layer_c.name, layer.name)
            # update weights
            if len(w.shape) == 4: 
                w /= (_eps + np.sqrt(var)[:, np.newaxis, np.newaxis, np.newaxis])
            elif len(w.shape) == 2:
                w /= (_eps + np.sqrt(var)[:, np.newaxis])
            # update bias
            b -= mean
            b /= (_eps + np.sqrt(var))
            # save the changes back to the model
            del layer_c.blobs[0].data[:]
            del layer_c.blobs[1].data[:]
            layer_c.blobs[0].data.extend(w.flatten().tolist())
            layer_c.blobs[1].data.extend(b.flatten().tolist())

    # save the model to out model
    new_binary_content = protobuf.SerializeToString()

    print 'Saving caffemodel: {}'.format(outmodel)
    with open(outmodel, 'wb') as f:
        f.write(new_binary_content)
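
Invoking it is a single call (both file names are placeholders):

convertBN('model_with_bn.caffemodel', 'model_nobn.caffemodel')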
Project: toothless | Author: ratt-ru
def get_transformer(deploy_file, mean_file=None):
    """
    Returns an instance of caffe.io.Transformer

    Arguments:
    deploy_file -- path to a .prototxt file

    Keyword arguments:
    mean_file -- path to a .binaryproto file (optional)
    """
    network = caffe_pb2.NetParameter()
    with open(deploy_file) as infile:
        text_format.Merge(infile.read(), network)

    if network.input_shape:
        dims = network.input_shape[0].dim
    else:
        dims = network.input_dim[:4]

    t = caffe.io.Transformer(
            inputs = {'data': dims}
            )
    t.set_transpose('data', (2,0,1)) # transpose to (channels, height, width)

    # color images
    if dims[1] == 3:
        # channel swap
        t.set_channel_swap('data', (2,1,0))

    if mean_file:
        # set mean pixel
        with open(mean_file, 'rb') as infile:
            blob = caffe_pb2.BlobProto()
            blob.MergeFromString(infile.read())
            if blob.HasField('shape'):
                blob_dims = blob.shape.dim
                assert len(blob_dims) == 4, 'Shape should have 4 dimensions - shape is "%s"' % blob.shape
            elif blob.HasField('num') and blob.HasField('channels') and \
                    blob.HasField('height') and blob.HasField('width'):
                blob_dims = (blob.num, blob.channels, blob.height, blob.width)
            else:
                raise ValueError('blob does not provide shape or 4d dimensions')
            pixel = np.reshape(blob.data, blob_dims[1:]).mean(1).mean(1)
            t.set_mean('data', pixel)

    return t
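
A usage sketch (paths are placeholders) that preprocesses one image with the returned transformer:

import caffe

t = get_transformer('deploy.prototxt', 'mean.binaryproto')
img = caffe.io.load_image('example.jpg')   # HWC, RGB, floats in [0, 1]
data = t.preprocess('data', img)           # CHW, BGR, mean-subtracted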

Project: GTSRB-caffe-model | Author: magnusja
def get_transformer(deploy_file, mean_file=None):
    """
    Returns an instance of caffe.io.Transformer

    Arguments:
    deploy_file -- path to a .prototxt file

    Keyword arguments:
    mean_file -- path to a .binaryproto file (optional)
    """
    network = caffe_pb2.NetParameter()
    with open(deploy_file) as infile:
        text_format.Merge(infile.read(), network)

    if network.input_shape:
        dims = network.input_shape[0].dim
    else:
        dims = network.input_dim[:4]

    t = caffe.io.Transformer(
            inputs = {'data': dims}
            )
    t.set_transpose('data', (2,0,1)) # transpose to (channels, height, width)

    # color images
    if dims[1] == 3:
        # channel swap
        t.set_channel_swap('data', (2,1,0))

    if mean_file:
        # set mean pixel
        with open(mean_file,'rb') as infile:
            blob = caffe_pb2.BlobProto()
            blob.MergeFromString(infile.read())
            if blob.HasField('shape'):
                blob_dims = blob.shape.dim
                assert len(blob_dims) == 4, 'Shape should have 4 dimensions - shape is "%s"' % blob.shape
            elif blob.HasField('num') and blob.HasField('channels') and \
                    blob.HasField('height') and blob.HasField('width'):
                blob_dims = (blob.num, blob.channels, blob.height, blob.width)
            else:
                raise ValueError('blob does not provide shape or 4d dimensions')
            pixel = np.reshape(blob.data, blob_dims[1:]).mean(1).mean(1)
            t.set_mean('data', pixel)

    return t
Project: Barebones-Flask-and-Caffe-Classifier | Author: alex-paterson
def get_transformer(deploy_file, mean_file=None):
    """
    Returns an instance of caffe.io.Transformer

    Arguments:
    deploy_file -- path to a .prototxt file

    Keyword arguments:
    mean_file -- path to a .binaryproto file (optional)
    """
    network = caffe_pb2.NetParameter()
    with open(deploy_file) as infile:
        text_format.Merge(infile.read(), network)

    if network.input_shape:
        dims = network.input_shape[0].dim
    else:
        dims = network.input_dim[:4]

    t = caffe.io.Transformer(
            inputs = {'data': dims}
            )
    t.set_transpose('data', (2,0,1)) # transpose to (channels, height, width)

    # color images
    if dims[1] == 3:
        # channel swap
        t.set_channel_swap('data', (2,1,0))

    if mean_file:
        # set mean pixel
        with open(mean_file,'rb') as infile:
            blob = caffe_pb2.BlobProto()
            blob.MergeFromString(infile.read())
            if blob.HasField('shape'):
                blob_dims = blob.shape.dim
                assert len(blob_dims) == 4, 'Shape should have 4 dimensions - shape is "%s"' % blob.shape
            elif blob.HasField('num') and blob.HasField('channels') and \
                    blob.HasField('height') and blob.HasField('width'):
                blob_dims = (blob.num, blob.channels, blob.height, blob.width)
            else:
                raise ValueError('blob does not provide shape or 4d dimensions')
            pixel = np.reshape(blob.data, blob_dims[1:]).mean(1).mean(1)
            t.set_mean('data', pixel)

    return t
Project: ENet | Author: TimoSaemann
def bn_absorber_weights(model, weights):

    # load the prototxt file as a protobuf message
    with open(model) as f:
        str2 = f.read()
    msg = caffe_pb2.NetParameter()
    text_format.Merge(str2, msg)

    # load net
    net = caffe.Net(model, weights, caffe.TEST)

    # iterate over all layers of the network
    for i, layer in enumerate(msg.layer):

        if not layer.type == 'BN':
            continue

        # check the special case that the bn layer comes right after concat layer
        if msg.layer[i].name == 'bn0_1':
            continue

        if msg.layer[i - 1].type == 'Deconvolution':  # do not merge into deconv layer
            continue

        bn_layer = msg.layer[i].name
        conv_layer = msg.layer[i - 1].name

        # get some necessary sizes
        kernel_size = 1
        shape_of_kernel_blob = net.params[conv_layer][0].data.shape
        number_of_feature_maps = list(shape_of_kernel_blob[0:1])
        shape_of_kernel_blob = list(shape_of_kernel_blob[1:4])
        for x in shape_of_kernel_blob:
            kernel_size *= x

        weight = copy_double(net.params[conv_layer][0].data)
        bias = copy_double(net.params[conv_layer][1].data)

        # receive new_gamma and new_beta which was already calculated by the compute_bn_statistics.py script
        new_gamma = net.params[bn_layer][0].data[...]
        new_beta = net.params[bn_layer][1].data[...]

        # manipulate the weights and biases over all feature maps:
        # weight_new = weight * gamma_new
        # bias_new = bias * gamma_new + beta_new
        # for more information see https://github.com/alexgkendall/caffe-segnet/issues/109
        for j in xrange(number_of_feature_maps[0]):

            net.params[conv_layer][0].data[j] = weight[j] * np.repeat(new_gamma.item(j), kernel_size).reshape(
                net.params[conv_layer][0].data[j].shape)
            net.params[conv_layer][1].data[j] = bias[j] * new_gamma.item(j) + new_beta.item(j)

        # set the no longer needed bn params to zero
        net.params[bn_layer][0].data[:] = 0
        net.params[bn_layer][1].data[:] = 0

    return net
Project: DepthSegnet | Author: hari-sikchi
def bn_absorber_weights(model, weights):

    # load the prototxt file as a protobuf message
    with open(model) as f:
        str2 = f.read()
    msg = caffe_pb2.NetParameter()
    text_format.Merge(str2, msg)

    # load net
    net = caffe.Net(model, weights, caffe.TEST)

    # iterate over all layers of the network
    for i, layer in enumerate(msg.layer):

        # check if conv layer exist right before bn layer, otherwise merging is not possible and skip
        if not layer.type == 'BN':
            continue
        if not msg.layer[i-1].type == 'Convolution':
            continue

        # get the name of the bn and conv layer
        bn_layer = msg.layer[i].name
        conv_layer = msg.layer[i-1].name

        # get some necessary sizes
        kernel_size = 1
        shape_of_kernel_blob = net.params[conv_layer][0].data.shape
        number_of_feature_maps = list(shape_of_kernel_blob[0:1])
        shape_of_kernel_blob = list(shape_of_kernel_blob[1:4])
        for x in shape_of_kernel_blob:
            kernel_size *= x

        weight = copy_double(net.params[conv_layer][0].data)
        bias = copy_double(net.params[conv_layer][1].data)

        # receive new_gamma and new_beta which was already calculated by the compute_bn_statistics.py script
        new_gamma = net.params[bn_layer][0].data[...]
        new_beta = net.params[bn_layer][1].data[...]

        # manipulate the weights and biases over all feature maps:
        # weight_new = weight * gamma_new
        # bias_new = bias * gamma_new + beta_new
        # for more information see https://github.com/alexgkendall/caffe-segnet/issues/109
        for j in xrange(number_of_feature_maps[0]):

            net.params[conv_layer][0].data[j] = weight[j] * np.repeat(new_gamma.item(j), kernel_size).reshape(
                net.params[conv_layer][0].data[j].shape)
            net.params[conv_layer][1].data[j] = bias[j] * new_gamma.item(j) + new_beta.item(j)

        # set the no longer needed bn params to zero
        net.params[bn_layer][0].data[:] = 0
        net.params[bn_layer][1].data[:] = 0

    return net
Project: WasIstDasFuer1Drone | Author: magnusja
def get_transformer(deploy_file, mean_file=None):
    """
    Returns an instance of caffe.io.Transformer

    Arguments:
    deploy_file -- path to a .prototxt file

    Keyword arguments:
    mean_file -- path to a .binaryproto file (optional)
    """
    network = caffe_pb2.NetParameter()
    with open(deploy_file) as infile:
        text_format.Merge(infile.read(), network)

    if network.input_shape:
        dims = network.input_shape[0].dim
    else:
        dims = network.input_dim[:4]

    t = caffe.io.Transformer(
            inputs = {'data': dims}
            )
    t.set_transpose('data', (2,0,1)) # transpose to (channels, height, width)

    # color images
    if dims[1] == 3:
        # channel swap
        t.set_channel_swap('data', (2,1,0))

    if mean_file:
        # set mean pixel
        with open(mean_file,'rb') as infile:
            blob = caffe_pb2.BlobProto()
            blob.MergeFromString(infile.read())
            if blob.HasField('shape'):
                blob_dims = blob.shape.dim
                assert len(blob_dims) == 4, 'Shape should have 4 dimensions - shape is "%s"' % blob.shape
            elif blob.HasField('num') and blob.HasField('channels') and \
                    blob.HasField('height') and blob.HasField('width'):
                blob_dims = (blob.num, blob.channels, blob.height, blob.width)
            else:
                raise ValueError('blob does not provide shape or 4d dimensions')
            pixel = np.reshape(blob.data, blob_dims[1:]).mean(1).mean(1)
            t.set_mean('data', pixel)

    return t
Project: WasIstDasFuer1Drone | Author: magnusja
def get_transformer(deploy_file, mean_file=None):
    """
    Returns an instance of caffe.io.Transformer

    Arguments:
    deploy_file -- path to a .prototxt file

    Keyword arguments:
    mean_file -- path to a .binaryproto file (optional)
    """
    network = caffe_pb2.NetParameter()
    with open(deploy_file) as infile:
        text_format.Merge(infile.read(), network)

    if network.input_shape:
        dims = network.input_shape[0].dim
    else:
        dims = network.input_dim[:4]

    t = caffe.io.Transformer(
            inputs = {'data': dims}
            )
    t.set_transpose('data', (2,0,1)) # transpose to (channels, height, width)

    # color images
    if dims[1] == 3:
        # channel swap
        t.set_channel_swap('data', (2,1,0))

    if mean_file:
        # set mean pixel
        with open(mean_file,'rb') as infile:
            blob = caffe_pb2.BlobProto()
            blob.MergeFromString(infile.read())
            if blob.HasField('shape'):
                blob_dims = blob.shape.dim
                assert len(blob_dims) == 4, 'Shape should have 4 dimensions - shape is "%s"' % blob.shape
            elif blob.HasField('num') and blob.HasField('channels') and \
                    blob.HasField('height') and blob.HasField('width'):
                blob_dims = (blob.num, blob.channels, blob.height, blob.width)
            else:
                raise ValueError('blob does not provide shape or 4d dimensions')
            pixel = np.reshape(blob.data, blob_dims[1:]).mean(1).mean(1)
            t.set_mean('data', pixel)

    return t