Python mxnet module: mxnet.symbol code examples

The following 11 code examples, extracted from open-source Python projects, illustrate how to use the mxnet.symbol module. Every snippet assumes import mxnet as mx (plus any other module-level imports from its source file).
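
Before diving in, here is a minimal self-contained sketch of the mxnet.symbol workflow (the names data, conv0, and relu0 are illustrative, not taken from the projects below): operators are composed declaratively first, and shapes are resolved only when a concrete input shape is supplied.

import mxnet as mx

# declare a placeholder, then compose operators symbolically
data = mx.symbol.Variable(name='data')
conv = mx.symbol.Convolution(data=data, kernel=(3, 3), pad=(1, 1),
                             num_filter=8, name='conv0')
relu = mx.symbol.Activation(data=conv, act_type='relu', name='relu0')

# nothing has been computed yet; supplying an input shape triggers inference
arg_shapes, out_shapes, _ = relu.infer_shape(data=(1, 3, 224, 224))
print(relu.list_arguments())  # ['data', 'conv0_weight', 'conv0_bias']
print(out_shapes)             # [(1, 8, 224, 224)]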

Project: nimo    Author: wolfram2012
def conv_act_layer(from_layer, name, num_filter, kernel=(1,1), pad=(0,0), \
    stride=(1,1), act_type="relu", use_batchnorm=False):
    """
    wrapper for a small Convolution group

    Parameters:
    ----------
    from_layer : mx.symbol
        continue on which layer
    name : str
        base name of the new layers
    num_filter : int
        how many filters to use in Convolution layer
    kernel : tuple (int, int)
        kernel size (h, w)
    pad : tuple (int, int)
        padding size (h, w)
    stride : tuple (int, int)
        stride size (h, w)
    act_type : str
        activation type, can be relu...
    use_batchnorm : bool
        whether to use batch normalization

    Returns:
    ----------
    (conv, relu) mx.Symbols
    """
    assert not use_batchnorm, "batchnorm not yet supported"
    bias = mx.symbol.Variable(name="conv{}_bias".format(name),
        init=mx.init.Constant(0.0), attr={'__lr_mult__': '2.0'})
    conv = mx.symbol.Convolution(data=from_layer, bias=bias, kernel=kernel, pad=pad, \
        stride=stride, num_filter=num_filter, name="conv{}".format(name))
    relu = mx.symbol.Activation(data=conv, act_type=act_type, \
        name="{}{}".format(act_type, name))
    if use_batchnorm:  # unreachable: the assert above rejects use_batchnorm=True
        relu = mx.symbol.BatchNorm(data=relu, name="bn{}".format(name))
    return conv, relu
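
A hypothetical call to the function above (the input variable and the '1_1' suffix are illustrative); note that this variant returns both the convolution and the activation symbols:

data = mx.symbol.Variable(name='data')
conv, relu = conv_act_layer(data, '1_1', num_filter=64,
                            kernel=(3, 3), pad=(1, 1))
# per the format strings above, the layers are named 'conv1_1' and 'relu1_1'
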
Project: mxnet-ssd    Author: zhreshold
def conv_act_layer(from_layer, name, num_filter, kernel=(1,1), pad=(0,0), \
    stride=(1,1), act_type="relu", use_batchnorm=False):
    """
    wrapper for a small Convolution group

    Parameters:
    ----------
    from_layer : mx.symbol
        continue on which layer
    name : str
        base name of the new layers
    num_filter : int
        how many filters to use in Convolution layer
    kernel : tuple (int, int)
        kernel size (h, w)
    pad : tuple (int, int)
        padding size (h, w)
    stride : tuple (int, int)
        stride size (h, w)
    act_type : str
        activation type, can be relu...
    use_batchnorm : bool
        whether to use batch normalization

    Returns:
    ----------
    (conv, relu) mx.Symbols
    """
    bias = mx.symbol.Variable(name="{}_conv_bias".format(name),   
        init=mx.init.Constant(0.0), attr={'__lr_mult__': '2.0'})
    conv = mx.symbol.Convolution(data=from_layer, kernel=kernel, pad=pad, \
        stride=stride, num_filter=num_filter, name="{}_conv".format(name), bias=bias)
    if use_batchnorm:
        conv = mx.symbol.BatchNorm(data=conv, name="{}_bn".format(name))
    relu = mx.symbol.Activation(data=conv, act_type=act_type, \
        name="{}_{}".format(name, act_type))
    return relu
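
Unlike the nimo variant above, this version inserts BatchNorm between the convolution and the activation when requested, and returns only the activation symbol. A hypothetical call (the 'stage1' name is illustrative):

data = mx.symbol.Variable(name='data')
relu = conv_act_layer(data, 'stage1', num_filter=64, kernel=(3, 3),
                      pad=(1, 1), use_batchnorm=True)
# produces symbols named 'stage1_conv', 'stage1_bn', and 'stage1_relu'
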
Project: mxnet-ssd    Author: zhreshold
def legacy_conv_act_layer(from_layer, name, num_filter, kernel=(1,1), pad=(0,0), \
    stride=(1,1), act_type="relu", use_batchnorm=False):
    """
    wrapper for a small Convolution group

    Parameters:
    ----------
    from_layer : mx.symbol
        continue on which layer
    name : str
        base name of the new layers
    num_filter : int
        how many filters to use in Convolution layer
    kernel : tuple (int, int)
        kernel size (h, w)
    pad : tuple (int, int)
        padding size (h, w)
    stride : tuple (int, int)
        stride size (h, w)
    act_type : str
        activation type, can be relu...
    use_batchnorm : bool
        whether to use batch normalization

    Returns:
    ----------
    (conv, relu) mx.Symbols
    """
    assert not use_batchnorm, "batchnorm not yet supported"
    bias = mx.symbol.Variable(name="conv{}_bias".format(name),
        init=mx.init.Constant(0.0), attr={'__lr_mult__': '2.0'})
    conv = mx.symbol.Convolution(data=from_layer, bias=bias, kernel=kernel, pad=pad, \
        stride=stride, num_filter=num_filter, name="conv{}".format(name))
    relu = mx.symbol.Activation(data=conv, act_type=act_type, \
        name="{}{}".format(act_type, name))
    if use_batchnorm:  # unreachable: the assert above rejects use_batchnorm=True
        relu = mx.symbol.BatchNorm(data=relu, name="bn{}".format(name))
    return conv, relu
Project: mxnet-101    Author: burness
def conv_act_layer(from_layer, name, num_filter, kernel=(1,1), pad=(0,0), \
    stride=(1,1), act_type="relu", use_batchnorm=False):
    """
    wrapper for a small Convolution group

    Parameters:
    ----------
    from_layer : mx.symbol
        continue on which layer
    name : str
        base name of the new layers
    num_filter : int
        how many filters to use in Convolution layer
    kernel : tuple (int, int)
        kernel size (h, w)
    pad : tuple (int, int)
        padding size (h, w)
    stride : tuple (int, int)
        stride size (h, w)
    act_type : str
        activation type, can be relu...
    use_batchnorm : bool
        whether to use batch normalization

    Returns:
    ----------
    (conv, relu) mx.Symbols
    """
    assert not use_batchnorm, "batchnorm not yet supported"
    conv = mx.symbol.Convolution(data=from_layer, kernel=kernel, pad=pad, \
        stride=stride, num_filter=num_filter, name="conv{}".format(name))
    relu = mx.symbol.Activation(data=conv, act_type=act_type, \
        name="{}{}".format(act_type, name))
    if use_batchnorm:  # unreachable: the assert above rejects use_batchnorm=True
        relu = mx.symbol.BatchNorm(data=relu, name="bn{}".format(name))
    return conv, relu
Project: mxnet_tk1    Author: starimpact
def sym_factory(node, data):
  name = node['name']
  params = {}
  if 'param' in node:    
    for k, v in node['param'].items():
      try:
        params[k] = ast.literal_eval(v)
      except ValueError:
        params[k] = v
  return getattr(mx.symbol, node['op'])(data=data, name=name, **params)
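
A hypothetical node dict in the shape this factory expects (the flattened graph format produced by Symbol.tojson(), with parameter values stored as strings), wired to an upstream symbol; the snippet assumes import ast at module level:

data = mx.symbol.Variable(name='data')
node = {'op': 'FullyConnected', 'name': 'fc1',
        'param': {'num_hidden': '10', 'no_bias': 'False'}}
# equivalent to mx.symbol.FullyConnected(data=data, name='fc1',
#                                        num_hidden=10, no_bias=False)
fc1 = sym_factory(node, data)
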
Project: additions_mxnet    Author: eldercrow
def conv_act_layer(from_layer, name, num_filter, kernel=(1,1), pad=(0,0), \
    stride=(1,1), act_type="relu", use_batchnorm=False):
    """
    wrapper for a small Convolution group

    Parameters:
    ----------
    from_layer : mx.symbol
        continue on which layer
    name : str
        base name of the new layers
    num_filter : int
        how many filters to use in Convolution layer
    kernel : tuple (int, int)
        kernel size (h, w)
    pad : tuple (int, int)
        padding size (h, w)
    stride : tuple (int, int)
        stride size (h, w)
    act_type : str
        activation type, can be relu...
    use_batchnorm : bool
        whether to use batch normalization

    Returns:
    ----------
    (conv, relu) mx.Symbols
    """
    conv = mx.symbol.Convolution(data=from_layer, kernel=kernel, pad=pad, \
        stride=stride, num_filter=num_filter, name="{}_conv".format(name))
    if use_batchnorm:
        conv = mx.symbol.BatchNorm(data=conv, name="{}_bn".format(name))
    relu = mx.symbol.Activation(data=conv, act_type=act_type, \
        name="{}_{}".format(name, act_type))
    return relu
Project: additions_mxnet    Author: eldercrow
def legacy_conv_act_layer(from_layer, name, num_filter, kernel=(1,1), pad=(0,0), \
    stride=(1,1), act_type="relu", use_batchnorm=False):
    """
    wrapper for a small Convolution group

    Parameters:
    ----------
    from_layer : mx.symbol
        continue on which layer
    name : str
        base name of the new layers
    num_filter : int
        how many filters to use in Convolution layer
    kernel : tuple (int, int)
        kernel size (h, w)
    pad : tuple (int, int)
        padding size (h, w)
    stride : tuple (int, int)
        stride size (h, w)
    act_type : str
        activation type, can be relu...
    use_batchnorm : bool
        whether to use batch normalization

    Returns:
    ----------
    (conv, relu) mx.Symbols
    """
    assert not use_batchnorm, "batchnorm not yet supported"
    bias = mx.symbol.Variable(name="conv{}_bias".format(name),
        init=mx.init.Constant(0.0), attr={'__lr_mult__': '2.0'})
    conv = mx.symbol.Convolution(data=from_layer, bias=bias, kernel=kernel, pad=pad, \
        stride=stride, num_filter=num_filter, name="conv{}".format(name))
    relu = mx.symbol.Activation(data=conv, act_type=act_type, \
        name="{}{}".format(act_type, name))
    if use_batchnorm:  # unreachable: the assert above rejects use_batchnorm=True
        relu = mx.symbol.BatchNorm(data=relu, name="bn{}".format(name))
    return conv, relu
Project: mxnet-yolo    Author: zhreshold
def get_symbol(num_classes=20, nms_thresh=0.5, force_nms=False, **kwargs):
    body = resnet.get_symbol(num_classes, 50, '3,224,224')
    conv1 = body.get_internals()['_plus12_output']
    conv2 = body.get_internals()['_plus15_output']
    # anchors
    anchors = [1.3221, 1.73145, 3.19275, 4.00944, 5.05587, 8.09892, 9.47112, 4.84053, 11.2364, 10.0071]
    num_anchor = len(anchors) // 2

    # extra layers
    conv7_1 = conv_act_layer(conv2, 'conv7_1', 1024, kernel=(3, 3), pad=(1, 1),
        act_type='leaky')
    conv7_2 = conv_act_layer(conv7_1, 'conv7_2', 1024, kernel=(3, 3), pad=(1, 1),
        act_type='leaky')

    # re-organize
    conv5_6 = mx.sym.stack_neighbor(data=conv1, kernel=(2, 2), name='stack_downsample')
    concat = mx.sym.Concat(*[conv5_6, conv7_2], dim=1)
    # concat = conv7_2
    conv8_1 = conv_act_layer(concat, 'conv8_1', 1024, kernel=(3, 3), pad=(1, 1),
        act_type='leaky')
    pred = mx.symbol.Convolution(data=conv8_1, name='conv_pred', kernel=(1, 1),
        num_filter=num_anchor * (num_classes + 4 + 1))

    out = mx.contrib.symbol.YoloOutput(data=pred, num_class=num_classes,
        num_anchor=num_anchor, object_grad_scale=5.0, background_grad_scale=1.0,
        coord_grad_scale=1.0, class_grad_scale=1.0, anchors=anchors,
        nms_topk=400, warmup_samples=12800, name='yolo_output')
    return out
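
With the defaults, num_anchor is len(anchors) // 2 = 5, so conv_pred emits 5 * (20 + 4 + 1) = 125 channels per grid cell: per-anchor class scores, four box offsets, and one objectness score. A hypothetical smoke test, assuming the project's resnet module and its contrib YoloOutput operator are importable:

net = get_symbol(num_classes=20, nms_thresh=0.45)
print(net.list_arguments()[:3])  # 'data' followed by backbone weights
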
Project: mxnet-yolo    Author: zhreshold
def conv_act_layer(from_layer, name, num_filter, kernel=(1,1), pad=(0,0), \
    stride=(1,1), act_type="relu", use_batchnorm=False):
    """
    wrapper for a small Convolution group

    Parameters:
    ----------
    from_layer : mx.symbol
        continue on which layer
    name : str
        base name of the new layers
    num_filter : int
        how many filters to use in Convolution layer
    kernel : tuple (int, int)
        kernel size (h, w)
    pad : tuple (int, int)
        padding size (h, w)
    stride : tuple (int, int)
        stride size (h, w)
    act_type : str
        activation type, can be relu...
    use_batchnorm : bool
        whether to use batch normalization

    Returns:
    ----------
    (conv, relu) mx.Symbols
    """
    conv = mx.symbol.Convolution(data=from_layer, kernel=kernel, pad=pad, \
        stride=stride, num_filter=num_filter, name="conv{}".format(name))
    if use_batchnorm:
        conv = mx.symbol.BatchNorm(data=conv, name="bn{}".format(name))
    relu = mx.symbol.Activation(data=conv, act_type=act_type, \
        name="{}{}".format(act_type, name))
    return relu
Project: mxnet_tk1    Author: starimpact
def replace_conv_layer(layer_name, old_model, sym_handle, arg_handle):
  conf = json.loads(old_model.symbol.tojson())
  sym_dict = {}
  nodes = conf['nodes']
  nodes = topsort(nodes)
  res_sym = None
  new_model = old_model  
  for i,node in enumerate(nodes):
    sym = None    
    if is_input(node):
      sym = mx.symbol.Variable(name='data')
    elif node['op'] != 'null':
      input_nodes = [nodes[int(j[0])] for j in node['inputs']]
      datas = [input_node['name'] for input_node in input_nodes\
                                  if not input_node['name'].startswith(node['name'])]
      try:
        data = sym_dict[datas[0]]
      except Exception as e:
        print('cannot find symbol %s' % datas[0])
        raise e
      if node['name'] == layer_name:
        sym = sym_handle(data, node)          
      else:
        sym = sym_factory(node, data)        
    if sym:
      sym_dict[node['name']] = sym
      res_sym = sym

  arg_params = copy.deepcopy(old_model.arg_params)
  if layer_name:  
    arg_shapes, _, _ = res_sym.infer_shape(data=(1,3,224,224))
    arg_names = res_sym.list_arguments()
    arg_shape_dic = dict(zip(arg_names, arg_shapes))
    try:
      arg_handle(arg_shape_dic, arg_params)
    except Exception as e:
      raise Exception('Exception in arg_handle') from e

  new_model = mx.model.FeedForward(
                symbol=res_sym,
                ctx=old_model.ctx,
                num_epoch=1,                                
                epoch_size=old_model.epoch_size,
                optimizer='sgd',
                initializer=old_model.initializer,
                numpy_batch_size=old_model.numpy_batch_size,
                arg_params=arg_params,
                aux_params=old_model.aux_params,
                allow_extra_params=True,
                begin_epoch=old_model.begin_epoch)  
  return new_model
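
Hypothetical callbacks illustrating the contract: sym_handle(data, node) rebuilds the targeted layer from its upstream symbol and node dict, while arg_handle(arg_shape_dic, arg_params) reconciles the saved weights with the new shapes. The names widen_conv, reinit_args, and 'conv3' are illustrative:

def widen_conv(data, node):
    # hypothetical: rebuild the matched layer with more filters
    return mx.symbol.Convolution(data=data, kernel=(3, 3), pad=(1, 1),
                                 num_filter=128, name=node['name'])

def reinit_args(arg_shape_dic, arg_params):
    # hypothetical: drop saved weights whose shape no longer matches,
    # so the initializer recreates them
    for arg_name, shape in arg_shape_dic.items():
        if arg_name in arg_params and tuple(arg_params[arg_name].shape) != tuple(shape):
            del arg_params[arg_name]

new_model = replace_conv_layer('conv3', old_model, widen_conv, reinit_args)
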
Project: mxnet-yolo    Author: zhreshold
def conv_act_layer(from_layer, name, num_filter, kernel=(3, 3), pad=(1, 1), \
    stride=(1,1), act_type="relu", use_batchnorm=True):
    """
    wrapper for a small Convolution group

    Parameters:
    ----------
    from_layer : mx.symbol
        continue on which layer
    name : str
        base name of the new layers
    num_filter : int
        how many filters to use in Convolution layer
    kernel : tuple (int, int)
        kernel size (h, w)
    pad : tuple (int, int)
        padding size (h, w)
    stride : tuple (int, int)
        stride size (h, w)
    act_type : str
        activation type, can be relu...
    use_batchnorm : bool
        whether to use batch normalization

    Returns:
    ----------
    (conv, relu) mx.Symbols
    """
    conv = mx.symbol.Convolution(data=from_layer, kernel=kernel, pad=pad, \
        stride=stride, num_filter=num_filter, name="{}".format(name))
    if use_batchnorm:
        conv = mx.symbol.BatchNorm(data=conv, name="bn_{}".format(name))
    if act_type in ['elu', 'leaky', 'prelu', 'rrelu']:
        relu = mx.symbol.LeakyReLU(data=conv, act_type=act_type,
        name="{}_{}".format(act_type, name), slope=0.1)
    elif act_type in ['relu', 'sigmoid', 'softrelu', 'tanh']:
        relu = mx.symbol.Activation(data=conv, act_type=act_type, \
        name="{}_{}".format(act_type, name))
    else:
        raise ValueError("Invalid activation type: " + str(act_type))
    return relu
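
This variant routes 'elu', 'leaky', 'prelu', and 'rrelu' through mx.symbol.LeakyReLU (with slope=0.1) and the standard activation types through mx.symbol.Activation, which is what lets get_symbol above request act_type='leaky'. A hypothetical call:

data = mx.symbol.Variable(name='data')
out = conv_act_layer(data, 'conv7_1', 1024, act_type='leaky')
# produces symbols named 'conv7_1', 'bn_conv7_1', and 'leaky_conv7_1'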