
YOLOv3: An Incremental Improvement

Posted: 2020-02-20 08:17:14

The full implementation is in /ultralytics/yolov3. The create_modules function below builds the network's PyTorch module list from a parsed Darknet cfg; the yolov3-spp cfg it consumes is listed after it.

import torch.nn as nn

# Upsample, EmptyLayer and YOLOLayer are defined elsewhere in the repository's models.py.

def create_modules(module_defs):
    """Constructs module list of layer blocks from module configuration in module_defs"""
    hyperparams = module_defs.pop(0)                    # the [net] block
    output_filters = [int(hyperparams['channels'])]     # channel count of each layer's output
    module_list = nn.ModuleList()
    yolo_layer_count = 0
    for i, module_def in enumerate(module_defs):
        modules = nn.Sequential()

        if module_def['type'] == 'convolutional':
            bn = int(module_def['batch_normalize'])
            filters = int(module_def['filters'])
            kernel_size = int(module_def['size'])
            pad = (kernel_size - 1) // 2 if int(module_def['pad']) else 0
            modules.add_module('conv_%d' % i, nn.Conv2d(in_channels=output_filters[-1],
                                                        out_channels=filters,
                                                        kernel_size=kernel_size,
                                                        stride=int(module_def['stride']),
                                                        padding=pad,
                                                        bias=not bn))
            if bn:
                modules.add_module('batch_norm_%d' % i, nn.BatchNorm2d(filters))
            if module_def['activation'] == 'leaky':
                modules.add_module('leaky_%d' % i, nn.LeakyReLU(0.1, inplace=True))

        elif module_def['type'] == 'maxpool':
            kernel_size = int(module_def['size'])
            stride = int(module_def['stride'])
            if kernel_size == 2 and stride == 1:
                modules.add_module('_debug_padding_%d' % i, nn.ZeroPad2d((0, 1, 0, 1)))
            maxpool = nn.MaxPool2d(kernel_size=kernel_size, stride=stride,
                                   padding=int((kernel_size - 1) // 2))
            modules.add_module('maxpool_%d' % i, maxpool)

        elif module_def['type'] == 'upsample':
            # upsample = nn.Upsample(scale_factor=int(module_def['stride']), mode='nearest')  # WARNING: deprecated
            upsample = Upsample(scale_factor=int(module_def['stride']))
            modules.add_module('upsample_%d' % i, upsample)

        elif module_def['type'] == 'route':
            # Concatenation of earlier outputs; only the resulting channel count is tracked here
            layers = [int(x) for x in module_def['layers'].split(',')]
            filters = sum([output_filters[i + 1 if i > 0 else i] for i in layers])
            modules.add_module('route_%d' % i, EmptyLayer())

        elif module_def['type'] == 'shortcut':
            filters = output_filters[int(module_def['from'])]
            modules.add_module('shortcut_%d' % i, EmptyLayer())

        elif module_def['type'] == 'yolo':
            anchor_idxs = [int(x) for x in module_def['mask'].split(',')]
            # Extract anchors
            anchors = [float(x) for x in module_def['anchors'].split(',')]
            anchors = [(anchors[i], anchors[i + 1]) for i in range(0, len(anchors), 2)]
            anchors = [anchors[i] for i in anchor_idxs]
            nc = int(module_def['classes'])  # number of classes
            img_size = hyperparams['height']
            # Define detection layer
            yolo_layer = YOLOLayer(anchors, nc, img_size, yolo_layer_count, cfg=hyperparams['cfg'])
            modules.add_module('yolo_%d' % i, yolo_layer)
            yolo_layer_count += 1

        # Register module list and number of output filters
        module_list.append(modules)
        output_filters.append(filters)

    return hyperparams, module_list
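create_modules expects module_defs to be a list of dicts, one per bracketed block of a Darknet cfg file (such as the yolov3-spp cfg listed below), with the [net] block first; that first dict is popped off as hyperparams. The repository ships its own cfg parser (utils/parse_config.py); the sketch below is only a minimal stand-in to show the expected structure, and the name parse_cfg plus the file path in the usage comment are illustrative, not the repository's API.

# Minimal sketch of a Darknet cfg parser, assuming the format shown below:
# [section] headers followed by key=value lines, '#' starting a comment.
def parse_cfg(path):
    module_defs = []
    with open(path) as f:
        for line in f:
            line = line.strip()
            if not line or line.startswith('#'):
                continue                                       # skip blank lines and comments
            if line.startswith('['):                           # a new [section] starts a new block
                module_defs.append({'type': line[1:-1].rstrip()})
                if module_defs[-1]['type'] == 'convolutional':
                    module_defs[-1]['batch_normalize'] = 0     # default when the key is absent
            else:
                key, value = line.split('=', 1)
                module_defs[-1][key.rstrip()] = value.strip()
    return module_defs

# Usage sketch (file name and the extra 'cfg' key are assumptions; create_modules
# reads hyperparams['cfg'] when it builds each YOLOLayer):
# module_defs = parse_cfg('yolov3-spp.cfg')
# module_defs[0]['cfg'] = 'yolov3-spp.cfg'
# hyperparams, module_list = create_modules(module_defs)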

[net]
# Testing
batch=1
subdivisions=1
# Training
# batch=64
# subdivisions=16
width=608
height=608
channels=3
momentum=0.9
decay=0.0005
angle=0
saturation = 1.5
exposure = 1.5
hue=.1
learning_rate=0.001
burn_in=1000
max_batches = 500200
policy=steps
steps=400000,450000
scales=.1,.1

[convolutional]
batch_normalize=1
filters=32
size=3
stride=1
pad=1
activation=leaky

# Downsample
[convolutional]
batch_normalize=1
filters=64
size=3
stride=2
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=32
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=64
size=3
stride=1
pad=1
activation=leaky

[shortcut]
from=-3
activation=linear

# Downsample
[convolutional]
batch_normalize=1
filters=128
size=3
stride=2
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=64
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=128
size=3
stride=1
pad=1
activation=leaky

[shortcut]
from=-3
activation=linear

[convolutional]
batch_normalize=1
filters=64
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=128
size=3
stride=1
pad=1
activation=leaky

[shortcut]
from=-3
activation=linear

# Downsample
[convolutional]
batch_normalize=1
filters=256
size=3
stride=2
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky

[shortcut]
from=-3
activation=linear

[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky

[shortcut]
from=-3
activation=linear

[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky

[shortcut]
from=-3
activation=linear

[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky

[shortcut]
from=-3
activation=linear

[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky

[shortcut]
from=-3
activation=linear

[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky

[shortcut]
from=-3
activation=linear

[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky

[shortcut]
from=-3
activation=linear

[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky

[shortcut]
from=-3
activation=linear

# Downsample
[convolutional]
batch_normalize=1
filters=512
size=3
stride=2
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky

[shortcut]
from=-3
activation=linear

[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky

[shortcut]
from=-3
activation=linear

[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky

[shortcut]
from=-3
activation=linear

[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky

[shortcut]
from=-3
activation=linear

[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky

[shortcut]
from=-3
activation=linear

[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky

[shortcut]
from=-3
activation=linear

[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky

[shortcut]
from=-3
activation=linear

[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky

[shortcut]
from=-3
activation=linear

# Downsample
[convolutional]
batch_normalize=1
filters=1024
size=3
stride=2
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=1024
size=3
stride=1
pad=1
activation=leaky

[shortcut]
from=-3
activation=linear

[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=1024
size=3
stride=1
pad=1
activation=leaky

[shortcut]
from=-3
activation=linear

[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=1024
size=3
stride=1
pad=1
activation=leaky

[shortcut]
from=-3
activation=linear

[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=1024
size=3
stride=1
pad=1
activation=leaky

[shortcut]
from=-3
activation=linear

######################

[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=1024
activation=leaky

[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky

### SPP ###
[maxpool]
stride=1
size=5

[route]
layers=-2

[maxpool]
stride=1
size=9

[route]
layers=-4

[maxpool]
stride=1
size=13

[route]
layers=-1,-3,-5,-6

### End SPP ###

[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=1024
activation=leaky

[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=1024
activation=leaky

[convolutional]
size=1
stride=1
pad=1
filters=255
activation=linear

[yolo]
mask = 6,7,8
anchors = 10,13, 16,30, 33,23, 30,61, 62,45, 59,119, 116,90, 156,198, 373,326
classes=80
num=9
jitter=.3
ignore_thresh = .7
truth_thresh = 1
random=1

[route]
layers = -4

[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky

[upsample]
stride=2

[route]
layers = -1, 61

[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=512
activation=leaky

[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=512
activation=leaky

[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=512
activation=leaky

[convolutional]
size=1
stride=1
pad=1
filters=255
activation=linear

[yolo]
mask = 3,4,5
anchors = 10,13, 16,30, 33,23, 30,61, 62,45, 59,119, 116,90, 156,198, 373,326
classes=80
num=9
jitter=.3
ignore_thresh = .7
truth_thresh = 1
random=1

[route]
layers = -4

[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky

[upsample]
stride=2

[route]
layers = -1, 36

[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=256
activation=leaky

[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=256
activation=leaky

[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=256
activation=leaky

[convolutional]
size=1
stride=1
pad=1
filters=255
activation=linear

[yolo]
mask = 0,1,2
anchors = 10,13, 16,30, 33,23, 30,61, 62,45, 59,119, 116,90, 156,198, 373,326
classes=80
num=9
jitter=.3
ignore_thresh = .7
truth_thresh = 1
random=1
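A few points worth noting in the head of this cfg. Each [yolo] block selects three of the nine anchors through its mask (6,7,8 for the coarsest stride-32 scale, then 3,4,5 and 0,1,2 for the finer scales), and the 1x1 convolution directly above it must output filters = 3 * (classes + 5) = 3 * (80 + 5) = 255 channels. The [route] blocks with layers = -1, 61 and layers = -1, 36 concatenate the upsampled map with earlier backbone outputs addressed by absolute layer index, which is why create_modules sums output_filters over the referenced layers. The snippet below simply recomputes these numbers from the cfg values; it is a standalone check, not code from the repository.

# Standalone sketch: recompute the per-scale anchors and the 255-filter count
# from the [yolo] block values in the cfg above.
anchors = [(10, 13), (16, 30), (33, 23), (30, 61), (62, 45),
           (59, 119), (116, 90), (156, 198), (373, 326)]
classes = 80

for mask in ((6, 7, 8), (3, 4, 5), (0, 1, 2)):    # one [yolo] block per detection scale
    scale_anchors = [anchors[i] for i in mask]    # same selection create_modules performs
    filters = len(mask) * (classes + 5)           # x, y, w, h, objectness + class scores
    print(scale_anchors, filters)                 # -> 255 for every scale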
