Created
July 9, 2017 04:08
-
-
Save szm-R/2ed23859c5320f4c88061f049179c7a1 to your computer and use it in GitHub Desktop.
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
name: resnet269_v2 | |
input: "data" | |
input_shape { | |
dim: 1 | |
dim: 3 | |
dim: 320 | |
dim: 320 | |
} | |
layer { | |
name: "conv1" | |
type: "Convolution" | |
bottom: "data" | |
top: "conv1" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 64 | |
pad: 3 | |
kernel_size: 7 | |
stride: 2 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "conv1_bn" | |
type: "BatchNorm" | |
bottom: "conv1" | |
top: "conv1" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "conv1_scale" | |
type: "Scale" | |
bottom: "conv1" | |
top: "conv1" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "conv1_relu" | |
type: "ReLU" | |
bottom: "conv1" | |
top: "conv1" | |
} | |
layer { | |
name: "pool1" | |
type: "Pooling" | |
bottom: "conv1" | |
top: "pool1" | |
pooling_param { | |
pool: MAX | |
kernel_size: 3 | |
stride: 2 | |
} | |
} | |
layer { | |
name: "res1_conv1" | |
type: "Convolution" | |
bottom: "pool1" | |
top: "res1_conv1" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 64 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res1_conv1_bn" | |
type: "BatchNorm" | |
bottom: "res1_conv1" | |
top: "res1_conv1" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res1_conv1_scale" | |
type: "Scale" | |
bottom: "res1_conv1" | |
top: "res1_conv1" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res1_conv1_relu" | |
type: "ReLU" | |
bottom: "res1_conv1" | |
top: "res1_conv1" | |
} | |
layer { | |
name: "res1_conv2" | |
type: "Convolution" | |
bottom: "res1_conv1" | |
top: "res1_conv2" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 64 | |
pad: 1 | |
kernel_size: 3 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res1_conv2_bn" | |
type: "BatchNorm" | |
bottom: "res1_conv2" | |
top: "res1_conv2" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res1_conv2_scale" | |
type: "Scale" | |
bottom: "res1_conv2" | |
top: "res1_conv2" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res1_conv2_relu" | |
type: "ReLU" | |
bottom: "res1_conv2" | |
top: "res1_conv2" | |
} | |
layer { | |
name: "res1_conv3" | |
type: "Convolution" | |
bottom: "res1_conv2" | |
top: "res1_conv3" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 256 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res1_match_conv" | |
type: "Convolution" | |
bottom: "pool1" | |
top: "res1_match_conv" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 256 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res1_eletwise" | |
type: "Eltwise" | |
bottom: "res1_match_conv" | |
bottom: "res1_conv3" | |
top: "res1_eletwise" | |
eltwise_param { | |
operation: SUM | |
} | |
} | |
layer { | |
name: "res2_bn" | |
type: "BatchNorm" | |
bottom: "res1_eletwise" | |
top: "res2_bn" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res2_scale" | |
type: "Scale" | |
bottom: "res2_bn" | |
top: "res2_bn" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res2_relu" | |
type: "ReLU" | |
bottom: "res2_bn" | |
top: "res2_bn" | |
} | |
layer { | |
name: "res2_conv1" | |
type: "Convolution" | |
bottom: "res2_bn" | |
top: "res2_conv1" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 64 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res2_conv1_bn" | |
type: "BatchNorm" | |
bottom: "res2_conv1" | |
top: "res2_conv1" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res2_conv1_scale" | |
type: "Scale" | |
bottom: "res2_conv1" | |
top: "res2_conv1" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res2_conv1_relu" | |
type: "ReLU" | |
bottom: "res2_conv1" | |
top: "res2_conv1" | |
} | |
layer { | |
name: "res2_conv2" | |
type: "Convolution" | |
bottom: "res2_conv1" | |
top: "res2_conv2" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 64 | |
pad: 1 | |
kernel_size: 3 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res2_conv2_bn" | |
type: "BatchNorm" | |
bottom: "res2_conv2" | |
top: "res2_conv2" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res2_conv2_scale" | |
type: "Scale" | |
bottom: "res2_conv2" | |
top: "res2_conv2" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res2_conv2_relu" | |
type: "ReLU" | |
bottom: "res2_conv2" | |
top: "res2_conv2" | |
} | |
layer { | |
name: "res2_conv3" | |
type: "Convolution" | |
bottom: "res2_conv2" | |
top: "res2_conv3" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 256 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res2_eletwise" | |
type: "Eltwise" | |
bottom: "res1_eletwise" | |
bottom: "res2_conv3" | |
top: "res2_eletwise" | |
eltwise_param { | |
operation: SUM | |
} | |
} | |
layer { | |
name: "res3_bn" | |
type: "BatchNorm" | |
bottom: "res2_eletwise" | |
top: "res3_bn" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res3_scale" | |
type: "Scale" | |
bottom: "res3_bn" | |
top: "res3_bn" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res3_relu" | |
type: "ReLU" | |
bottom: "res3_bn" | |
top: "res3_bn" | |
} | |
layer { | |
name: "res3_conv1" | |
type: "Convolution" | |
bottom: "res3_bn" | |
top: "res3_conv1" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 64 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res3_conv1_bn" | |
type: "BatchNorm" | |
bottom: "res3_conv1" | |
top: "res3_conv1" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res3_conv1_scale" | |
type: "Scale" | |
bottom: "res3_conv1" | |
top: "res3_conv1" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res3_conv1_relu" | |
type: "ReLU" | |
bottom: "res3_conv1" | |
top: "res3_conv1" | |
} | |
layer { | |
name: "res3_conv2" | |
type: "Convolution" | |
bottom: "res3_conv1" | |
top: "res3_conv2" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 64 | |
pad: 1 | |
kernel_size: 3 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res3_conv2_bn" | |
type: "BatchNorm" | |
bottom: "res3_conv2" | |
top: "res3_conv2" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res3_conv2_scale" | |
type: "Scale" | |
bottom: "res3_conv2" | |
top: "res3_conv2" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res3_conv2_relu" | |
type: "ReLU" | |
bottom: "res3_conv2" | |
top: "res3_conv2" | |
} | |
layer { | |
name: "res3_conv3" | |
type: "Convolution" | |
bottom: "res3_conv2" | |
top: "res3_conv3" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 256 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res3_eletwise" | |
type: "Eltwise" | |
bottom: "res2_eletwise" | |
bottom: "res3_conv3" | |
top: "res3_eletwise" | |
eltwise_param { | |
operation: SUM | |
} | |
} | |
layer { | |
name: "res4_bn" | |
type: "BatchNorm" | |
bottom: "res3_eletwise" | |
top: "res4_bn" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res4_scale" | |
type: "Scale" | |
bottom: "res4_bn" | |
top: "res4_bn" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res4_relu" | |
type: "ReLU" | |
bottom: "res4_bn" | |
top: "res4_bn" | |
} | |
layer { | |
name: "res4_conv1" | |
type: "Convolution" | |
bottom: "res4_bn" | |
top: "res4_conv1" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 128 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res4_conv1_bn" | |
type: "BatchNorm" | |
bottom: "res4_conv1" | |
top: "res4_conv1" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res4_conv1_scale" | |
type: "Scale" | |
bottom: "res4_conv1" | |
top: "res4_conv1" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res4_conv1_relu" | |
type: "ReLU" | |
bottom: "res4_conv1" | |
top: "res4_conv1" | |
} | |
layer { | |
name: "res4_conv2" | |
type: "Convolution" | |
bottom: "res4_conv1" | |
top: "res4_conv2" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 128 | |
pad: 1 | |
kernel_size: 3 | |
stride: 2 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res4_conv2_bn" | |
type: "BatchNorm" | |
bottom: "res4_conv2" | |
top: "res4_conv2" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res4_conv2_scale" | |
type: "Scale" | |
bottom: "res4_conv2" | |
top: "res4_conv2" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res4_conv2_relu" | |
type: "ReLU" | |
bottom: "res4_conv2" | |
top: "res4_conv2" | |
} | |
layer { | |
name: "res4_conv3" | |
type: "Convolution" | |
bottom: "res4_conv2" | |
top: "res4_conv3" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 512 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res4_match_conv" | |
type: "Convolution" | |
bottom: "res4_bn" | |
top: "res4_match_conv" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 512 | |
pad: 0 | |
kernel_size: 1 | |
stride: 2 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res4_eletwise" | |
type: "Eltwise" | |
bottom: "res4_match_conv" | |
bottom: "res4_conv3" | |
top: "res4_eletwise" | |
eltwise_param { | |
operation: SUM | |
} | |
} | |
layer { | |
name: "res5_bn" | |
type: "BatchNorm" | |
bottom: "res4_eletwise" | |
top: "res5_bn" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res5_scale" | |
type: "Scale" | |
bottom: "res5_bn" | |
top: "res5_bn" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res5_relu" | |
type: "ReLU" | |
bottom: "res5_bn" | |
top: "res5_bn" | |
} | |
layer { | |
name: "res5_conv1" | |
type: "Convolution" | |
bottom: "res5_bn" | |
top: "res5_conv1" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 128 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res5_conv1_bn" | |
type: "BatchNorm" | |
bottom: "res5_conv1" | |
top: "res5_conv1" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res5_conv1_scale" | |
type: "Scale" | |
bottom: "res5_conv1" | |
top: "res5_conv1" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res5_conv1_relu" | |
type: "ReLU" | |
bottom: "res5_conv1" | |
top: "res5_conv1" | |
} | |
layer { | |
name: "res5_conv2" | |
type: "Convolution" | |
bottom: "res5_conv1" | |
top: "res5_conv2" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 128 | |
pad: 1 | |
kernel_size: 3 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res5_conv2_bn" | |
type: "BatchNorm" | |
bottom: "res5_conv2" | |
top: "res5_conv2" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res5_conv2_scale" | |
type: "Scale" | |
bottom: "res5_conv2" | |
top: "res5_conv2" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res5_conv2_relu" | |
type: "ReLU" | |
bottom: "res5_conv2" | |
top: "res5_conv2" | |
} | |
layer { | |
name: "res5_conv3" | |
type: "Convolution" | |
bottom: "res5_conv2" | |
top: "res5_conv3" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 512 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res5_eletwise" | |
type: "Eltwise" | |
bottom: "res4_eletwise" | |
bottom: "res5_conv3" | |
top: "res5_eletwise" | |
eltwise_param { | |
operation: SUM | |
} | |
} | |
layer { | |
name: "res6_bn" | |
type: "BatchNorm" | |
bottom: "res5_eletwise" | |
top: "res6_bn" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res6_scale" | |
type: "Scale" | |
bottom: "res6_bn" | |
top: "res6_bn" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res6_relu" | |
type: "ReLU" | |
bottom: "res6_bn" | |
top: "res6_bn" | |
} | |
layer { | |
name: "res6_conv1" | |
type: "Convolution" | |
bottom: "res6_bn" | |
top: "res6_conv1" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 128 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res6_conv1_bn" | |
type: "BatchNorm" | |
bottom: "res6_conv1" | |
top: "res6_conv1" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res6_conv1_scale" | |
type: "Scale" | |
bottom: "res6_conv1" | |
top: "res6_conv1" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res6_conv1_relu" | |
type: "ReLU" | |
bottom: "res6_conv1" | |
top: "res6_conv1" | |
} | |
layer { | |
name: "res6_conv2" | |
type: "Convolution" | |
bottom: "res6_conv1" | |
top: "res6_conv2" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 128 | |
pad: 1 | |
kernel_size: 3 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res6_conv2_bn" | |
type: "BatchNorm" | |
bottom: "res6_conv2" | |
top: "res6_conv2" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res6_conv2_scale" | |
type: "Scale" | |
bottom: "res6_conv2" | |
top: "res6_conv2" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res6_conv2_relu" | |
type: "ReLU" | |
bottom: "res6_conv2" | |
top: "res6_conv2" | |
} | |
layer { | |
name: "res6_conv3" | |
type: "Convolution" | |
bottom: "res6_conv2" | |
top: "res6_conv3" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 512 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res6_eletwise" | |
type: "Eltwise" | |
bottom: "res5_eletwise" | |
bottom: "res6_conv3" | |
top: "res6_eletwise" | |
eltwise_param { | |
operation: SUM | |
} | |
} | |
layer { | |
name: "res7_bn" | |
type: "BatchNorm" | |
bottom: "res6_eletwise" | |
top: "res7_bn" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res7_scale" | |
type: "Scale" | |
bottom: "res7_bn" | |
top: "res7_bn" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res7_relu" | |
type: "ReLU" | |
bottom: "res7_bn" | |
top: "res7_bn" | |
} | |
layer { | |
name: "res7_conv1" | |
type: "Convolution" | |
bottom: "res7_bn" | |
top: "res7_conv1" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 128 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res7_conv1_bn" | |
type: "BatchNorm" | |
bottom: "res7_conv1" | |
top: "res7_conv1" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res7_conv1_scale" | |
type: "Scale" | |
bottom: "res7_conv1" | |
top: "res7_conv1" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res7_conv1_relu" | |
type: "ReLU" | |
bottom: "res7_conv1" | |
top: "res7_conv1" | |
} | |
layer { | |
name: "res7_conv2" | |
type: "Convolution" | |
bottom: "res7_conv1" | |
top: "res7_conv2" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 128 | |
pad: 1 | |
kernel_size: 3 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res7_conv2_bn" | |
type: "BatchNorm" | |
bottom: "res7_conv2" | |
top: "res7_conv2" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res7_conv2_scale" | |
type: "Scale" | |
bottom: "res7_conv2" | |
top: "res7_conv2" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res7_conv2_relu" | |
type: "ReLU" | |
bottom: "res7_conv2" | |
top: "res7_conv2" | |
} | |
layer { | |
name: "res7_conv3" | |
type: "Convolution" | |
bottom: "res7_conv2" | |
top: "res7_conv3" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 512 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res7_eletwise" | |
type: "Eltwise" | |
bottom: "res6_eletwise" | |
bottom: "res7_conv3" | |
top: "res7_eletwise" | |
eltwise_param { | |
operation: SUM | |
} | |
} | |
layer { | |
name: "res8_bn" | |
type: "BatchNorm" | |
bottom: "res7_eletwise" | |
top: "res8_bn" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res8_scale" | |
type: "Scale" | |
bottom: "res8_bn" | |
top: "res8_bn" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res8_relu" | |
type: "ReLU" | |
bottom: "res8_bn" | |
top: "res8_bn" | |
} | |
layer { | |
name: "res8_conv1" | |
type: "Convolution" | |
bottom: "res8_bn" | |
top: "res8_conv1" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 128 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res8_conv1_bn" | |
type: "BatchNorm" | |
bottom: "res8_conv1" | |
top: "res8_conv1" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res8_conv1_scale" | |
type: "Scale" | |
bottom: "res8_conv1" | |
top: "res8_conv1" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res8_conv1_relu" | |
type: "ReLU" | |
bottom: "res8_conv1" | |
top: "res8_conv1" | |
} | |
layer { | |
name: "res8_conv2" | |
type: "Convolution" | |
bottom: "res8_conv1" | |
top: "res8_conv2" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 128 | |
pad: 1 | |
kernel_size: 3 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res8_conv2_bn" | |
type: "BatchNorm" | |
bottom: "res8_conv2" | |
top: "res8_conv2" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res8_conv2_scale" | |
type: "Scale" | |
bottom: "res8_conv2" | |
top: "res8_conv2" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res8_conv2_relu" | |
type: "ReLU" | |
bottom: "res8_conv2" | |
top: "res8_conv2" | |
} | |
layer { | |
name: "res8_conv3" | |
type: "Convolution" | |
bottom: "res8_conv2" | |
top: "res8_conv3" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 512 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res8_eletwise" | |
type: "Eltwise" | |
bottom: "res7_eletwise" | |
bottom: "res8_conv3" | |
top: "res8_eletwise" | |
eltwise_param { | |
operation: SUM | |
} | |
} | |
layer { | |
name: "res9_bn" | |
type: "BatchNorm" | |
bottom: "res8_eletwise" | |
top: "res9_bn" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res9_scale" | |
type: "Scale" | |
bottom: "res9_bn" | |
top: "res9_bn" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res9_relu" | |
type: "ReLU" | |
bottom: "res9_bn" | |
top: "res9_bn" | |
} | |
layer { | |
name: "res9_conv1" | |
type: "Convolution" | |
bottom: "res9_bn" | |
top: "res9_conv1" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 128 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res9_conv1_bn" | |
type: "BatchNorm" | |
bottom: "res9_conv1" | |
top: "res9_conv1" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res9_conv1_scale" | |
type: "Scale" | |
bottom: "res9_conv1" | |
top: "res9_conv1" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res9_conv1_relu" | |
type: "ReLU" | |
bottom: "res9_conv1" | |
top: "res9_conv1" | |
} | |
layer { | |
name: "res9_conv2" | |
type: "Convolution" | |
bottom: "res9_conv1" | |
top: "res9_conv2" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 128 | |
pad: 1 | |
kernel_size: 3 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res9_conv2_bn" | |
type: "BatchNorm" | |
bottom: "res9_conv2" | |
top: "res9_conv2" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res9_conv2_scale" | |
type: "Scale" | |
bottom: "res9_conv2" | |
top: "res9_conv2" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res9_conv2_relu" | |
type: "ReLU" | |
bottom: "res9_conv2" | |
top: "res9_conv2" | |
} | |
layer { | |
name: "res9_conv3" | |
type: "Convolution" | |
bottom: "res9_conv2" | |
top: "res9_conv3" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 512 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res9_eletwise" | |
type: "Eltwise" | |
bottom: "res8_eletwise" | |
bottom: "res9_conv3" | |
top: "res9_eletwise" | |
eltwise_param { | |
operation: SUM | |
} | |
} | |
layer { | |
name: "res10_bn" | |
type: "BatchNorm" | |
bottom: "res9_eletwise" | |
top: "res10_bn" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res10_scale" | |
type: "Scale" | |
bottom: "res10_bn" | |
top: "res10_bn" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res10_relu" | |
type: "ReLU" | |
bottom: "res10_bn" | |
top: "res10_bn" | |
} | |
layer { | |
name: "res10_conv1" | |
type: "Convolution" | |
bottom: "res10_bn" | |
top: "res10_conv1" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 128 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res10_conv1_bn" | |
type: "BatchNorm" | |
bottom: "res10_conv1" | |
top: "res10_conv1" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res10_conv1_scale" | |
type: "Scale" | |
bottom: "res10_conv1" | |
top: "res10_conv1" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res10_conv1_relu" | |
type: "ReLU" | |
bottom: "res10_conv1" | |
top: "res10_conv1" | |
} | |
layer { | |
name: "res10_conv2" | |
type: "Convolution" | |
bottom: "res10_conv1" | |
top: "res10_conv2" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 128 | |
pad: 1 | |
kernel_size: 3 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res10_conv2_bn" | |
type: "BatchNorm" | |
bottom: "res10_conv2" | |
top: "res10_conv2" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res10_conv2_scale" | |
type: "Scale" | |
bottom: "res10_conv2" | |
top: "res10_conv2" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res10_conv2_relu" | |
type: "ReLU" | |
bottom: "res10_conv2" | |
top: "res10_conv2" | |
} | |
layer { | |
name: "res10_conv3" | |
type: "Convolution" | |
bottom: "res10_conv2" | |
top: "res10_conv3" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 512 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res10_eletwise" | |
type: "Eltwise" | |
bottom: "res9_eletwise" | |
bottom: "res10_conv3" | |
top: "res10_eletwise" | |
eltwise_param { | |
operation: SUM | |
} | |
} | |
layer { | |
name: "res11_bn" | |
type: "BatchNorm" | |
bottom: "res10_eletwise" | |
top: "res11_bn" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res11_scale" | |
type: "Scale" | |
bottom: "res11_bn" | |
top: "res11_bn" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res11_relu" | |
type: "ReLU" | |
bottom: "res11_bn" | |
top: "res11_bn" | |
} | |
layer { | |
name: "res11_conv1" | |
type: "Convolution" | |
bottom: "res11_bn" | |
top: "res11_conv1" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 128 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res11_conv1_bn" | |
type: "BatchNorm" | |
bottom: "res11_conv1" | |
top: "res11_conv1" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res11_conv1_scale" | |
type: "Scale" | |
bottom: "res11_conv1" | |
top: "res11_conv1" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res11_conv1_relu" | |
type: "ReLU" | |
bottom: "res11_conv1" | |
top: "res11_conv1" | |
} | |
layer { | |
name: "res11_conv2" | |
type: "Convolution" | |
bottom: "res11_conv1" | |
top: "res11_conv2" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 128 | |
pad: 1 | |
kernel_size: 3 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res11_conv2_bn" | |
type: "BatchNorm" | |
bottom: "res11_conv2" | |
top: "res11_conv2" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res11_conv2_scale" | |
type: "Scale" | |
bottom: "res11_conv2" | |
top: "res11_conv2" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res11_conv2_relu" | |
type: "ReLU" | |
bottom: "res11_conv2" | |
top: "res11_conv2" | |
} | |
layer { | |
name: "res11_conv3" | |
type: "Convolution" | |
bottom: "res11_conv2" | |
top: "res11_conv3" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 512 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res11_eletwise" | |
type: "Eltwise" | |
bottom: "res10_eletwise" | |
bottom: "res11_conv3" | |
top: "res11_eletwise" | |
eltwise_param { | |
operation: SUM | |
} | |
} | |
layer { | |
name: "res12_bn" | |
type: "BatchNorm" | |
bottom: "res11_eletwise" | |
top: "res12_bn" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res12_scale" | |
type: "Scale" | |
bottom: "res12_bn" | |
top: "res12_bn" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res12_relu" | |
type: "ReLU" | |
bottom: "res12_bn" | |
top: "res12_bn" | |
} | |
layer { | |
name: "res12_conv1" | |
type: "Convolution" | |
bottom: "res12_bn" | |
top: "res12_conv1" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 128 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res12_conv1_bn" | |
type: "BatchNorm" | |
bottom: "res12_conv1" | |
top: "res12_conv1" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res12_conv1_scale" | |
type: "Scale" | |
bottom: "res12_conv1" | |
top: "res12_conv1" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res12_conv1_relu" | |
type: "ReLU" | |
bottom: "res12_conv1" | |
top: "res12_conv1" | |
} | |
layer { | |
name: "res12_conv2" | |
type: "Convolution" | |
bottom: "res12_conv1" | |
top: "res12_conv2" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 128 | |
pad: 1 | |
kernel_size: 3 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res12_conv2_bn" | |
type: "BatchNorm" | |
bottom: "res12_conv2" | |
top: "res12_conv2" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res12_conv2_scale" | |
type: "Scale" | |
bottom: "res12_conv2" | |
top: "res12_conv2" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res12_conv2_relu" | |
type: "ReLU" | |
bottom: "res12_conv2" | |
top: "res12_conv2" | |
} | |
layer { | |
name: "res12_conv3" | |
type: "Convolution" | |
bottom: "res12_conv2" | |
top: "res12_conv3" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 512 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res12_eletwise" | |
type: "Eltwise" | |
bottom: "res11_eletwise" | |
bottom: "res12_conv3" | |
top: "res12_eletwise" | |
eltwise_param { | |
operation: SUM | |
} | |
} | |
layer { | |
name: "res13_bn" | |
type: "BatchNorm" | |
bottom: "res12_eletwise" | |
top: "res13_bn" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res13_scale" | |
type: "Scale" | |
bottom: "res13_bn" | |
top: "res13_bn" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res13_relu" | |
type: "ReLU" | |
bottom: "res13_bn" | |
top: "res13_bn" | |
} | |
layer { | |
name: "res13_conv1" | |
type: "Convolution" | |
bottom: "res13_bn" | |
top: "res13_conv1" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 128 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res13_conv1_bn" | |
type: "BatchNorm" | |
bottom: "res13_conv1" | |
top: "res13_conv1" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res13_conv1_scale" | |
type: "Scale" | |
bottom: "res13_conv1" | |
top: "res13_conv1" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res13_conv1_relu" | |
type: "ReLU" | |
bottom: "res13_conv1" | |
top: "res13_conv1" | |
} | |
layer { | |
name: "res13_conv2" | |
type: "Convolution" | |
bottom: "res13_conv1" | |
top: "res13_conv2" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 128 | |
pad: 1 | |
kernel_size: 3 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res13_conv2_bn" | |
type: "BatchNorm" | |
bottom: "res13_conv2" | |
top: "res13_conv2" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res13_conv2_scale" | |
type: "Scale" | |
bottom: "res13_conv2" | |
top: "res13_conv2" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res13_conv2_relu" | |
type: "ReLU" | |
bottom: "res13_conv2" | |
top: "res13_conv2" | |
} | |
layer { | |
name: "res13_conv3" | |
type: "Convolution" | |
bottom: "res13_conv2" | |
top: "res13_conv3" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 512 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res13_eletwise" | |
type: "Eltwise" | |
bottom: "res12_eletwise" | |
bottom: "res13_conv3" | |
top: "res13_eletwise" | |
eltwise_param { | |
operation: SUM | |
} | |
} | |
layer { | |
name: "res14_bn" | |
type: "BatchNorm" | |
bottom: "res13_eletwise" | |
top: "res14_bn" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res14_scale" | |
type: "Scale" | |
bottom: "res14_bn" | |
top: "res14_bn" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res14_relu" | |
type: "ReLU" | |
bottom: "res14_bn" | |
top: "res14_bn" | |
} | |
layer { | |
name: "res14_conv1" | |
type: "Convolution" | |
bottom: "res14_bn" | |
top: "res14_conv1" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 128 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res14_conv1_bn" | |
type: "BatchNorm" | |
bottom: "res14_conv1" | |
top: "res14_conv1" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res14_conv1_scale" | |
type: "Scale" | |
bottom: "res14_conv1" | |
top: "res14_conv1" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res14_conv1_relu" | |
type: "ReLU" | |
bottom: "res14_conv1" | |
top: "res14_conv1" | |
} | |
layer { | |
name: "res14_conv2" | |
type: "Convolution" | |
bottom: "res14_conv1" | |
top: "res14_conv2" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 128 | |
pad: 1 | |
kernel_size: 3 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res14_conv2_bn" | |
type: "BatchNorm" | |
bottom: "res14_conv2" | |
top: "res14_conv2" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res14_conv2_scale" | |
type: "Scale" | |
bottom: "res14_conv2" | |
top: "res14_conv2" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res14_conv2_relu" | |
type: "ReLU" | |
bottom: "res14_conv2" | |
top: "res14_conv2" | |
} | |
layer { | |
name: "res14_conv3" | |
type: "Convolution" | |
bottom: "res14_conv2" | |
top: "res14_conv3" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 512 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res14_eletwise" | |
type: "Eltwise" | |
bottom: "res13_eletwise" | |
bottom: "res14_conv3" | |
top: "res14_eletwise" | |
eltwise_param { | |
operation: SUM | |
} | |
} | |
layer { | |
name: "res15_bn" | |
type: "BatchNorm" | |
bottom: "res14_eletwise" | |
top: "res15_bn" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res15_scale" | |
type: "Scale" | |
bottom: "res15_bn" | |
top: "res15_bn" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res15_relu" | |
type: "ReLU" | |
bottom: "res15_bn" | |
top: "res15_bn" | |
} | |
layer { | |
name: "res15_conv1" | |
type: "Convolution" | |
bottom: "res15_bn" | |
top: "res15_conv1" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 128 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res15_conv1_bn" | |
type: "BatchNorm" | |
bottom: "res15_conv1" | |
top: "res15_conv1" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res15_conv1_scale" | |
type: "Scale" | |
bottom: "res15_conv1" | |
top: "res15_conv1" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res15_conv1_relu" | |
type: "ReLU" | |
bottom: "res15_conv1" | |
top: "res15_conv1" | |
} | |
layer { | |
name: "res15_conv2" | |
type: "Convolution" | |
bottom: "res15_conv1" | |
top: "res15_conv2" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 128 | |
pad: 1 | |
kernel_size: 3 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res15_conv2_bn" | |
type: "BatchNorm" | |
bottom: "res15_conv2" | |
top: "res15_conv2" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res15_conv2_scale" | |
type: "Scale" | |
bottom: "res15_conv2" | |
top: "res15_conv2" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res15_conv2_relu" | |
type: "ReLU" | |
bottom: "res15_conv2" | |
top: "res15_conv2" | |
} | |
layer { | |
name: "res15_conv3" | |
type: "Convolution" | |
bottom: "res15_conv2" | |
top: "res15_conv3" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 512 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res15_eletwise" | |
type: "Eltwise" | |
bottom: "res14_eletwise" | |
bottom: "res15_conv3" | |
top: "res15_eletwise" | |
eltwise_param { | |
operation: SUM | |
} | |
} | |
layer { | |
name: "res16_bn" | |
type: "BatchNorm" | |
bottom: "res15_eletwise" | |
top: "res16_bn" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res16_scale" | |
type: "Scale" | |
bottom: "res16_bn" | |
top: "res16_bn" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res16_relu" | |
type: "ReLU" | |
bottom: "res16_bn" | |
top: "res16_bn" | |
} | |
layer { | |
name: "res16_conv1" | |
type: "Convolution" | |
bottom: "res16_bn" | |
top: "res16_conv1" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 128 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res16_conv1_bn" | |
type: "BatchNorm" | |
bottom: "res16_conv1" | |
top: "res16_conv1" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res16_conv1_scale" | |
type: "Scale" | |
bottom: "res16_conv1" | |
top: "res16_conv1" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res16_conv1_relu" | |
type: "ReLU" | |
bottom: "res16_conv1" | |
top: "res16_conv1" | |
} | |
layer { | |
name: "res16_conv2" | |
type: "Convolution" | |
bottom: "res16_conv1" | |
top: "res16_conv2" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 128 | |
pad: 1 | |
kernel_size: 3 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res16_conv2_bn" | |
type: "BatchNorm" | |
bottom: "res16_conv2" | |
top: "res16_conv2" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res16_conv2_scale" | |
type: "Scale" | |
bottom: "res16_conv2" | |
top: "res16_conv2" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res16_conv2_relu" | |
type: "ReLU" | |
bottom: "res16_conv2" | |
top: "res16_conv2" | |
} | |
layer { | |
name: "res16_conv3" | |
type: "Convolution" | |
bottom: "res16_conv2" | |
top: "res16_conv3" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 512 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res16_eletwise" | |
type: "Eltwise" | |
bottom: "res15_eletwise" | |
bottom: "res16_conv3" | |
top: "res16_eletwise" | |
eltwise_param { | |
operation: SUM | |
} | |
} | |
layer { | |
name: "res17_bn" | |
type: "BatchNorm" | |
bottom: "res16_eletwise" | |
top: "res17_bn" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res17_scale" | |
type: "Scale" | |
bottom: "res17_bn" | |
top: "res17_bn" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res17_relu" | |
type: "ReLU" | |
bottom: "res17_bn" | |
top: "res17_bn" | |
} | |
layer { | |
name: "res17_conv1" | |
type: "Convolution" | |
bottom: "res17_bn" | |
top: "res17_conv1" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 128 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res17_conv1_bn" | |
type: "BatchNorm" | |
bottom: "res17_conv1" | |
top: "res17_conv1" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res17_conv1_scale" | |
type: "Scale" | |
bottom: "res17_conv1" | |
top: "res17_conv1" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res17_conv1_relu" | |
type: "ReLU" | |
bottom: "res17_conv1" | |
top: "res17_conv1" | |
} | |
layer { | |
name: "res17_conv2" | |
type: "Convolution" | |
bottom: "res17_conv1" | |
top: "res17_conv2" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 128 | |
pad: 1 | |
kernel_size: 3 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res17_conv2_bn" | |
type: "BatchNorm" | |
bottom: "res17_conv2" | |
top: "res17_conv2" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res17_conv2_scale" | |
type: "Scale" | |
bottom: "res17_conv2" | |
top: "res17_conv2" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res17_conv2_relu" | |
type: "ReLU" | |
bottom: "res17_conv2" | |
top: "res17_conv2" | |
} | |
layer { | |
name: "res17_conv3" | |
type: "Convolution" | |
bottom: "res17_conv2" | |
top: "res17_conv3" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 512 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res17_eletwise" | |
type: "Eltwise" | |
bottom: "res16_eletwise" | |
bottom: "res17_conv3" | |
top: "res17_eletwise" | |
eltwise_param { | |
operation: SUM | |
} | |
} | |
layer { | |
name: "res18_bn" | |
type: "BatchNorm" | |
bottom: "res17_eletwise" | |
top: "res18_bn" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res18_scale" | |
type: "Scale" | |
bottom: "res18_bn" | |
top: "res18_bn" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res18_relu" | |
type: "ReLU" | |
bottom: "res18_bn" | |
top: "res18_bn" | |
} | |
layer { | |
name: "res18_conv1" | |
type: "Convolution" | |
bottom: "res18_bn" | |
top: "res18_conv1" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 128 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res18_conv1_bn" | |
type: "BatchNorm" | |
bottom: "res18_conv1" | |
top: "res18_conv1" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res18_conv1_scale" | |
type: "Scale" | |
bottom: "res18_conv1" | |
top: "res18_conv1" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res18_conv1_relu" | |
type: "ReLU" | |
bottom: "res18_conv1" | |
top: "res18_conv1" | |
} | |
layer { | |
name: "res18_conv2" | |
type: "Convolution" | |
bottom: "res18_conv1" | |
top: "res18_conv2" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 128 | |
pad: 1 | |
kernel_size: 3 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res18_conv2_bn" | |
type: "BatchNorm" | |
bottom: "res18_conv2" | |
top: "res18_conv2" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res18_conv2_scale" | |
type: "Scale" | |
bottom: "res18_conv2" | |
top: "res18_conv2" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res18_conv2_relu" | |
type: "ReLU" | |
bottom: "res18_conv2" | |
top: "res18_conv2" | |
} | |
layer { | |
name: "res18_conv3" | |
type: "Convolution" | |
bottom: "res18_conv2" | |
top: "res18_conv3" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 512 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res18_eletwise" | |
type: "Eltwise" | |
bottom: "res17_eletwise" | |
bottom: "res18_conv3" | |
top: "res18_eletwise" | |
eltwise_param { | |
operation: SUM | |
} | |
} | |
layer { | |
name: "res19_bn" | |
type: "BatchNorm" | |
bottom: "res18_eletwise" | |
top: "res19_bn" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res19_scale" | |
type: "Scale" | |
bottom: "res19_bn" | |
top: "res19_bn" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res19_relu" | |
type: "ReLU" | |
bottom: "res19_bn" | |
top: "res19_bn" | |
} | |
layer { | |
name: "res19_conv1" | |
type: "Convolution" | |
bottom: "res19_bn" | |
top: "res19_conv1" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 128 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res19_conv1_bn" | |
type: "BatchNorm" | |
bottom: "res19_conv1" | |
top: "res19_conv1" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res19_conv1_scale" | |
type: "Scale" | |
bottom: "res19_conv1" | |
top: "res19_conv1" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res19_conv1_relu" | |
type: "ReLU" | |
bottom: "res19_conv1" | |
top: "res19_conv1" | |
} | |
layer { | |
name: "res19_conv2" | |
type: "Convolution" | |
bottom: "res19_conv1" | |
top: "res19_conv2" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 128 | |
pad: 1 | |
kernel_size: 3 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res19_conv2_bn" | |
type: "BatchNorm" | |
bottom: "res19_conv2" | |
top: "res19_conv2" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res19_conv2_scale" | |
type: "Scale" | |
bottom: "res19_conv2" | |
top: "res19_conv2" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res19_conv2_relu" | |
type: "ReLU" | |
bottom: "res19_conv2" | |
top: "res19_conv2" | |
} | |
layer { | |
name: "res19_conv3" | |
type: "Convolution" | |
bottom: "res19_conv2" | |
top: "res19_conv3" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 512 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res19_eletwise" | |
type: "Eltwise" | |
bottom: "res18_eletwise" | |
bottom: "res19_conv3" | |
top: "res19_eletwise" | |
eltwise_param { | |
operation: SUM | |
} | |
} | |
layer { | |
name: "res20_bn" | |
type: "BatchNorm" | |
bottom: "res19_eletwise" | |
top: "res20_bn" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res20_scale" | |
type: "Scale" | |
bottom: "res20_bn" | |
top: "res20_bn" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res20_relu" | |
type: "ReLU" | |
bottom: "res20_bn" | |
top: "res20_bn" | |
} | |
layer { | |
name: "res20_conv1" | |
type: "Convolution" | |
bottom: "res20_bn" | |
top: "res20_conv1" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 128 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res20_conv1_bn" | |
type: "BatchNorm" | |
bottom: "res20_conv1" | |
top: "res20_conv1" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res20_conv1_scale" | |
type: "Scale" | |
bottom: "res20_conv1" | |
top: "res20_conv1" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res20_conv1_relu" | |
type: "ReLU" | |
bottom: "res20_conv1" | |
top: "res20_conv1" | |
} | |
layer { | |
name: "res20_conv2" | |
type: "Convolution" | |
bottom: "res20_conv1" | |
top: "res20_conv2" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 128 | |
pad: 1 | |
kernel_size: 3 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res20_conv2_bn" | |
type: "BatchNorm" | |
bottom: "res20_conv2" | |
top: "res20_conv2" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res20_conv2_scale" | |
type: "Scale" | |
bottom: "res20_conv2" | |
top: "res20_conv2" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res20_conv2_relu" | |
type: "ReLU" | |
bottom: "res20_conv2" | |
top: "res20_conv2" | |
} | |
layer { | |
name: "res20_conv3" | |
type: "Convolution" | |
bottom: "res20_conv2" | |
top: "res20_conv3" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 512 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res20_eletwise" | |
type: "Eltwise" | |
bottom: "res19_eletwise" | |
bottom: "res20_conv3" | |
top: "res20_eletwise" | |
eltwise_param { | |
operation: SUM | |
} | |
} | |
layer { | |
name: "res21_bn" | |
type: "BatchNorm" | |
bottom: "res20_eletwise" | |
top: "res21_bn" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res21_scale" | |
type: "Scale" | |
bottom: "res21_bn" | |
top: "res21_bn" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res21_relu" | |
type: "ReLU" | |
bottom: "res21_bn" | |
top: "res21_bn" | |
} | |
layer { | |
name: "res21_conv1" | |
type: "Convolution" | |
bottom: "res21_bn" | |
top: "res21_conv1" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 128 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res21_conv1_bn" | |
type: "BatchNorm" | |
bottom: "res21_conv1" | |
top: "res21_conv1" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res21_conv1_scale" | |
type: "Scale" | |
bottom: "res21_conv1" | |
top: "res21_conv1" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res21_conv1_relu" | |
type: "ReLU" | |
bottom: "res21_conv1" | |
top: "res21_conv1" | |
} | |
layer { | |
name: "res21_conv2" | |
type: "Convolution" | |
bottom: "res21_conv1" | |
top: "res21_conv2" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 128 | |
pad: 1 | |
kernel_size: 3 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res21_conv2_bn" | |
type: "BatchNorm" | |
bottom: "res21_conv2" | |
top: "res21_conv2" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res21_conv2_scale" | |
type: "Scale" | |
bottom: "res21_conv2" | |
top: "res21_conv2" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res21_conv2_relu" | |
type: "ReLU" | |
bottom: "res21_conv2" | |
top: "res21_conv2" | |
} | |
layer { | |
name: "res21_conv3" | |
type: "Convolution" | |
bottom: "res21_conv2" | |
top: "res21_conv3" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 512 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res21_eletwise" | |
type: "Eltwise" | |
bottom: "res20_eletwise" | |
bottom: "res21_conv3" | |
top: "res21_eletwise" | |
eltwise_param { | |
operation: SUM | |
} | |
} | |
layer { | |
name: "res22_bn" | |
type: "BatchNorm" | |
bottom: "res21_eletwise" | |
top: "res22_bn" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res22_scale" | |
type: "Scale" | |
bottom: "res22_bn" | |
top: "res22_bn" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res22_relu" | |
type: "ReLU" | |
bottom: "res22_bn" | |
top: "res22_bn" | |
} | |
layer { | |
name: "res22_conv1" | |
type: "Convolution" | |
bottom: "res22_bn" | |
top: "res22_conv1" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 128 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res22_conv1_bn" | |
type: "BatchNorm" | |
bottom: "res22_conv1" | |
top: "res22_conv1" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res22_conv1_scale" | |
type: "Scale" | |
bottom: "res22_conv1" | |
top: "res22_conv1" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res22_conv1_relu" | |
type: "ReLU" | |
bottom: "res22_conv1" | |
top: "res22_conv1" | |
} | |
layer { | |
name: "res22_conv2" | |
type: "Convolution" | |
bottom: "res22_conv1" | |
top: "res22_conv2" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 128 | |
pad: 1 | |
kernel_size: 3 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res22_conv2_bn" | |
type: "BatchNorm" | |
bottom: "res22_conv2" | |
top: "res22_conv2" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res22_conv2_scale" | |
type: "Scale" | |
bottom: "res22_conv2" | |
top: "res22_conv2" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res22_conv2_relu" | |
type: "ReLU" | |
bottom: "res22_conv2" | |
top: "res22_conv2" | |
} | |
layer { | |
name: "res22_conv3" | |
type: "Convolution" | |
bottom: "res22_conv2" | |
top: "res22_conv3" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 512 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res22_eletwise" | |
type: "Eltwise" | |
bottom: "res21_eletwise" | |
bottom: "res22_conv3" | |
top: "res22_eletwise" | |
eltwise_param { | |
operation: SUM | |
} | |
} | |
layer { | |
name: "res23_bn" | |
type: "BatchNorm" | |
bottom: "res22_eletwise" | |
top: "res23_bn" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res23_scale" | |
type: "Scale" | |
bottom: "res23_bn" | |
top: "res23_bn" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res23_relu" | |
type: "ReLU" | |
bottom: "res23_bn" | |
top: "res23_bn" | |
} | |
layer { | |
name: "res23_conv1" | |
type: "Convolution" | |
bottom: "res23_bn" | |
top: "res23_conv1" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 128 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res23_conv1_bn" | |
type: "BatchNorm" | |
bottom: "res23_conv1" | |
top: "res23_conv1" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res23_conv1_scale" | |
type: "Scale" | |
bottom: "res23_conv1" | |
top: "res23_conv1" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res23_conv1_relu" | |
type: "ReLU" | |
bottom: "res23_conv1" | |
top: "res23_conv1" | |
} | |
layer { | |
name: "res23_conv2" | |
type: "Convolution" | |
bottom: "res23_conv1" | |
top: "res23_conv2" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 128 | |
pad: 1 | |
kernel_size: 3 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res23_conv2_bn" | |
type: "BatchNorm" | |
bottom: "res23_conv2" | |
top: "res23_conv2" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res23_conv2_scale" | |
type: "Scale" | |
bottom: "res23_conv2" | |
top: "res23_conv2" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res23_conv2_relu" | |
type: "ReLU" | |
bottom: "res23_conv2" | |
top: "res23_conv2" | |
} | |
layer { | |
name: "res23_conv3" | |
type: "Convolution" | |
bottom: "res23_conv2" | |
top: "res23_conv3" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 512 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res23_eletwise" | |
type: "Eltwise" | |
bottom: "res22_eletwise" | |
bottom: "res23_conv3" | |
top: "res23_eletwise" | |
eltwise_param { | |
operation: SUM | |
} | |
} | |
layer { | |
name: "res24_bn" | |
type: "BatchNorm" | |
bottom: "res23_eletwise" | |
top: "res24_bn" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res24_scale" | |
type: "Scale" | |
bottom: "res24_bn" | |
top: "res24_bn" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res24_relu" | |
type: "ReLU" | |
bottom: "res24_bn" | |
top: "res24_bn" | |
} | |
layer { | |
name: "res24_conv1" | |
type: "Convolution" | |
bottom: "res24_bn" | |
top: "res24_conv1" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 128 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res24_conv1_bn" | |
type: "BatchNorm" | |
bottom: "res24_conv1" | |
top: "res24_conv1" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res24_conv1_scale" | |
type: "Scale" | |
bottom: "res24_conv1" | |
top: "res24_conv1" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res24_conv1_relu" | |
type: "ReLU" | |
bottom: "res24_conv1" | |
top: "res24_conv1" | |
} | |
layer { | |
name: "res24_conv2" | |
type: "Convolution" | |
bottom: "res24_conv1" | |
top: "res24_conv2" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 128 | |
pad: 1 | |
kernel_size: 3 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res24_conv2_bn" | |
type: "BatchNorm" | |
bottom: "res24_conv2" | |
top: "res24_conv2" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res24_conv2_scale" | |
type: "Scale" | |
bottom: "res24_conv2" | |
top: "res24_conv2" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res24_conv2_relu" | |
type: "ReLU" | |
bottom: "res24_conv2" | |
top: "res24_conv2" | |
} | |
layer { | |
name: "res24_conv3" | |
type: "Convolution" | |
bottom: "res24_conv2" | |
top: "res24_conv3" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 512 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res24_eletwise" | |
type: "Eltwise" | |
bottom: "res23_eletwise" | |
bottom: "res24_conv3" | |
top: "res24_eletwise" | |
eltwise_param { | |
operation: SUM | |
} | |
} | |
layer { | |
name: "res25_bn" | |
type: "BatchNorm" | |
bottom: "res24_eletwise" | |
top: "res25_bn" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res25_scale" | |
type: "Scale" | |
bottom: "res25_bn" | |
top: "res25_bn" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res25_relu" | |
type: "ReLU" | |
bottom: "res25_bn" | |
top: "res25_bn" | |
} | |
layer { | |
name: "res25_conv1" | |
type: "Convolution" | |
bottom: "res25_bn" | |
top: "res25_conv1" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 128 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res25_conv1_bn" | |
type: "BatchNorm" | |
bottom: "res25_conv1" | |
top: "res25_conv1" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res25_conv1_scale" | |
type: "Scale" | |
bottom: "res25_conv1" | |
top: "res25_conv1" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res25_conv1_relu" | |
type: "ReLU" | |
bottom: "res25_conv1" | |
top: "res25_conv1" | |
} | |
layer { | |
name: "res25_conv2" | |
type: "Convolution" | |
bottom: "res25_conv1" | |
top: "res25_conv2" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 128 | |
pad: 1 | |
kernel_size: 3 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res25_conv2_bn" | |
type: "BatchNorm" | |
bottom: "res25_conv2" | |
top: "res25_conv2" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res25_conv2_scale" | |
type: "Scale" | |
bottom: "res25_conv2" | |
top: "res25_conv2" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res25_conv2_relu" | |
type: "ReLU" | |
bottom: "res25_conv2" | |
top: "res25_conv2" | |
} | |
layer { | |
name: "res25_conv3" | |
type: "Convolution" | |
bottom: "res25_conv2" | |
top: "res25_conv3" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 512 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res25_eletwise" | |
type: "Eltwise" | |
bottom: "res24_eletwise" | |
bottom: "res25_conv3" | |
top: "res25_eletwise" | |
eltwise_param { | |
operation: SUM | |
} | |
} | |
layer { | |
name: "res26_bn" | |
type: "BatchNorm" | |
bottom: "res25_eletwise" | |
top: "res26_bn" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res26_scale" | |
type: "Scale" | |
bottom: "res26_bn" | |
top: "res26_bn" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res26_relu" | |
type: "ReLU" | |
bottom: "res26_bn" | |
top: "res26_bn" | |
} | |
layer { | |
name: "res26_conv1" | |
type: "Convolution" | |
bottom: "res26_bn" | |
top: "res26_conv1" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 128 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res26_conv1_bn" | |
type: "BatchNorm" | |
bottom: "res26_conv1" | |
top: "res26_conv1" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res26_conv1_scale" | |
type: "Scale" | |
bottom: "res26_conv1" | |
top: "res26_conv1" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res26_conv1_relu" | |
type: "ReLU" | |
bottom: "res26_conv1" | |
top: "res26_conv1" | |
} | |
layer { | |
name: "res26_conv2" | |
type: "Convolution" | |
bottom: "res26_conv1" | |
top: "res26_conv2" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 128 | |
pad: 1 | |
kernel_size: 3 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res26_conv2_bn" | |
type: "BatchNorm" | |
bottom: "res26_conv2" | |
top: "res26_conv2" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res26_conv2_scale" | |
type: "Scale" | |
bottom: "res26_conv2" | |
top: "res26_conv2" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res26_conv2_relu" | |
type: "ReLU" | |
bottom: "res26_conv2" | |
top: "res26_conv2" | |
} | |
layer { | |
name: "res26_conv3" | |
type: "Convolution" | |
bottom: "res26_conv2" | |
top: "res26_conv3" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 512 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res26_eletwise" | |
type: "Eltwise" | |
bottom: "res25_eletwise" | |
bottom: "res26_conv3" | |
top: "res26_eletwise" | |
eltwise_param { | |
operation: SUM | |
} | |
} | |
layer { | |
name: "res27_bn" | |
type: "BatchNorm" | |
bottom: "res26_eletwise" | |
top: "res27_bn" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res27_scale" | |
type: "Scale" | |
bottom: "res27_bn" | |
top: "res27_bn" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res27_relu" | |
type: "ReLU" | |
bottom: "res27_bn" | |
top: "res27_bn" | |
} | |
layer { | |
name: "res27_conv1" | |
type: "Convolution" | |
bottom: "res27_bn" | |
top: "res27_conv1" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 128 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res27_conv1_bn" | |
type: "BatchNorm" | |
bottom: "res27_conv1" | |
top: "res27_conv1" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res27_conv1_scale" | |
type: "Scale" | |
bottom: "res27_conv1" | |
top: "res27_conv1" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res27_conv1_relu" | |
type: "ReLU" | |
bottom: "res27_conv1" | |
top: "res27_conv1" | |
} | |
layer { | |
name: "res27_conv2" | |
type: "Convolution" | |
bottom: "res27_conv1" | |
top: "res27_conv2" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 128 | |
pad: 1 | |
kernel_size: 3 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res27_conv2_bn" | |
type: "BatchNorm" | |
bottom: "res27_conv2" | |
top: "res27_conv2" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res27_conv2_scale" | |
type: "Scale" | |
bottom: "res27_conv2" | |
top: "res27_conv2" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res27_conv2_relu" | |
type: "ReLU" | |
bottom: "res27_conv2" | |
top: "res27_conv2" | |
} | |
layer { | |
name: "res27_conv3" | |
type: "Convolution" | |
bottom: "res27_conv2" | |
top: "res27_conv3" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 512 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res27_eletwise" | |
type: "Eltwise" | |
bottom: "res26_eletwise" | |
bottom: "res27_conv3" | |
top: "res27_eletwise" | |
eltwise_param { | |
operation: SUM | |
} | |
} | |
layer { | |
name: "res28_bn" | |
type: "BatchNorm" | |
bottom: "res27_eletwise" | |
top: "res28_bn" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res28_scale" | |
type: "Scale" | |
bottom: "res28_bn" | |
top: "res28_bn" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res28_relu" | |
type: "ReLU" | |
bottom: "res28_bn" | |
top: "res28_bn" | |
} | |
layer { | |
name: "res28_conv1" | |
type: "Convolution" | |
bottom: "res28_bn" | |
top: "res28_conv1" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 128 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res28_conv1_bn" | |
type: "BatchNorm" | |
bottom: "res28_conv1" | |
top: "res28_conv1" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res28_conv1_scale" | |
type: "Scale" | |
bottom: "res28_conv1" | |
top: "res28_conv1" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res28_conv1_relu" | |
type: "ReLU" | |
bottom: "res28_conv1" | |
top: "res28_conv1" | |
} | |
layer { | |
name: "res28_conv2" | |
type: "Convolution" | |
bottom: "res28_conv1" | |
top: "res28_conv2" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 128 | |
pad: 1 | |
kernel_size: 3 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res28_conv2_bn" | |
type: "BatchNorm" | |
bottom: "res28_conv2" | |
top: "res28_conv2" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res28_conv2_scale" | |
type: "Scale" | |
bottom: "res28_conv2" | |
top: "res28_conv2" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res28_conv2_relu" | |
type: "ReLU" | |
bottom: "res28_conv2" | |
top: "res28_conv2" | |
} | |
layer { | |
name: "res28_conv3" | |
type: "Convolution" | |
bottom: "res28_conv2" | |
top: "res28_conv3" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 512 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res28_eletwise" | |
type: "Eltwise" | |
bottom: "res27_eletwise" | |
bottom: "res28_conv3" | |
top: "res28_eletwise" | |
eltwise_param { | |
operation: SUM | |
} | |
} | |
layer { | |
name: "res29_bn" | |
type: "BatchNorm" | |
bottom: "res28_eletwise" | |
top: "res29_bn" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res29_scale" | |
type: "Scale" | |
bottom: "res29_bn" | |
top: "res29_bn" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res29_relu" | |
type: "ReLU" | |
bottom: "res29_bn" | |
top: "res29_bn" | |
} | |
layer { | |
name: "res29_conv1" | |
type: "Convolution" | |
bottom: "res29_bn" | |
top: "res29_conv1" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 128 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res29_conv1_bn" | |
type: "BatchNorm" | |
bottom: "res29_conv1" | |
top: "res29_conv1" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res29_conv1_scale" | |
type: "Scale" | |
bottom: "res29_conv1" | |
top: "res29_conv1" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res29_conv1_relu" | |
type: "ReLU" | |
bottom: "res29_conv1" | |
top: "res29_conv1" | |
} | |
layer { | |
name: "res29_conv2" | |
type: "Convolution" | |
bottom: "res29_conv1" | |
top: "res29_conv2" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 128 | |
pad: 1 | |
kernel_size: 3 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res29_conv2_bn" | |
type: "BatchNorm" | |
bottom: "res29_conv2" | |
top: "res29_conv2" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res29_conv2_scale" | |
type: "Scale" | |
bottom: "res29_conv2" | |
top: "res29_conv2" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res29_conv2_relu" | |
type: "ReLU" | |
bottom: "res29_conv2" | |
top: "res29_conv2" | |
} | |
layer { | |
name: "res29_conv3" | |
type: "Convolution" | |
bottom: "res29_conv2" | |
top: "res29_conv3" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 512 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res29_eletwise" | |
type: "Eltwise" | |
bottom: "res28_eletwise" | |
bottom: "res29_conv3" | |
top: "res29_eletwise" | |
eltwise_param { | |
operation: SUM | |
} | |
} | |
layer { | |
name: "res30_bn" | |
type: "BatchNorm" | |
bottom: "res29_eletwise" | |
top: "res30_bn" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res30_scale" | |
type: "Scale" | |
bottom: "res30_bn" | |
top: "res30_bn" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res30_relu" | |
type: "ReLU" | |
bottom: "res30_bn" | |
top: "res30_bn" | |
} | |
layer { | |
name: "res30_conv1" | |
type: "Convolution" | |
bottom: "res30_bn" | |
top: "res30_conv1" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 128 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res30_conv1_bn" | |
type: "BatchNorm" | |
bottom: "res30_conv1" | |
top: "res30_conv1" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res30_conv1_scale" | |
type: "Scale" | |
bottom: "res30_conv1" | |
top: "res30_conv1" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res30_conv1_relu" | |
type: "ReLU" | |
bottom: "res30_conv1" | |
top: "res30_conv1" | |
} | |
layer { | |
name: "res30_conv2" | |
type: "Convolution" | |
bottom: "res30_conv1" | |
top: "res30_conv2" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 128 | |
pad: 1 | |
kernel_size: 3 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res30_conv2_bn" | |
type: "BatchNorm" | |
bottom: "res30_conv2" | |
top: "res30_conv2" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res30_conv2_scale" | |
type: "Scale" | |
bottom: "res30_conv2" | |
top: "res30_conv2" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res30_conv2_relu" | |
type: "ReLU" | |
bottom: "res30_conv2" | |
top: "res30_conv2" | |
} | |
layer { | |
name: "res30_conv3" | |
type: "Convolution" | |
bottom: "res30_conv2" | |
top: "res30_conv3" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 512 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res30_eletwise" | |
type: "Eltwise" | |
bottom: "res29_eletwise" | |
bottom: "res30_conv3" | |
top: "res30_eletwise" | |
eltwise_param { | |
operation: SUM | |
} | |
} | |
layer { | |
name: "res31_bn" | |
type: "BatchNorm" | |
bottom: "res30_eletwise" | |
top: "res31_bn" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res31_scale" | |
type: "Scale" | |
bottom: "res31_bn" | |
top: "res31_bn" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res31_relu" | |
type: "ReLU" | |
bottom: "res31_bn" | |
top: "res31_bn" | |
} | |
layer { | |
name: "res31_conv1" | |
type: "Convolution" | |
bottom: "res31_bn" | |
top: "res31_conv1" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 128 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res31_conv1_bn" | |
type: "BatchNorm" | |
bottom: "res31_conv1" | |
top: "res31_conv1" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res31_conv1_scale" | |
type: "Scale" | |
bottom: "res31_conv1" | |
top: "res31_conv1" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res31_conv1_relu" | |
type: "ReLU" | |
bottom: "res31_conv1" | |
top: "res31_conv1" | |
} | |
layer { | |
name: "res31_conv2" | |
type: "Convolution" | |
bottom: "res31_conv1" | |
top: "res31_conv2" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 128 | |
pad: 1 | |
kernel_size: 3 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res31_conv2_bn" | |
type: "BatchNorm" | |
bottom: "res31_conv2" | |
top: "res31_conv2" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res31_conv2_scale" | |
type: "Scale" | |
bottom: "res31_conv2" | |
top: "res31_conv2" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res31_conv2_relu" | |
type: "ReLU" | |
bottom: "res31_conv2" | |
top: "res31_conv2" | |
} | |
layer { | |
name: "res31_conv3" | |
type: "Convolution" | |
bottom: "res31_conv2" | |
top: "res31_conv3" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 512 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res31_eletwise" | |
type: "Eltwise" | |
bottom: "res30_eletwise" | |
bottom: "res31_conv3" | |
top: "res31_eletwise" | |
eltwise_param { | |
operation: SUM | |
} | |
} | |
layer { | |
name: "res32_bn" | |
type: "BatchNorm" | |
bottom: "res31_eletwise" | |
top: "res32_bn" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res32_scale" | |
type: "Scale" | |
bottom: "res32_bn" | |
top: "res32_bn" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res32_relu" | |
type: "ReLU" | |
bottom: "res32_bn" | |
top: "res32_bn" | |
} | |
layer { | |
name: "res32_conv1" | |
type: "Convolution" | |
bottom: "res32_bn" | |
top: "res32_conv1" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 128 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res32_conv1_bn" | |
type: "BatchNorm" | |
bottom: "res32_conv1" | |
top: "res32_conv1" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res32_conv1_scale" | |
type: "Scale" | |
bottom: "res32_conv1" | |
top: "res32_conv1" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res32_conv1_relu" | |
type: "ReLU" | |
bottom: "res32_conv1" | |
top: "res32_conv1" | |
} | |
layer { | |
name: "res32_conv2" | |
type: "Convolution" | |
bottom: "res32_conv1" | |
top: "res32_conv2" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 128 | |
pad: 1 | |
kernel_size: 3 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res32_conv2_bn" | |
type: "BatchNorm" | |
bottom: "res32_conv2" | |
top: "res32_conv2" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res32_conv2_scale" | |
type: "Scale" | |
bottom: "res32_conv2" | |
top: "res32_conv2" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res32_conv2_relu" | |
type: "ReLU" | |
bottom: "res32_conv2" | |
top: "res32_conv2" | |
} | |
layer { | |
name: "res32_conv3" | |
type: "Convolution" | |
bottom: "res32_conv2" | |
top: "res32_conv3" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 512 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res32_eletwise" | |
type: "Eltwise" | |
bottom: "res31_eletwise" | |
bottom: "res32_conv3" | |
top: "res32_eletwise" | |
eltwise_param { | |
operation: SUM | |
} | |
} | |
layer { | |
name: "res33_bn" | |
type: "BatchNorm" | |
bottom: "res32_eletwise" | |
top: "res33_bn" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res33_scale" | |
type: "Scale" | |
bottom: "res33_bn" | |
top: "res33_bn" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res33_relu" | |
type: "ReLU" | |
bottom: "res33_bn" | |
top: "res33_bn" | |
} | |
layer { | |
name: "res33_conv1" | |
type: "Convolution" | |
bottom: "res33_bn" | |
top: "res33_conv1" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 128 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res33_conv1_bn" | |
type: "BatchNorm" | |
bottom: "res33_conv1" | |
top: "res33_conv1" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res33_conv1_scale" | |
type: "Scale" | |
bottom: "res33_conv1" | |
top: "res33_conv1" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res33_conv1_relu" | |
type: "ReLU" | |
bottom: "res33_conv1" | |
top: "res33_conv1" | |
} | |
layer { | |
name: "res33_conv2" | |
type: "Convolution" | |
bottom: "res33_conv1" | |
top: "res33_conv2" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 128 | |
pad: 1 | |
kernel_size: 3 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res33_conv2_bn" | |
type: "BatchNorm" | |
bottom: "res33_conv2" | |
top: "res33_conv2" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res33_conv2_scale" | |
type: "Scale" | |
bottom: "res33_conv2" | |
top: "res33_conv2" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res33_conv2_relu" | |
type: "ReLU" | |
bottom: "res33_conv2" | |
top: "res33_conv2" | |
} | |
layer { | |
name: "res33_conv3" | |
type: "Convolution" | |
bottom: "res33_conv2" | |
top: "res33_conv3" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 512 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res33_eletwise" | |
type: "Eltwise" | |
bottom: "res32_eletwise" | |
bottom: "res33_conv3" | |
top: "res33_eletwise" | |
eltwise_param { | |
operation: SUM | |
} | |
} | |
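# res34: transition to the next stage. The bottleneck widens to 256/256/1024, the 3x3
# convolution uses stride 2 to halve the spatial resolution, and the shortcut is a
# 1x1 stride-2 projection (res34_match_conv, 1024 outputs) taken from the pre-activated
# input res34_bn, so both inputs to res34_eletwise have matching shape.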
layer { | |
name: "res34_bn" | |
type: "BatchNorm" | |
bottom: "res33_eletwise" | |
top: "res34_bn" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res34_scale" | |
type: "Scale" | |
bottom: "res34_bn" | |
top: "res34_bn" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res34_relu" | |
type: "ReLU" | |
bottom: "res34_bn" | |
top: "res34_bn" | |
} | |
layer { | |
name: "res34_conv1" | |
type: "Convolution" | |
bottom: "res34_bn" | |
top: "res34_conv1" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 256 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res34_conv1_bn" | |
type: "BatchNorm" | |
bottom: "res34_conv1" | |
top: "res34_conv1" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res34_conv1_scale" | |
type: "Scale" | |
bottom: "res34_conv1" | |
top: "res34_conv1" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res34_conv1_relu" | |
type: "ReLU" | |
bottom: "res34_conv1" | |
top: "res34_conv1" | |
} | |
layer { | |
name: "res34_conv2" | |
type: "Convolution" | |
bottom: "res34_conv1" | |
top: "res34_conv2" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 256 | |
pad: 1 | |
kernel_size: 3 | |
stride: 2 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res34_conv2_bn" | |
type: "BatchNorm" | |
bottom: "res34_conv2" | |
top: "res34_conv2" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res34_conv2_scale" | |
type: "Scale" | |
bottom: "res34_conv2" | |
top: "res34_conv2" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res34_conv2_relu" | |
type: "ReLU" | |
bottom: "res34_conv2" | |
top: "res34_conv2" | |
} | |
layer { | |
name: "res34_conv3" | |
type: "Convolution" | |
bottom: "res34_conv2" | |
top: "res34_conv3" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 1024 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res34_match_conv" | |
type: "Convolution" | |
bottom: "res34_bn" | |
top: "res34_match_conv" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 1024 | |
pad: 0 | |
kernel_size: 1 | |
stride: 2 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res34_eletwise" | |
type: "Eltwise" | |
bottom: "res34_match_conv" | |
bottom: "res34_conv3" | |
top: "res34_eletwise" | |
eltwise_param { | |
operation: SUM | |
} | |
} | |
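# res35 - res46: twelve identity-shortcut bottleneck units at the new width, each
# 1x1 (256) -> 3x3 pad 1 (256) -> 1x1 (1024), added to the previous Eltwise sum.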
layer { | |
name: "res35_bn" | |
type: "BatchNorm" | |
bottom: "res34_eletwise" | |
top: "res35_bn" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res35_scale" | |
type: "Scale" | |
bottom: "res35_bn" | |
top: "res35_bn" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res35_relu" | |
type: "ReLU" | |
bottom: "res35_bn" | |
top: "res35_bn" | |
} | |
layer { | |
name: "res35_conv1" | |
type: "Convolution" | |
bottom: "res35_bn" | |
top: "res35_conv1" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 256 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res35_conv1_bn" | |
type: "BatchNorm" | |
bottom: "res35_conv1" | |
top: "res35_conv1" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res35_conv1_scale" | |
type: "Scale" | |
bottom: "res35_conv1" | |
top: "res35_conv1" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res35_conv1_relu" | |
type: "ReLU" | |
bottom: "res35_conv1" | |
top: "res35_conv1" | |
} | |
layer { | |
name: "res35_conv2" | |
type: "Convolution" | |
bottom: "res35_conv1" | |
top: "res35_conv2" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 256 | |
pad: 1 | |
kernel_size: 3 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res35_conv2_bn" | |
type: "BatchNorm" | |
bottom: "res35_conv2" | |
top: "res35_conv2" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res35_conv2_scale" | |
type: "Scale" | |
bottom: "res35_conv2" | |
top: "res35_conv2" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res35_conv2_relu" | |
type: "ReLU" | |
bottom: "res35_conv2" | |
top: "res35_conv2" | |
} | |
layer { | |
name: "res35_conv3" | |
type: "Convolution" | |
bottom: "res35_conv2" | |
top: "res35_conv3" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 1024 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res35_eletwise" | |
type: "Eltwise" | |
bottom: "res34_eletwise" | |
bottom: "res35_conv3" | |
top: "res35_eletwise" | |
eltwise_param { | |
operation: SUM | |
} | |
} | |
layer { | |
name: "res36_bn" | |
type: "BatchNorm" | |
bottom: "res35_eletwise" | |
top: "res36_bn" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res36_scale" | |
type: "Scale" | |
bottom: "res36_bn" | |
top: "res36_bn" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res36_relu" | |
type: "ReLU" | |
bottom: "res36_bn" | |
top: "res36_bn" | |
} | |
layer { | |
name: "res36_conv1" | |
type: "Convolution" | |
bottom: "res36_bn" | |
top: "res36_conv1" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 256 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res36_conv1_bn" | |
type: "BatchNorm" | |
bottom: "res36_conv1" | |
top: "res36_conv1" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res36_conv1_scale" | |
type: "Scale" | |
bottom: "res36_conv1" | |
top: "res36_conv1" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res36_conv1_relu" | |
type: "ReLU" | |
bottom: "res36_conv1" | |
top: "res36_conv1" | |
} | |
layer { | |
name: "res36_conv2" | |
type: "Convolution" | |
bottom: "res36_conv1" | |
top: "res36_conv2" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 256 | |
pad: 1 | |
kernel_size: 3 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res36_conv2_bn" | |
type: "BatchNorm" | |
bottom: "res36_conv2" | |
top: "res36_conv2" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res36_conv2_scale" | |
type: "Scale" | |
bottom: "res36_conv2" | |
top: "res36_conv2" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res36_conv2_relu" | |
type: "ReLU" | |
bottom: "res36_conv2" | |
top: "res36_conv2" | |
} | |
layer { | |
name: "res36_conv3" | |
type: "Convolution" | |
bottom: "res36_conv2" | |
top: "res36_conv3" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 1024 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res36_eletwise" | |
type: "Eltwise" | |
bottom: "res35_eletwise" | |
bottom: "res36_conv3" | |
top: "res36_eletwise" | |
eltwise_param { | |
operation: SUM | |
} | |
} | |
layer { | |
name: "res37_bn" | |
type: "BatchNorm" | |
bottom: "res36_eletwise" | |
top: "res37_bn" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res37_scale" | |
type: "Scale" | |
bottom: "res37_bn" | |
top: "res37_bn" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res37_relu" | |
type: "ReLU" | |
bottom: "res37_bn" | |
top: "res37_bn" | |
} | |
layer { | |
name: "res37_conv1" | |
type: "Convolution" | |
bottom: "res37_bn" | |
top: "res37_conv1" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 256 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res37_conv1_bn" | |
type: "BatchNorm" | |
bottom: "res37_conv1" | |
top: "res37_conv1" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res37_conv1_scale" | |
type: "Scale" | |
bottom: "res37_conv1" | |
top: "res37_conv1" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res37_conv1_relu" | |
type: "ReLU" | |
bottom: "res37_conv1" | |
top: "res37_conv1" | |
} | |
layer { | |
name: "res37_conv2" | |
type: "Convolution" | |
bottom: "res37_conv1" | |
top: "res37_conv2" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 256 | |
pad: 1 | |
kernel_size: 3 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res37_conv2_bn" | |
type: "BatchNorm" | |
bottom: "res37_conv2" | |
top: "res37_conv2" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res37_conv2_scale" | |
type: "Scale" | |
bottom: "res37_conv2" | |
top: "res37_conv2" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res37_conv2_relu" | |
type: "ReLU" | |
bottom: "res37_conv2" | |
top: "res37_conv2" | |
} | |
layer { | |
name: "res37_conv3" | |
type: "Convolution" | |
bottom: "res37_conv2" | |
top: "res37_conv3" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 1024 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res37_eletwise" | |
type: "Eltwise" | |
bottom: "res36_eletwise" | |
bottom: "res37_conv3" | |
top: "res37_eletwise" | |
eltwise_param { | |
operation: SUM | |
} | |
} | |
layer { | |
name: "res38_bn" | |
type: "BatchNorm" | |
bottom: "res37_eletwise" | |
top: "res38_bn" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res38_scale" | |
type: "Scale" | |
bottom: "res38_bn" | |
top: "res38_bn" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res38_relu" | |
type: "ReLU" | |
bottom: "res38_bn" | |
top: "res38_bn" | |
} | |
layer { | |
name: "res38_conv1" | |
type: "Convolution" | |
bottom: "res38_bn" | |
top: "res38_conv1" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 256 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res38_conv1_bn" | |
type: "BatchNorm" | |
bottom: "res38_conv1" | |
top: "res38_conv1" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res38_conv1_scale" | |
type: "Scale" | |
bottom: "res38_conv1" | |
top: "res38_conv1" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res38_conv1_relu" | |
type: "ReLU" | |
bottom: "res38_conv1" | |
top: "res38_conv1" | |
} | |
layer { | |
name: "res38_conv2" | |
type: "Convolution" | |
bottom: "res38_conv1" | |
top: "res38_conv2" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 256 | |
pad: 1 | |
kernel_size: 3 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res38_conv2_bn" | |
type: "BatchNorm" | |
bottom: "res38_conv2" | |
top: "res38_conv2" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res38_conv2_scale" | |
type: "Scale" | |
bottom: "res38_conv2" | |
top: "res38_conv2" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res38_conv2_relu" | |
type: "ReLU" | |
bottom: "res38_conv2" | |
top: "res38_conv2" | |
} | |
layer { | |
name: "res38_conv3" | |
type: "Convolution" | |
bottom: "res38_conv2" | |
top: "res38_conv3" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 1024 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res38_eletwise" | |
type: "Eltwise" | |
bottom: "res37_eletwise" | |
bottom: "res38_conv3" | |
top: "res38_eletwise" | |
eltwise_param { | |
operation: SUM | |
} | |
} | |
layer { | |
name: "res39_bn" | |
type: "BatchNorm" | |
bottom: "res38_eletwise" | |
top: "res39_bn" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res39_scale" | |
type: "Scale" | |
bottom: "res39_bn" | |
top: "res39_bn" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res39_relu" | |
type: "ReLU" | |
bottom: "res39_bn" | |
top: "res39_bn" | |
} | |
layer { | |
name: "res39_conv1" | |
type: "Convolution" | |
bottom: "res39_bn" | |
top: "res39_conv1" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 256 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res39_conv1_bn" | |
type: "BatchNorm" | |
bottom: "res39_conv1" | |
top: "res39_conv1" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res39_conv1_scale" | |
type: "Scale" | |
bottom: "res39_conv1" | |
top: "res39_conv1" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res39_conv1_relu" | |
type: "ReLU" | |
bottom: "res39_conv1" | |
top: "res39_conv1" | |
} | |
layer { | |
name: "res39_conv2" | |
type: "Convolution" | |
bottom: "res39_conv1" | |
top: "res39_conv2" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 256 | |
pad: 1 | |
kernel_size: 3 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res39_conv2_bn" | |
type: "BatchNorm" | |
bottom: "res39_conv2" | |
top: "res39_conv2" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res39_conv2_scale" | |
type: "Scale" | |
bottom: "res39_conv2" | |
top: "res39_conv2" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res39_conv2_relu" | |
type: "ReLU" | |
bottom: "res39_conv2" | |
top: "res39_conv2" | |
} | |
layer { | |
name: "res39_conv3" | |
type: "Convolution" | |
bottom: "res39_conv2" | |
top: "res39_conv3" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 1024 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res39_eletwise" | |
type: "Eltwise" | |
bottom: "res38_eletwise" | |
bottom: "res39_conv3" | |
top: "res39_eletwise" | |
eltwise_param { | |
operation: SUM | |
} | |
} | |
layer { | |
name: "res40_bn" | |
type: "BatchNorm" | |
bottom: "res39_eletwise" | |
top: "res40_bn" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res40_scale" | |
type: "Scale" | |
bottom: "res40_bn" | |
top: "res40_bn" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res40_relu" | |
type: "ReLU" | |
bottom: "res40_bn" | |
top: "res40_bn" | |
} | |
layer { | |
name: "res40_conv1" | |
type: "Convolution" | |
bottom: "res40_bn" | |
top: "res40_conv1" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 256 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res40_conv1_bn" | |
type: "BatchNorm" | |
bottom: "res40_conv1" | |
top: "res40_conv1" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res40_conv1_scale" | |
type: "Scale" | |
bottom: "res40_conv1" | |
top: "res40_conv1" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res40_conv1_relu" | |
type: "ReLU" | |
bottom: "res40_conv1" | |
top: "res40_conv1" | |
} | |
layer { | |
name: "res40_conv2" | |
type: "Convolution" | |
bottom: "res40_conv1" | |
top: "res40_conv2" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 256 | |
pad: 1 | |
kernel_size: 3 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res40_conv2_bn" | |
type: "BatchNorm" | |
bottom: "res40_conv2" | |
top: "res40_conv2" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res40_conv2_scale" | |
type: "Scale" | |
bottom: "res40_conv2" | |
top: "res40_conv2" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res40_conv2_relu" | |
type: "ReLU" | |
bottom: "res40_conv2" | |
top: "res40_conv2" | |
} | |
layer { | |
name: "res40_conv3" | |
type: "Convolution" | |
bottom: "res40_conv2" | |
top: "res40_conv3" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 1024 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res40_eletwise" | |
type: "Eltwise" | |
bottom: "res39_eletwise" | |
bottom: "res40_conv3" | |
top: "res40_eletwise" | |
eltwise_param { | |
operation: SUM | |
} | |
} | |
layer { | |
name: "res41_bn" | |
type: "BatchNorm" | |
bottom: "res40_eletwise" | |
top: "res41_bn" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res41_scale" | |
type: "Scale" | |
bottom: "res41_bn" | |
top: "res41_bn" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res41_relu" | |
type: "ReLU" | |
bottom: "res41_bn" | |
top: "res41_bn" | |
} | |
layer { | |
name: "res41_conv1" | |
type: "Convolution" | |
bottom: "res41_bn" | |
top: "res41_conv1" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 256 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res41_conv1_bn" | |
type: "BatchNorm" | |
bottom: "res41_conv1" | |
top: "res41_conv1" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res41_conv1_scale" | |
type: "Scale" | |
bottom: "res41_conv1" | |
top: "res41_conv1" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res41_conv1_relu" | |
type: "ReLU" | |
bottom: "res41_conv1" | |
top: "res41_conv1" | |
} | |
layer { | |
name: "res41_conv2" | |
type: "Convolution" | |
bottom: "res41_conv1" | |
top: "res41_conv2" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 256 | |
pad: 1 | |
kernel_size: 3 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res41_conv2_bn" | |
type: "BatchNorm" | |
bottom: "res41_conv2" | |
top: "res41_conv2" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res41_conv2_scale" | |
type: "Scale" | |
bottom: "res41_conv2" | |
top: "res41_conv2" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res41_conv2_relu" | |
type: "ReLU" | |
bottom: "res41_conv2" | |
top: "res41_conv2" | |
} | |
layer { | |
name: "res41_conv3" | |
type: "Convolution" | |
bottom: "res41_conv2" | |
top: "res41_conv3" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 1024 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res41_eletwise" | |
type: "Eltwise" | |
bottom: "res40_eletwise" | |
bottom: "res41_conv3" | |
top: "res41_eletwise" | |
eltwise_param { | |
operation: SUM | |
} | |
} | |
layer { | |
name: "res42_bn" | |
type: "BatchNorm" | |
bottom: "res41_eletwise" | |
top: "res42_bn" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res42_scale" | |
type: "Scale" | |
bottom: "res42_bn" | |
top: "res42_bn" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res42_relu" | |
type: "ReLU" | |
bottom: "res42_bn" | |
top: "res42_bn" | |
} | |
layer { | |
name: "res42_conv1" | |
type: "Convolution" | |
bottom: "res42_bn" | |
top: "res42_conv1" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 256 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res42_conv1_bn" | |
type: "BatchNorm" | |
bottom: "res42_conv1" | |
top: "res42_conv1" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res42_conv1_scale" | |
type: "Scale" | |
bottom: "res42_conv1" | |
top: "res42_conv1" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res42_conv1_relu" | |
type: "ReLU" | |
bottom: "res42_conv1" | |
top: "res42_conv1" | |
} | |
layer { | |
name: "res42_conv2" | |
type: "Convolution" | |
bottom: "res42_conv1" | |
top: "res42_conv2" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 256 | |
pad: 1 | |
kernel_size: 3 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res42_conv2_bn" | |
type: "BatchNorm" | |
bottom: "res42_conv2" | |
top: "res42_conv2" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res42_conv2_scale" | |
type: "Scale" | |
bottom: "res42_conv2" | |
top: "res42_conv2" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res42_conv2_relu" | |
type: "ReLU" | |
bottom: "res42_conv2" | |
top: "res42_conv2" | |
} | |
layer { | |
name: "res42_conv3" | |
type: "Convolution" | |
bottom: "res42_conv2" | |
top: "res42_conv3" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 1024 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res42_eletwise" | |
type: "Eltwise" | |
bottom: "res41_eletwise" | |
bottom: "res42_conv3" | |
top: "res42_eletwise" | |
eltwise_param { | |
operation: SUM | |
} | |
} | |
layer { | |
name: "res43_bn" | |
type: "BatchNorm" | |
bottom: "res42_eletwise" | |
top: "res43_bn" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res43_scale" | |
type: "Scale" | |
bottom: "res43_bn" | |
top: "res43_bn" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res43_relu" | |
type: "ReLU" | |
bottom: "res43_bn" | |
top: "res43_bn" | |
} | |
layer { | |
name: "res43_conv1" | |
type: "Convolution" | |
bottom: "res43_bn" | |
top: "res43_conv1" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 256 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res43_conv1_bn" | |
type: "BatchNorm" | |
bottom: "res43_conv1" | |
top: "res43_conv1" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res43_conv1_scale" | |
type: "Scale" | |
bottom: "res43_conv1" | |
top: "res43_conv1" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res43_conv1_relu" | |
type: "ReLU" | |
bottom: "res43_conv1" | |
top: "res43_conv1" | |
} | |
layer { | |
name: "res43_conv2" | |
type: "Convolution" | |
bottom: "res43_conv1" | |
top: "res43_conv2" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 256 | |
pad: 1 | |
kernel_size: 3 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res43_conv2_bn" | |
type: "BatchNorm" | |
bottom: "res43_conv2" | |
top: "res43_conv2" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res43_conv2_scale" | |
type: "Scale" | |
bottom: "res43_conv2" | |
top: "res43_conv2" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res43_conv2_relu" | |
type: "ReLU" | |
bottom: "res43_conv2" | |
top: "res43_conv2" | |
} | |
layer { | |
name: "res43_conv3" | |
type: "Convolution" | |
bottom: "res43_conv2" | |
top: "res43_conv3" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 1024 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res43_eletwise" | |
type: "Eltwise" | |
bottom: "res42_eletwise" | |
bottom: "res43_conv3" | |
top: "res43_eletwise" | |
eltwise_param { | |
operation: SUM | |
} | |
} | |
layer { | |
name: "res44_bn" | |
type: "BatchNorm" | |
bottom: "res43_eletwise" | |
top: "res44_bn" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res44_scale" | |
type: "Scale" | |
bottom: "res44_bn" | |
top: "res44_bn" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res44_relu" | |
type: "ReLU" | |
bottom: "res44_bn" | |
top: "res44_bn" | |
} | |
layer { | |
name: "res44_conv1" | |
type: "Convolution" | |
bottom: "res44_bn" | |
top: "res44_conv1" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 256 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res44_conv1_bn" | |
type: "BatchNorm" | |
bottom: "res44_conv1" | |
top: "res44_conv1" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res44_conv1_scale" | |
type: "Scale" | |
bottom: "res44_conv1" | |
top: "res44_conv1" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res44_conv1_relu" | |
type: "ReLU" | |
bottom: "res44_conv1" | |
top: "res44_conv1" | |
} | |
layer { | |
name: "res44_conv2" | |
type: "Convolution" | |
bottom: "res44_conv1" | |
top: "res44_conv2" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 256 | |
pad: 1 | |
kernel_size: 3 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res44_conv2_bn" | |
type: "BatchNorm" | |
bottom: "res44_conv2" | |
top: "res44_conv2" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res44_conv2_scale" | |
type: "Scale" | |
bottom: "res44_conv2" | |
top: "res44_conv2" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res44_conv2_relu" | |
type: "ReLU" | |
bottom: "res44_conv2" | |
top: "res44_conv2" | |
} | |
layer { | |
name: "res44_conv3" | |
type: "Convolution" | |
bottom: "res44_conv2" | |
top: "res44_conv3" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 1024 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res44_eletwise" | |
type: "Eltwise" | |
bottom: "res43_eletwise" | |
bottom: "res44_conv3" | |
top: "res44_eletwise" | |
eltwise_param { | |
operation: SUM | |
} | |
} | |
layer { | |
name: "res45_bn" | |
type: "BatchNorm" | |
bottom: "res44_eletwise" | |
top: "res45_bn" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res45_scale" | |
type: "Scale" | |
bottom: "res45_bn" | |
top: "res45_bn" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res45_relu" | |
type: "ReLU" | |
bottom: "res45_bn" | |
top: "res45_bn" | |
} | |
layer { | |
name: "res45_conv1" | |
type: "Convolution" | |
bottom: "res45_bn" | |
top: "res45_conv1" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 256 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res45_conv1_bn" | |
type: "BatchNorm" | |
bottom: "res45_conv1" | |
top: "res45_conv1" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res45_conv1_scale" | |
type: "Scale" | |
bottom: "res45_conv1" | |
top: "res45_conv1" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res45_conv1_relu" | |
type: "ReLU" | |
bottom: "res45_conv1" | |
top: "res45_conv1" | |
} | |
layer { | |
name: "res45_conv2" | |
type: "Convolution" | |
bottom: "res45_conv1" | |
top: "res45_conv2" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 256 | |
pad: 1 | |
kernel_size: 3 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res45_conv2_bn" | |
type: "BatchNorm" | |
bottom: "res45_conv2" | |
top: "res45_conv2" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res45_conv2_scale" | |
type: "Scale" | |
bottom: "res45_conv2" | |
top: "res45_conv2" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res45_conv2_relu" | |
type: "ReLU" | |
bottom: "res45_conv2" | |
top: "res45_conv2" | |
} | |
layer { | |
name: "res45_conv3" | |
type: "Convolution" | |
bottom: "res45_conv2" | |
top: "res45_conv3" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 1024 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res45_eletwise" | |
type: "Eltwise" | |
bottom: "res44_eletwise" | |
bottom: "res45_conv3" | |
top: "res45_eletwise" | |
eltwise_param { | |
operation: SUM | |
} | |
} | |
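# res46: identity bottleneck (1x1/256 -> 3x3/256 -> 1x1/1024), shortcut from res45_eletwise.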
layer { | |
name: "res46_bn" | |
type: "BatchNorm" | |
bottom: "res45_eletwise" | |
top: "res46_bn" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res46_scale" | |
type: "Scale" | |
bottom: "res46_bn" | |
top: "res46_bn" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res46_relu" | |
type: "ReLU" | |
bottom: "res46_bn" | |
top: "res46_bn" | |
} | |
layer { | |
name: "res46_conv1" | |
type: "Convolution" | |
bottom: "res46_bn" | |
top: "res46_conv1" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 256 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res46_conv1_bn" | |
type: "BatchNorm" | |
bottom: "res46_conv1" | |
top: "res46_conv1" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res46_conv1_scale" | |
type: "Scale" | |
bottom: "res46_conv1" | |
top: "res46_conv1" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res46_conv1_relu" | |
type: "ReLU" | |
bottom: "res46_conv1" | |
top: "res46_conv1" | |
} | |
layer { | |
name: "res46_conv2" | |
type: "Convolution" | |
bottom: "res46_conv1" | |
top: "res46_conv2" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 256 | |
pad: 1 | |
kernel_size: 3 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res46_conv2_bn" | |
type: "BatchNorm" | |
bottom: "res46_conv2" | |
top: "res46_conv2" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res46_conv2_scale" | |
type: "Scale" | |
bottom: "res46_conv2" | |
top: "res46_conv2" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res46_conv2_relu" | |
type: "ReLU" | |
bottom: "res46_conv2" | |
top: "res46_conv2" | |
} | |
layer { | |
name: "res46_conv3" | |
type: "Convolution" | |
bottom: "res46_conv2" | |
top: "res46_conv3" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 1024 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res46_eletwise" | |
type: "Eltwise" | |
bottom: "res45_eletwise" | |
bottom: "res46_conv3" | |
top: "res46_eletwise" | |
eltwise_param { | |
operation: SUM | |
} | |
} | |
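# res47: identity bottleneck (1x1/256 -> 3x3/256 -> 1x1/1024), shortcut from res46_eletwise.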
layer { | |
name: "res47_bn" | |
type: "BatchNorm" | |
bottom: "res46_eletwise" | |
top: "res47_bn" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res47_scale" | |
type: "Scale" | |
bottom: "res47_bn" | |
top: "res47_bn" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res47_relu" | |
type: "ReLU" | |
bottom: "res47_bn" | |
top: "res47_bn" | |
} | |
layer { | |
name: "res47_conv1" | |
type: "Convolution" | |
bottom: "res47_bn" | |
top: "res47_conv1" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 256 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res47_conv1_bn" | |
type: "BatchNorm" | |
bottom: "res47_conv1" | |
top: "res47_conv1" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res47_conv1_scale" | |
type: "Scale" | |
bottom: "res47_conv1" | |
top: "res47_conv1" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res47_conv1_relu" | |
type: "ReLU" | |
bottom: "res47_conv1" | |
top: "res47_conv1" | |
} | |
layer { | |
name: "res47_conv2" | |
type: "Convolution" | |
bottom: "res47_conv1" | |
top: "res47_conv2" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 256 | |
pad: 1 | |
kernel_size: 3 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res47_conv2_bn" | |
type: "BatchNorm" | |
bottom: "res47_conv2" | |
top: "res47_conv2" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res47_conv2_scale" | |
type: "Scale" | |
bottom: "res47_conv2" | |
top: "res47_conv2" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res47_conv2_relu" | |
type: "ReLU" | |
bottom: "res47_conv2" | |
top: "res47_conv2" | |
} | |
layer { | |
name: "res47_conv3" | |
type: "Convolution" | |
bottom: "res47_conv2" | |
top: "res47_conv3" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 1024 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res47_eletwise" | |
type: "Eltwise" | |
bottom: "res46_eletwise" | |
bottom: "res47_conv3" | |
top: "res47_eletwise" | |
eltwise_param { | |
operation: SUM | |
} | |
} | |
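# res48: identity bottleneck (1x1/256 -> 3x3/256 -> 1x1/1024), shortcut from res47_eletwise.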
layer { | |
name: "res48_bn" | |
type: "BatchNorm" | |
bottom: "res47_eletwise" | |
top: "res48_bn" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res48_scale" | |
type: "Scale" | |
bottom: "res48_bn" | |
top: "res48_bn" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res48_relu" | |
type: "ReLU" | |
bottom: "res48_bn" | |
top: "res48_bn" | |
} | |
layer { | |
name: "res48_conv1" | |
type: "Convolution" | |
bottom: "res48_bn" | |
top: "res48_conv1" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 256 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res48_conv1_bn" | |
type: "BatchNorm" | |
bottom: "res48_conv1" | |
top: "res48_conv1" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res48_conv1_scale" | |
type: "Scale" | |
bottom: "res48_conv1" | |
top: "res48_conv1" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res48_conv1_relu" | |
type: "ReLU" | |
bottom: "res48_conv1" | |
top: "res48_conv1" | |
} | |
layer { | |
name: "res48_conv2" | |
type: "Convolution" | |
bottom: "res48_conv1" | |
top: "res48_conv2" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 256 | |
pad: 1 | |
kernel_size: 3 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res48_conv2_bn" | |
type: "BatchNorm" | |
bottom: "res48_conv2" | |
top: "res48_conv2" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res48_conv2_scale" | |
type: "Scale" | |
bottom: "res48_conv2" | |
top: "res48_conv2" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res48_conv2_relu" | |
type: "ReLU" | |
bottom: "res48_conv2" | |
top: "res48_conv2" | |
} | |
layer { | |
name: "res48_conv3" | |
type: "Convolution" | |
bottom: "res48_conv2" | |
top: "res48_conv3" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 1024 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res48_eletwise" | |
type: "Eltwise" | |
bottom: "res47_eletwise" | |
bottom: "res48_conv3" | |
top: "res48_eletwise" | |
eltwise_param { | |
operation: SUM | |
} | |
} | |
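# res49: identity bottleneck (1x1/256 -> 3x3/256 -> 1x1/1024), shortcut from res48_eletwise.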
layer { | |
name: "res49_bn" | |
type: "BatchNorm" | |
bottom: "res48_eletwise" | |
top: "res49_bn" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res49_scale" | |
type: "Scale" | |
bottom: "res49_bn" | |
top: "res49_bn" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res49_relu" | |
type: "ReLU" | |
bottom: "res49_bn" | |
top: "res49_bn" | |
} | |
layer { | |
name: "res49_conv1" | |
type: "Convolution" | |
bottom: "res49_bn" | |
top: "res49_conv1" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 256 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res49_conv1_bn" | |
type: "BatchNorm" | |
bottom: "res49_conv1" | |
top: "res49_conv1" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res49_conv1_scale" | |
type: "Scale" | |
bottom: "res49_conv1" | |
top: "res49_conv1" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res49_conv1_relu" | |
type: "ReLU" | |
bottom: "res49_conv1" | |
top: "res49_conv1" | |
} | |
layer { | |
name: "res49_conv2" | |
type: "Convolution" | |
bottom: "res49_conv1" | |
top: "res49_conv2" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 256 | |
pad: 1 | |
kernel_size: 3 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res49_conv2_bn" | |
type: "BatchNorm" | |
bottom: "res49_conv2" | |
top: "res49_conv2" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res49_conv2_scale" | |
type: "Scale" | |
bottom: "res49_conv2" | |
top: "res49_conv2" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res49_conv2_relu" | |
type: "ReLU" | |
bottom: "res49_conv2" | |
top: "res49_conv2" | |
} | |
layer { | |
name: "res49_conv3" | |
type: "Convolution" | |
bottom: "res49_conv2" | |
top: "res49_conv3" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 1024 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res49_eletwise" | |
type: "Eltwise" | |
bottom: "res48_eletwise" | |
bottom: "res49_conv3" | |
top: "res49_eletwise" | |
eltwise_param { | |
operation: SUM | |
} | |
} | |
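# res50: identity bottleneck (1x1/256 -> 3x3/256 -> 1x1/1024), shortcut from res49_eletwise.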
layer { | |
name: "res50_bn" | |
type: "BatchNorm" | |
bottom: "res49_eletwise" | |
top: "res50_bn" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res50_scale" | |
type: "Scale" | |
bottom: "res50_bn" | |
top: "res50_bn" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res50_relu" | |
type: "ReLU" | |
bottom: "res50_bn" | |
top: "res50_bn" | |
} | |
layer { | |
name: "res50_conv1" | |
type: "Convolution" | |
bottom: "res50_bn" | |
top: "res50_conv1" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 256 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res50_conv1_bn" | |
type: "BatchNorm" | |
bottom: "res50_conv1" | |
top: "res50_conv1" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res50_conv1_scale" | |
type: "Scale" | |
bottom: "res50_conv1" | |
top: "res50_conv1" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res50_conv1_relu" | |
type: "ReLU" | |
bottom: "res50_conv1" | |
top: "res50_conv1" | |
} | |
layer { | |
name: "res50_conv2" | |
type: "Convolution" | |
bottom: "res50_conv1" | |
top: "res50_conv2" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 256 | |
pad: 1 | |
kernel_size: 3 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res50_conv2_bn" | |
type: "BatchNorm" | |
bottom: "res50_conv2" | |
top: "res50_conv2" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res50_conv2_scale" | |
type: "Scale" | |
bottom: "res50_conv2" | |
top: "res50_conv2" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res50_conv2_relu" | |
type: "ReLU" | |
bottom: "res50_conv2" | |
top: "res50_conv2" | |
} | |
layer { | |
name: "res50_conv3" | |
type: "Convolution" | |
bottom: "res50_conv2" | |
top: "res50_conv3" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 1024 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res50_eletwise" | |
type: "Eltwise" | |
bottom: "res49_eletwise" | |
bottom: "res50_conv3" | |
top: "res50_eletwise" | |
eltwise_param { | |
operation: SUM | |
} | |
} | |
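# res51: identity bottleneck (1x1/256 -> 3x3/256 -> 1x1/1024), shortcut from res50_eletwise.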
layer { | |
name: "res51_bn" | |
type: "BatchNorm" | |
bottom: "res50_eletwise" | |
top: "res51_bn" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res51_scale" | |
type: "Scale" | |
bottom: "res51_bn" | |
top: "res51_bn" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res51_relu" | |
type: "ReLU" | |
bottom: "res51_bn" | |
top: "res51_bn" | |
} | |
layer { | |
name: "res51_conv1" | |
type: "Convolution" | |
bottom: "res51_bn" | |
top: "res51_conv1" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 256 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res51_conv1_bn" | |
type: "BatchNorm" | |
bottom: "res51_conv1" | |
top: "res51_conv1" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res51_conv1_scale" | |
type: "Scale" | |
bottom: "res51_conv1" | |
top: "res51_conv1" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res51_conv1_relu" | |
type: "ReLU" | |
bottom: "res51_conv1" | |
top: "res51_conv1" | |
} | |
layer { | |
name: "res51_conv2" | |
type: "Convolution" | |
bottom: "res51_conv1" | |
top: "res51_conv2" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 256 | |
pad: 1 | |
kernel_size: 3 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res51_conv2_bn" | |
type: "BatchNorm" | |
bottom: "res51_conv2" | |
top: "res51_conv2" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res51_conv2_scale" | |
type: "Scale" | |
bottom: "res51_conv2" | |
top: "res51_conv2" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res51_conv2_relu" | |
type: "ReLU" | |
bottom: "res51_conv2" | |
top: "res51_conv2" | |
} | |
layer { | |
name: "res51_conv3" | |
type: "Convolution" | |
bottom: "res51_conv2" | |
top: "res51_conv3" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 1024 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res51_eletwise" | |
type: "Eltwise" | |
bottom: "res50_eletwise" | |
bottom: "res51_conv3" | |
top: "res51_eletwise" | |
eltwise_param { | |
operation: SUM | |
} | |
} | |
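# res52: identity bottleneck (1x1/256 -> 3x3/256 -> 1x1/1024), shortcut from res51_eletwise.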
layer { | |
name: "res52_bn" | |
type: "BatchNorm" | |
bottom: "res51_eletwise" | |
top: "res52_bn" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res52_scale" | |
type: "Scale" | |
bottom: "res52_bn" | |
top: "res52_bn" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res52_relu" | |
type: "ReLU" | |
bottom: "res52_bn" | |
top: "res52_bn" | |
} | |
layer { | |
name: "res52_conv1" | |
type: "Convolution" | |
bottom: "res52_bn" | |
top: "res52_conv1" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 256 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res52_conv1_bn" | |
type: "BatchNorm" | |
bottom: "res52_conv1" | |
top: "res52_conv1" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res52_conv1_scale" | |
type: "Scale" | |
bottom: "res52_conv1" | |
top: "res52_conv1" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res52_conv1_relu" | |
type: "ReLU" | |
bottom: "res52_conv1" | |
top: "res52_conv1" | |
} | |
layer { | |
name: "res52_conv2" | |
type: "Convolution" | |
bottom: "res52_conv1" | |
top: "res52_conv2" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 256 | |
pad: 1 | |
kernel_size: 3 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res52_conv2_bn" | |
type: "BatchNorm" | |
bottom: "res52_conv2" | |
top: "res52_conv2" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res52_conv2_scale" | |
type: "Scale" | |
bottom: "res52_conv2" | |
top: "res52_conv2" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res52_conv2_relu" | |
type: "ReLU" | |
bottom: "res52_conv2" | |
top: "res52_conv2" | |
} | |
layer { | |
name: "res52_conv3" | |
type: "Convolution" | |
bottom: "res52_conv2" | |
top: "res52_conv3" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 1024 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res52_eletwise" | |
type: "Eltwise" | |
bottom: "res51_eletwise" | |
bottom: "res52_conv3" | |
top: "res52_eletwise" | |
eltwise_param { | |
operation: SUM | |
} | |
} | |
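# res53: identity bottleneck (1x1/256 -> 3x3/256 -> 1x1/1024), shortcut from res52_eletwise.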
layer { | |
name: "res53_bn" | |
type: "BatchNorm" | |
bottom: "res52_eletwise" | |
top: "res53_bn" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res53_scale" | |
type: "Scale" | |
bottom: "res53_bn" | |
top: "res53_bn" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res53_relu" | |
type: "ReLU" | |
bottom: "res53_bn" | |
top: "res53_bn" | |
} | |
layer { | |
name: "res53_conv1" | |
type: "Convolution" | |
bottom: "res53_bn" | |
top: "res53_conv1" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 256 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res53_conv1_bn" | |
type: "BatchNorm" | |
bottom: "res53_conv1" | |
top: "res53_conv1" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res53_conv1_scale" | |
type: "Scale" | |
bottom: "res53_conv1" | |
top: "res53_conv1" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res53_conv1_relu" | |
type: "ReLU" | |
bottom: "res53_conv1" | |
top: "res53_conv1" | |
} | |
layer { | |
name: "res53_conv2" | |
type: "Convolution" | |
bottom: "res53_conv1" | |
top: "res53_conv2" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 256 | |
pad: 1 | |
kernel_size: 3 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res53_conv2_bn" | |
type: "BatchNorm" | |
bottom: "res53_conv2" | |
top: "res53_conv2" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res53_conv2_scale" | |
type: "Scale" | |
bottom: "res53_conv2" | |
top: "res53_conv2" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res53_conv2_relu" | |
type: "ReLU" | |
bottom: "res53_conv2" | |
top: "res53_conv2" | |
} | |
layer { | |
name: "res53_conv3" | |
type: "Convolution" | |
bottom: "res53_conv2" | |
top: "res53_conv3" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 1024 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res53_eletwise" | |
type: "Eltwise" | |
bottom: "res52_eletwise" | |
bottom: "res53_conv3" | |
top: "res53_eletwise" | |
eltwise_param { | |
operation: SUM | |
} | |
} | |
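# res54: identity bottleneck (1x1/256 -> 3x3/256 -> 1x1/1024), shortcut from res53_eletwise.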
layer { | |
name: "res54_bn" | |
type: "BatchNorm" | |
bottom: "res53_eletwise" | |
top: "res54_bn" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res54_scale" | |
type: "Scale" | |
bottom: "res54_bn" | |
top: "res54_bn" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res54_relu" | |
type: "ReLU" | |
bottom: "res54_bn" | |
top: "res54_bn" | |
} | |
layer { | |
name: "res54_conv1" | |
type: "Convolution" | |
bottom: "res54_bn" | |
top: "res54_conv1" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 256 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res54_conv1_bn" | |
type: "BatchNorm" | |
bottom: "res54_conv1" | |
top: "res54_conv1" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res54_conv1_scale" | |
type: "Scale" | |
bottom: "res54_conv1" | |
top: "res54_conv1" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res54_conv1_relu" | |
type: "ReLU" | |
bottom: "res54_conv1" | |
top: "res54_conv1" | |
} | |
layer { | |
name: "res54_conv2" | |
type: "Convolution" | |
bottom: "res54_conv1" | |
top: "res54_conv2" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 256 | |
pad: 1 | |
kernel_size: 3 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res54_conv2_bn" | |
type: "BatchNorm" | |
bottom: "res54_conv2" | |
top: "res54_conv2" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res54_conv2_scale" | |
type: "Scale" | |
bottom: "res54_conv2" | |
top: "res54_conv2" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res54_conv2_relu" | |
type: "ReLU" | |
bottom: "res54_conv2" | |
top: "res54_conv2" | |
} | |
layer { | |
name: "res54_conv3" | |
type: "Convolution" | |
bottom: "res54_conv2" | |
top: "res54_conv3" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 1024 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res54_eletwise" | |
type: "Eltwise" | |
bottom: "res53_eletwise" | |
bottom: "res54_conv3" | |
top: "res54_eletwise" | |
eltwise_param { | |
operation: SUM | |
} | |
} | |
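# res55: identity bottleneck (1x1/256 -> 3x3/256 -> 1x1/1024), shortcut from res54_eletwise.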
layer { | |
name: "res55_bn" | |
type: "BatchNorm" | |
bottom: "res54_eletwise" | |
top: "res55_bn" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res55_scale" | |
type: "Scale" | |
bottom: "res55_bn" | |
top: "res55_bn" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res55_relu" | |
type: "ReLU" | |
bottom: "res55_bn" | |
top: "res55_bn" | |
} | |
layer { | |
name: "res55_conv1" | |
type: "Convolution" | |
bottom: "res55_bn" | |
top: "res55_conv1" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 256 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res55_conv1_bn" | |
type: "BatchNorm" | |
bottom: "res55_conv1" | |
top: "res55_conv1" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res55_conv1_scale" | |
type: "Scale" | |
bottom: "res55_conv1" | |
top: "res55_conv1" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res55_conv1_relu" | |
type: "ReLU" | |
bottom: "res55_conv1" | |
top: "res55_conv1" | |
} | |
layer { | |
name: "res55_conv2" | |
type: "Convolution" | |
bottom: "res55_conv1" | |
top: "res55_conv2" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 256 | |
pad: 1 | |
kernel_size: 3 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res55_conv2_bn" | |
type: "BatchNorm" | |
bottom: "res55_conv2" | |
top: "res55_conv2" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res55_conv2_scale" | |
type: "Scale" | |
bottom: "res55_conv2" | |
top: "res55_conv2" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res55_conv2_relu" | |
type: "ReLU" | |
bottom: "res55_conv2" | |
top: "res55_conv2" | |
} | |
layer { | |
name: "res55_conv3" | |
type: "Convolution" | |
bottom: "res55_conv2" | |
top: "res55_conv3" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 1024 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res55_eletwise" | |
type: "Eltwise" | |
bottom: "res54_eletwise" | |
bottom: "res55_conv3" | |
top: "res55_eletwise" | |
eltwise_param { | |
operation: SUM | |
} | |
} | |
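# res56: identity bottleneck (1x1/256 -> 3x3/256 -> 1x1/1024), shortcut from res55_eletwise.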
layer { | |
name: "res56_bn" | |
type: "BatchNorm" | |
bottom: "res55_eletwise" | |
top: "res56_bn" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res56_scale" | |
type: "Scale" | |
bottom: "res56_bn" | |
top: "res56_bn" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res56_relu" | |
type: "ReLU" | |
bottom: "res56_bn" | |
top: "res56_bn" | |
} | |
layer { | |
name: "res56_conv1" | |
type: "Convolution" | |
bottom: "res56_bn" | |
top: "res56_conv1" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 256 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res56_conv1_bn" | |
type: "BatchNorm" | |
bottom: "res56_conv1" | |
top: "res56_conv1" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res56_conv1_scale" | |
type: "Scale" | |
bottom: "res56_conv1" | |
top: "res56_conv1" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res56_conv1_relu" | |
type: "ReLU" | |
bottom: "res56_conv1" | |
top: "res56_conv1" | |
} | |
layer { | |
name: "res56_conv2" | |
type: "Convolution" | |
bottom: "res56_conv1" | |
top: "res56_conv2" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 256 | |
pad: 1 | |
kernel_size: 3 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res56_conv2_bn" | |
type: "BatchNorm" | |
bottom: "res56_conv2" | |
top: "res56_conv2" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res56_conv2_scale" | |
type: "Scale" | |
bottom: "res56_conv2" | |
top: "res56_conv2" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res56_conv2_relu" | |
type: "ReLU" | |
bottom: "res56_conv2" | |
top: "res56_conv2" | |
} | |
layer { | |
name: "res56_conv3" | |
type: "Convolution" | |
bottom: "res56_conv2" | |
top: "res56_conv3" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 1024 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res56_eletwise" | |
type: "Eltwise" | |
bottom: "res55_eletwise" | |
bottom: "res56_conv3" | |
top: "res56_eletwise" | |
eltwise_param { | |
operation: SUM | |
} | |
} | |
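# res57: identity bottleneck (1x1/256 -> 3x3/256 -> 1x1/1024), shortcut from res56_eletwise.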
layer { | |
name: "res57_bn" | |
type: "BatchNorm" | |
bottom: "res56_eletwise" | |
top: "res57_bn" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res57_scale" | |
type: "Scale" | |
bottom: "res57_bn" | |
top: "res57_bn" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res57_relu" | |
type: "ReLU" | |
bottom: "res57_bn" | |
top: "res57_bn" | |
} | |
layer { | |
name: "res57_conv1" | |
type: "Convolution" | |
bottom: "res57_bn" | |
top: "res57_conv1" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 256 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res57_conv1_bn" | |
type: "BatchNorm" | |
bottom: "res57_conv1" | |
top: "res57_conv1" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res57_conv1_scale" | |
type: "Scale" | |
bottom: "res57_conv1" | |
top: "res57_conv1" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res57_conv1_relu" | |
type: "ReLU" | |
bottom: "res57_conv1" | |
top: "res57_conv1" | |
} | |
layer { | |
name: "res57_conv2" | |
type: "Convolution" | |
bottom: "res57_conv1" | |
top: "res57_conv2" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 256 | |
pad: 1 | |
kernel_size: 3 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res57_conv2_bn" | |
type: "BatchNorm" | |
bottom: "res57_conv2" | |
top: "res57_conv2" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res57_conv2_scale" | |
type: "Scale" | |
bottom: "res57_conv2" | |
top: "res57_conv2" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res57_conv2_relu" | |
type: "ReLU" | |
bottom: "res57_conv2" | |
top: "res57_conv2" | |
} | |
layer { | |
name: "res57_conv3" | |
type: "Convolution" | |
bottom: "res57_conv2" | |
top: "res57_conv3" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 1024 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res57_eletwise" | |
type: "Eltwise" | |
bottom: "res56_eletwise" | |
bottom: "res57_conv3" | |
top: "res57_eletwise" | |
eltwise_param { | |
operation: SUM | |
} | |
} | |
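# res58: identity bottleneck (1x1/256 -> 3x3/256 -> 1x1/1024), shortcut from res57_eletwise.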
layer { | |
name: "res58_bn" | |
type: "BatchNorm" | |
bottom: "res57_eletwise" | |
top: "res58_bn" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res58_scale" | |
type: "Scale" | |
bottom: "res58_bn" | |
top: "res58_bn" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res58_relu" | |
type: "ReLU" | |
bottom: "res58_bn" | |
top: "res58_bn" | |
} | |
layer { | |
name: "res58_conv1" | |
type: "Convolution" | |
bottom: "res58_bn" | |
top: "res58_conv1" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 256 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res58_conv1_bn" | |
type: "BatchNorm" | |
bottom: "res58_conv1" | |
top: "res58_conv1" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res58_conv1_scale" | |
type: "Scale" | |
bottom: "res58_conv1" | |
top: "res58_conv1" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res58_conv1_relu" | |
type: "ReLU" | |
bottom: "res58_conv1" | |
top: "res58_conv1" | |
} | |
layer { | |
name: "res58_conv2" | |
type: "Convolution" | |
bottom: "res58_conv1" | |
top: "res58_conv2" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 256 | |
pad: 1 | |
kernel_size: 3 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res58_conv2_bn" | |
type: "BatchNorm" | |
bottom: "res58_conv2" | |
top: "res58_conv2" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res58_conv2_scale" | |
type: "Scale" | |
bottom: "res58_conv2" | |
top: "res58_conv2" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res58_conv2_relu" | |
type: "ReLU" | |
bottom: "res58_conv2" | |
top: "res58_conv2" | |
} | |
layer { | |
name: "res58_conv3" | |
type: "Convolution" | |
bottom: "res58_conv2" | |
top: "res58_conv3" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 1024 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res58_eletwise" | |
type: "Eltwise" | |
bottom: "res57_eletwise" | |
bottom: "res58_conv3" | |
top: "res58_eletwise" | |
eltwise_param { | |
operation: SUM | |
} | |
} | |
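# res59: identity bottleneck (1x1/256 -> 3x3/256 -> 1x1/1024), shortcut from res58_eletwise.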
layer { | |
name: "res59_bn" | |
type: "BatchNorm" | |
bottom: "res58_eletwise" | |
top: "res59_bn" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res59_scale" | |
type: "Scale" | |
bottom: "res59_bn" | |
top: "res59_bn" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res59_relu" | |
type: "ReLU" | |
bottom: "res59_bn" | |
top: "res59_bn" | |
} | |
layer { | |
name: "res59_conv1" | |
type: "Convolution" | |
bottom: "res59_bn" | |
top: "res59_conv1" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 256 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res59_conv1_bn" | |
type: "BatchNorm" | |
bottom: "res59_conv1" | |
top: "res59_conv1" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res59_conv1_scale" | |
type: "Scale" | |
bottom: "res59_conv1" | |
top: "res59_conv1" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res59_conv1_relu" | |
type: "ReLU" | |
bottom: "res59_conv1" | |
top: "res59_conv1" | |
} | |
layer { | |
name: "res59_conv2" | |
type: "Convolution" | |
bottom: "res59_conv1" | |
top: "res59_conv2" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 256 | |
pad: 1 | |
kernel_size: 3 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res59_conv2_bn" | |
type: "BatchNorm" | |
bottom: "res59_conv2" | |
top: "res59_conv2" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res59_conv2_scale" | |
type: "Scale" | |
bottom: "res59_conv2" | |
top: "res59_conv2" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res59_conv2_relu" | |
type: "ReLU" | |
bottom: "res59_conv2" | |
top: "res59_conv2" | |
} | |
layer { | |
name: "res59_conv3" | |
type: "Convolution" | |
bottom: "res59_conv2" | |
top: "res59_conv3" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 1024 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res59_eletwise" | |
type: "Eltwise" | |
bottom: "res58_eletwise" | |
bottom: "res59_conv3" | |
top: "res59_eletwise" | |
eltwise_param { | |
operation: SUM | |
} | |
} | |
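# res60: identity bottleneck (1x1/256 -> 3x3/256 -> 1x1/1024), shortcut from res59_eletwise.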
layer { | |
name: "res60_bn" | |
type: "BatchNorm" | |
bottom: "res59_eletwise" | |
top: "res60_bn" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res60_scale" | |
type: "Scale" | |
bottom: "res60_bn" | |
top: "res60_bn" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res60_relu" | |
type: "ReLU" | |
bottom: "res60_bn" | |
top: "res60_bn" | |
} | |
layer { | |
name: "res60_conv1" | |
type: "Convolution" | |
bottom: "res60_bn" | |
top: "res60_conv1" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 256 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res60_conv1_bn" | |
type: "BatchNorm" | |
bottom: "res60_conv1" | |
top: "res60_conv1" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res60_conv1_scale" | |
type: "Scale" | |
bottom: "res60_conv1" | |
top: "res60_conv1" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res60_conv1_relu" | |
type: "ReLU" | |
bottom: "res60_conv1" | |
top: "res60_conv1" | |
} | |
layer { | |
name: "res60_conv2" | |
type: "Convolution" | |
bottom: "res60_conv1" | |
top: "res60_conv2" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 256 | |
pad: 1 | |
kernel_size: 3 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res60_conv2_bn" | |
type: "BatchNorm" | |
bottom: "res60_conv2" | |
top: "res60_conv2" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res60_conv2_scale" | |
type: "Scale" | |
bottom: "res60_conv2" | |
top: "res60_conv2" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res60_conv2_relu" | |
type: "ReLU" | |
bottom: "res60_conv2" | |
top: "res60_conv2" | |
} | |
layer { | |
name: "res60_conv3" | |
type: "Convolution" | |
bottom: "res60_conv2" | |
top: "res60_conv3" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 1024 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res60_eletwise" | |
type: "Eltwise" | |
bottom: "res59_eletwise" | |
bottom: "res60_conv3" | |
top: "res60_eletwise" | |
eltwise_param { | |
operation: SUM | |
} | |
} | |
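# res61: identity bottleneck (1x1/256 -> 3x3/256 -> 1x1/1024), shortcut from res60_eletwise.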
layer { | |
name: "res61_bn" | |
type: "BatchNorm" | |
bottom: "res60_eletwise" | |
top: "res61_bn" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res61_scale" | |
type: "Scale" | |
bottom: "res61_bn" | |
top: "res61_bn" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res61_relu" | |
type: "ReLU" | |
bottom: "res61_bn" | |
top: "res61_bn" | |
} | |
layer { | |
name: "res61_conv1" | |
type: "Convolution" | |
bottom: "res61_bn" | |
top: "res61_conv1" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 256 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res61_conv1_bn" | |
type: "BatchNorm" | |
bottom: "res61_conv1" | |
top: "res61_conv1" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res61_conv1_scale" | |
type: "Scale" | |
bottom: "res61_conv1" | |
top: "res61_conv1" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res61_conv1_relu" | |
type: "ReLU" | |
bottom: "res61_conv1" | |
top: "res61_conv1" | |
} | |
layer { | |
name: "res61_conv2" | |
type: "Convolution" | |
bottom: "res61_conv1" | |
top: "res61_conv2" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 256 | |
pad: 1 | |
kernel_size: 3 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res61_conv2_bn" | |
type: "BatchNorm" | |
bottom: "res61_conv2" | |
top: "res61_conv2" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res61_conv2_scale" | |
type: "Scale" | |
bottom: "res61_conv2" | |
top: "res61_conv2" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res61_conv2_relu" | |
type: "ReLU" | |
bottom: "res61_conv2" | |
top: "res61_conv2" | |
} | |
layer { | |
name: "res61_conv3" | |
type: "Convolution" | |
bottom: "res61_conv2" | |
top: "res61_conv3" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 1024 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res61_eletwise" | |
type: "Eltwise" | |
bottom: "res60_eletwise" | |
bottom: "res61_conv3" | |
top: "res61_eletwise" | |
eltwise_param { | |
operation: SUM | |
} | |
} | |
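# res62: identity bottleneck (1x1/256 -> 3x3/256 -> 1x1/1024), shortcut from res61_eletwise.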
layer { | |
name: "res62_bn" | |
type: "BatchNorm" | |
bottom: "res61_eletwise" | |
top: "res62_bn" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res62_scale" | |
type: "Scale" | |
bottom: "res62_bn" | |
top: "res62_bn" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res62_relu" | |
type: "ReLU" | |
bottom: "res62_bn" | |
top: "res62_bn" | |
} | |
layer { | |
name: "res62_conv1" | |
type: "Convolution" | |
bottom: "res62_bn" | |
top: "res62_conv1" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 256 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res62_conv1_bn" | |
type: "BatchNorm" | |
bottom: "res62_conv1" | |
top: "res62_conv1" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res62_conv1_scale" | |
type: "Scale" | |
bottom: "res62_conv1" | |
top: "res62_conv1" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res62_conv1_relu" | |
type: "ReLU" | |
bottom: "res62_conv1" | |
top: "res62_conv1" | |
} | |
layer { | |
name: "res62_conv2" | |
type: "Convolution" | |
bottom: "res62_conv1" | |
top: "res62_conv2" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 256 | |
pad: 1 | |
kernel_size: 3 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res62_conv2_bn" | |
type: "BatchNorm" | |
bottom: "res62_conv2" | |
top: "res62_conv2" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res62_conv2_scale" | |
type: "Scale" | |
bottom: "res62_conv2" | |
top: "res62_conv2" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res62_conv2_relu" | |
type: "ReLU" | |
bottom: "res62_conv2" | |
top: "res62_conv2" | |
} | |
layer { | |
name: "res62_conv3" | |
type: "Convolution" | |
bottom: "res62_conv2" | |
top: "res62_conv3" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 1024 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res62_eletwise" | |
type: "Eltwise" | |
bottom: "res61_eletwise" | |
bottom: "res62_conv3" | |
top: "res62_eletwise" | |
eltwise_param { | |
operation: SUM | |
} | |
} | |
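# res63: identity bottleneck (1x1/256 -> 3x3/256 -> 1x1/1024), shortcut from res62_eletwise.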
layer { | |
name: "res63_bn" | |
type: "BatchNorm" | |
bottom: "res62_eletwise" | |
top: "res63_bn" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res63_scale" | |
type: "Scale" | |
bottom: "res63_bn" | |
top: "res63_bn" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res63_relu" | |
type: "ReLU" | |
bottom: "res63_bn" | |
top: "res63_bn" | |
} | |
layer { | |
name: "res63_conv1" | |
type: "Convolution" | |
bottom: "res63_bn" | |
top: "res63_conv1" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 256 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res63_conv1_bn" | |
type: "BatchNorm" | |
bottom: "res63_conv1" | |
top: "res63_conv1" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res63_conv1_scale" | |
type: "Scale" | |
bottom: "res63_conv1" | |
top: "res63_conv1" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res63_conv1_relu" | |
type: "ReLU" | |
bottom: "res63_conv1" | |
top: "res63_conv1" | |
} | |
layer { | |
name: "res63_conv2" | |
type: "Convolution" | |
bottom: "res63_conv1" | |
top: "res63_conv2" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 256 | |
pad: 1 | |
kernel_size: 3 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res63_conv2_bn" | |
type: "BatchNorm" | |
bottom: "res63_conv2" | |
top: "res63_conv2" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res63_conv2_scale" | |
type: "Scale" | |
bottom: "res63_conv2" | |
top: "res63_conv2" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res63_conv2_relu" | |
type: "ReLU" | |
bottom: "res63_conv2" | |
top: "res63_conv2" | |
} | |
layer { | |
name: "res63_conv3" | |
type: "Convolution" | |
bottom: "res63_conv2" | |
top: "res63_conv3" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 1024 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res63_eletwise" | |
type: "Eltwise" | |
bottom: "res62_eletwise" | |
bottom: "res63_conv3" | |
top: "res63_eletwise" | |
eltwise_param { | |
operation: SUM | |
} | |
} | |
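# res64: identity bottleneck (1x1/256 -> 3x3/256 -> 1x1/1024), shortcut from res63_eletwise.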
layer { | |
name: "res64_bn" | |
type: "BatchNorm" | |
bottom: "res63_eletwise" | |
top: "res64_bn" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res64_scale" | |
type: "Scale" | |
bottom: "res64_bn" | |
top: "res64_bn" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res64_relu" | |
type: "ReLU" | |
bottom: "res64_bn" | |
top: "res64_bn" | |
} | |
layer { | |
name: "res64_conv1" | |
type: "Convolution" | |
bottom: "res64_bn" | |
top: "res64_conv1" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 256 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res64_conv1_bn" | |
type: "BatchNorm" | |
bottom: "res64_conv1" | |
top: "res64_conv1" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res64_conv1_scale" | |
type: "Scale" | |
bottom: "res64_conv1" | |
top: "res64_conv1" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res64_conv1_relu" | |
type: "ReLU" | |
bottom: "res64_conv1" | |
top: "res64_conv1" | |
} | |
layer { | |
name: "res64_conv2" | |
type: "Convolution" | |
bottom: "res64_conv1" | |
top: "res64_conv2" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 256 | |
pad: 1 | |
kernel_size: 3 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res64_conv2_bn" | |
type: "BatchNorm" | |
bottom: "res64_conv2" | |
top: "res64_conv2" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res64_conv2_scale" | |
type: "Scale" | |
bottom: "res64_conv2" | |
top: "res64_conv2" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res64_conv2_relu" | |
type: "ReLU" | |
bottom: "res64_conv2" | |
top: "res64_conv2" | |
} | |
layer { | |
name: "res64_conv3" | |
type: "Convolution" | |
bottom: "res64_conv2" | |
top: "res64_conv3" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 1024 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res64_eletwise" | |
type: "Eltwise" | |
bottom: "res63_eletwise" | |
bottom: "res64_conv3" | |
top: "res64_eletwise" | |
eltwise_param { | |
operation: SUM | |
} | |
} | |
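# res65: identity bottleneck (1x1/256 -> 3x3/256 -> 1x1/1024), shortcut from res64_eletwise.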
layer { | |
name: "res65_bn" | |
type: "BatchNorm" | |
bottom: "res64_eletwise" | |
top: "res65_bn" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res65_scale" | |
type: "Scale" | |
bottom: "res65_bn" | |
top: "res65_bn" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res65_relu" | |
type: "ReLU" | |
bottom: "res65_bn" | |
top: "res65_bn" | |
} | |
layer { | |
name: "res65_conv1" | |
type: "Convolution" | |
bottom: "res65_bn" | |
top: "res65_conv1" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 256 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res65_conv1_bn" | |
type: "BatchNorm" | |
bottom: "res65_conv1" | |
top: "res65_conv1" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res65_conv1_scale" | |
type: "Scale" | |
bottom: "res65_conv1" | |
top: "res65_conv1" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res65_conv1_relu" | |
type: "ReLU" | |
bottom: "res65_conv1" | |
top: "res65_conv1" | |
} | |
layer { | |
name: "res65_conv2" | |
type: "Convolution" | |
bottom: "res65_conv1" | |
top: "res65_conv2" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 256 | |
pad: 1 | |
kernel_size: 3 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res65_conv2_bn" | |
type: "BatchNorm" | |
bottom: "res65_conv2" | |
top: "res65_conv2" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res65_conv2_scale" | |
type: "Scale" | |
bottom: "res65_conv2" | |
top: "res65_conv2" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res65_conv2_relu" | |
type: "ReLU" | |
bottom: "res65_conv2" | |
top: "res65_conv2" | |
} | |
layer { | |
name: "res65_conv3" | |
type: "Convolution" | |
bottom: "res65_conv2" | |
top: "res65_conv3" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 1024 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res65_eletwise" | |
type: "Eltwise" | |
bottom: "res64_eletwise" | |
bottom: "res65_conv3" | |
top: "res65_eletwise" | |
eltwise_param { | |
operation: SUM | |
} | |
} | |
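# res66: identity bottleneck (1x1/256 -> 3x3/256 -> 1x1/1024), shortcut from res65_eletwise.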
layer { | |
name: "res66_bn" | |
type: "BatchNorm" | |
bottom: "res65_eletwise" | |
top: "res66_bn" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res66_scale" | |
type: "Scale" | |
bottom: "res66_bn" | |
top: "res66_bn" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res66_relu" | |
type: "ReLU" | |
bottom: "res66_bn" | |
top: "res66_bn" | |
} | |
layer { | |
name: "res66_conv1" | |
type: "Convolution" | |
bottom: "res66_bn" | |
top: "res66_conv1" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 256 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res66_conv1_bn" | |
type: "BatchNorm" | |
bottom: "res66_conv1" | |
top: "res66_conv1" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res66_conv1_scale" | |
type: "Scale" | |
bottom: "res66_conv1" | |
top: "res66_conv1" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res66_conv1_relu" | |
type: "ReLU" | |
bottom: "res66_conv1" | |
top: "res66_conv1" | |
} | |
layer { | |
name: "res66_conv2" | |
type: "Convolution" | |
bottom: "res66_conv1" | |
top: "res66_conv2" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 256 | |
pad: 1 | |
kernel_size: 3 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res66_conv2_bn" | |
type: "BatchNorm" | |
bottom: "res66_conv2" | |
top: "res66_conv2" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res66_conv2_scale" | |
type: "Scale" | |
bottom: "res66_conv2" | |
top: "res66_conv2" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res66_conv2_relu" | |
type: "ReLU" | |
bottom: "res66_conv2" | |
top: "res66_conv2" | |
} | |
layer { | |
name: "res66_conv3" | |
type: "Convolution" | |
bottom: "res66_conv2" | |
top: "res66_conv3" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 1024 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res66_eletwise" | |
type: "Eltwise" | |
bottom: "res65_eletwise" | |
bottom: "res66_conv3" | |
top: "res66_eletwise" | |
eltwise_param { | |
operation: SUM | |
} | |
} | |
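# res67: identity bottleneck (1x1/256 -> 3x3/256 -> 1x1/1024), shortcut from res66_eletwise.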
layer { | |
name: "res67_bn" | |
type: "BatchNorm" | |
bottom: "res66_eletwise" | |
top: "res67_bn" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res67_scale" | |
type: "Scale" | |
bottom: "res67_bn" | |
top: "res67_bn" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res67_relu" | |
type: "ReLU" | |
bottom: "res67_bn" | |
top: "res67_bn" | |
} | |
layer { | |
name: "res67_conv1" | |
type: "Convolution" | |
bottom: "res67_bn" | |
top: "res67_conv1" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 256 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res67_conv1_bn" | |
type: "BatchNorm" | |
bottom: "res67_conv1" | |
top: "res67_conv1" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res67_conv1_scale" | |
type: "Scale" | |
bottom: "res67_conv1" | |
top: "res67_conv1" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res67_conv1_relu" | |
type: "ReLU" | |
bottom: "res67_conv1" | |
top: "res67_conv1" | |
} | |
layer { | |
name: "res67_conv2" | |
type: "Convolution" | |
bottom: "res67_conv1" | |
top: "res67_conv2" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 256 | |
pad: 1 | |
kernel_size: 3 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res67_conv2_bn" | |
type: "BatchNorm" | |
bottom: "res67_conv2" | |
top: "res67_conv2" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res67_conv2_scale" | |
type: "Scale" | |
bottom: "res67_conv2" | |
top: "res67_conv2" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res67_conv2_relu" | |
type: "ReLU" | |
bottom: "res67_conv2" | |
top: "res67_conv2" | |
} | |
layer { | |
name: "res67_conv3" | |
type: "Convolution" | |
bottom: "res67_conv2" | |
top: "res67_conv3" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 1024 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res67_eletwise" | |
type: "Eltwise" | |
bottom: "res66_eletwise" | |
bottom: "res67_conv3" | |
top: "res67_eletwise" | |
eltwise_param { | |
operation: SUM | |
} | |
} | |
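# res68: identity bottleneck (1x1/256 -> 3x3/256 -> 1x1/1024), shortcut from res67_eletwise.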
layer { | |
name: "res68_bn" | |
type: "BatchNorm" | |
bottom: "res67_eletwise" | |
top: "res68_bn" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res68_scale" | |
type: "Scale" | |
bottom: "res68_bn" | |
top: "res68_bn" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res68_relu" | |
type: "ReLU" | |
bottom: "res68_bn" | |
top: "res68_bn" | |
} | |
layer { | |
name: "res68_conv1" | |
type: "Convolution" | |
bottom: "res68_bn" | |
top: "res68_conv1" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 256 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res68_conv1_bn" | |
type: "BatchNorm" | |
bottom: "res68_conv1" | |
top: "res68_conv1" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res68_conv1_scale" | |
type: "Scale" | |
bottom: "res68_conv1" | |
top: "res68_conv1" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res68_conv1_relu" | |
type: "ReLU" | |
bottom: "res68_conv1" | |
top: "res68_conv1" | |
} | |
layer { | |
name: "res68_conv2" | |
type: "Convolution" | |
bottom: "res68_conv1" | |
top: "res68_conv2" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 256 | |
pad: 1 | |
kernel_size: 3 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res68_conv2_bn" | |
type: "BatchNorm" | |
bottom: "res68_conv2" | |
top: "res68_conv2" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res68_conv2_scale" | |
type: "Scale" | |
bottom: "res68_conv2" | |
top: "res68_conv2" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res68_conv2_relu" | |
type: "ReLU" | |
bottom: "res68_conv2" | |
top: "res68_conv2" | |
} | |
layer { | |
name: "res68_conv3" | |
type: "Convolution" | |
bottom: "res68_conv2" | |
top: "res68_conv3" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 1024 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res68_eletwise" | |
type: "Eltwise" | |
bottom: "res67_eletwise" | |
bottom: "res68_conv3" | |
top: "res68_eletwise" | |
eltwise_param { | |
operation: SUM | |
} | |
} | |
layer { | |
name: "res69_bn" | |
type: "BatchNorm" | |
bottom: "res68_eletwise" | |
top: "res69_bn" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res69_scale" | |
type: "Scale" | |
bottom: "res69_bn" | |
top: "res69_bn" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res69_relu" | |
type: "ReLU" | |
bottom: "res69_bn" | |
top: "res69_bn" | |
} | |
layer { | |
name: "res69_conv1" | |
type: "Convolution" | |
bottom: "res69_bn" | |
top: "res69_conv1" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 256 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res69_conv1_bn" | |
type: "BatchNorm" | |
bottom: "res69_conv1" | |
top: "res69_conv1" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res69_conv1_scale" | |
type: "Scale" | |
bottom: "res69_conv1" | |
top: "res69_conv1" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res69_conv1_relu" | |
type: "ReLU" | |
bottom: "res69_conv1" | |
top: "res69_conv1" | |
} | |
layer { | |
name: "res69_conv2" | |
type: "Convolution" | |
bottom: "res69_conv1" | |
top: "res69_conv2" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 256 | |
pad: 1 | |
kernel_size: 3 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res69_conv2_bn" | |
type: "BatchNorm" | |
bottom: "res69_conv2" | |
top: "res69_conv2" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res69_conv2_scale" | |
type: "Scale" | |
bottom: "res69_conv2" | |
top: "res69_conv2" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res69_conv2_relu" | |
type: "ReLU" | |
bottom: "res69_conv2" | |
top: "res69_conv2" | |
} | |
layer { | |
name: "res69_conv3" | |
type: "Convolution" | |
bottom: "res69_conv2" | |
top: "res69_conv3" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 1024 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res69_eletwise" | |
type: "Eltwise" | |
bottom: "res68_eletwise" | |
bottom: "res69_conv3" | |
top: "res69_eletwise" | |
eltwise_param { | |
operation: SUM | |
} | |
} | |
layer { | |
name: "res70_bn" | |
type: "BatchNorm" | |
bottom: "res69_eletwise" | |
top: "res70_bn" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res70_scale" | |
type: "Scale" | |
bottom: "res70_bn" | |
top: "res70_bn" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res70_relu" | |
type: "ReLU" | |
bottom: "res70_bn" | |
top: "res70_bn" | |
} | |
layer { | |
name: "res70_conv1" | |
type: "Convolution" | |
bottom: "res70_bn" | |
top: "res70_conv1" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 256 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res70_conv1_bn" | |
type: "BatchNorm" | |
bottom: "res70_conv1" | |
top: "res70_conv1" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res70_conv1_scale" | |
type: "Scale" | |
bottom: "res70_conv1" | |
top: "res70_conv1" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res70_conv1_relu" | |
type: "ReLU" | |
bottom: "res70_conv1" | |
top: "res70_conv1" | |
} | |
layer { | |
name: "res70_conv2" | |
type: "Convolution" | |
bottom: "res70_conv1" | |
top: "res70_conv2" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 256 | |
pad: 1 | |
kernel_size: 3 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res70_conv2_bn" | |
type: "BatchNorm" | |
bottom: "res70_conv2" | |
top: "res70_conv2" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res70_conv2_scale" | |
type: "Scale" | |
bottom: "res70_conv2" | |
top: "res70_conv2" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res70_conv2_relu" | |
type: "ReLU" | |
bottom: "res70_conv2" | |
top: "res70_conv2" | |
} | |
layer { | |
name: "res70_conv3" | |
type: "Convolution" | |
bottom: "res70_conv2" | |
top: "res70_conv3" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 1024 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res70_eletwise" | |
type: "Eltwise" | |
bottom: "res69_eletwise" | |
bottom: "res70_conv3" | |
top: "res70_eletwise" | |
eltwise_param { | |
operation: SUM | |
} | |
} | |
layer { | |
name: "res71_bn" | |
type: "BatchNorm" | |
bottom: "res70_eletwise" | |
top: "res71_bn" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res71_scale" | |
type: "Scale" | |
bottom: "res71_bn" | |
top: "res71_bn" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res71_relu" | |
type: "ReLU" | |
bottom: "res71_bn" | |
top: "res71_bn" | |
} | |
layer { | |
name: "res71_conv1" | |
type: "Convolution" | |
bottom: "res71_bn" | |
top: "res71_conv1" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 256 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res71_conv1_bn" | |
type: "BatchNorm" | |
bottom: "res71_conv1" | |
top: "res71_conv1" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res71_conv1_scale" | |
type: "Scale" | |
bottom: "res71_conv1" | |
top: "res71_conv1" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res71_conv1_relu" | |
type: "ReLU" | |
bottom: "res71_conv1" | |
top: "res71_conv1" | |
} | |
layer { | |
name: "res71_conv2" | |
type: "Convolution" | |
bottom: "res71_conv1" | |
top: "res71_conv2" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 256 | |
pad: 1 | |
kernel_size: 3 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res71_conv2_bn" | |
type: "BatchNorm" | |
bottom: "res71_conv2" | |
top: "res71_conv2" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res71_conv2_scale" | |
type: "Scale" | |
bottom: "res71_conv2" | |
top: "res71_conv2" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res71_conv2_relu" | |
type: "ReLU" | |
bottom: "res71_conv2" | |
top: "res71_conv2" | |
} | |
layer { | |
name: "res71_conv3" | |
type: "Convolution" | |
bottom: "res71_conv2" | |
top: "res71_conv3" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 1024 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res71_eletwise" | |
type: "Eltwise" | |
bottom: "res70_eletwise" | |
bottom: "res71_conv3" | |
top: "res71_eletwise" | |
eltwise_param { | |
operation: SUM | |
} | |
} | |
layer { | |
name: "res72_bn" | |
type: "BatchNorm" | |
bottom: "res71_eletwise" | |
top: "res72_bn" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res72_scale" | |
type: "Scale" | |
bottom: "res72_bn" | |
top: "res72_bn" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res72_relu" | |
type: "ReLU" | |
bottom: "res72_bn" | |
top: "res72_bn" | |
} | |
layer { | |
name: "res72_conv1" | |
type: "Convolution" | |
bottom: "res72_bn" | |
top: "res72_conv1" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 256 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res72_conv1_bn" | |
type: "BatchNorm" | |
bottom: "res72_conv1" | |
top: "res72_conv1" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res72_conv1_scale" | |
type: "Scale" | |
bottom: "res72_conv1" | |
top: "res72_conv1" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res72_conv1_relu" | |
type: "ReLU" | |
bottom: "res72_conv1" | |
top: "res72_conv1" | |
} | |
layer { | |
name: "res72_conv2" | |
type: "Convolution" | |
bottom: "res72_conv1" | |
top: "res72_conv2" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 256 | |
pad: 1 | |
kernel_size: 3 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res72_conv2_bn" | |
type: "BatchNorm" | |
bottom: "res72_conv2" | |
top: "res72_conv2" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res72_conv2_scale" | |
type: "Scale" | |
bottom: "res72_conv2" | |
top: "res72_conv2" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res72_conv2_relu" | |
type: "ReLU" | |
bottom: "res72_conv2" | |
top: "res72_conv2" | |
} | |
layer { | |
name: "res72_conv3" | |
type: "Convolution" | |
bottom: "res72_conv2" | |
top: "res72_conv3" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 1024 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res72_eletwise" | |
type: "Eltwise" | |
bottom: "res71_eletwise" | |
bottom: "res72_conv3" | |
top: "res72_eletwise" | |
eltwise_param { | |
operation: SUM | |
} | |
} | |
layer { | |
name: "res73_bn" | |
type: "BatchNorm" | |
bottom: "res72_eletwise" | |
top: "res73_bn" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res73_scale" | |
type: "Scale" | |
bottom: "res73_bn" | |
top: "res73_bn" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res73_relu" | |
type: "ReLU" | |
bottom: "res73_bn" | |
top: "res73_bn" | |
} | |
layer { | |
name: "res73_conv1" | |
type: "Convolution" | |
bottom: "res73_bn" | |
top: "res73_conv1" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 256 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res73_conv1_bn" | |
type: "BatchNorm" | |
bottom: "res73_conv1" | |
top: "res73_conv1" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res73_conv1_scale" | |
type: "Scale" | |
bottom: "res73_conv1" | |
top: "res73_conv1" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res73_conv1_relu" | |
type: "ReLU" | |
bottom: "res73_conv1" | |
top: "res73_conv1" | |
} | |
layer { | |
name: "res73_conv2" | |
type: "Convolution" | |
bottom: "res73_conv1" | |
top: "res73_conv2" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 256 | |
pad: 1 | |
kernel_size: 3 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res73_conv2_bn" | |
type: "BatchNorm" | |
bottom: "res73_conv2" | |
top: "res73_conv2" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res73_conv2_scale" | |
type: "Scale" | |
bottom: "res73_conv2" | |
top: "res73_conv2" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res73_conv2_relu" | |
type: "ReLU" | |
bottom: "res73_conv2" | |
top: "res73_conv2" | |
} | |
layer { | |
name: "res73_conv3" | |
type: "Convolution" | |
bottom: "res73_conv2" | |
top: "res73_conv3" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 1024 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res73_eletwise" | |
type: "Eltwise" | |
bottom: "res72_eletwise" | |
bottom: "res73_conv3" | |
top: "res73_eletwise" | |
eltwise_param { | |
operation: SUM | |
} | |
} | |
layer { | |
name: "res74_bn" | |
type: "BatchNorm" | |
bottom: "res73_eletwise" | |
top: "res74_bn" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res74_scale" | |
type: "Scale" | |
bottom: "res74_bn" | |
top: "res74_bn" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res74_relu" | |
type: "ReLU" | |
bottom: "res74_bn" | |
top: "res74_bn" | |
} | |
layer { | |
name: "res74_conv1" | |
type: "Convolution" | |
bottom: "res74_bn" | |
top: "res74_conv1" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 256 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res74_conv1_bn" | |
type: "BatchNorm" | |
bottom: "res74_conv1" | |
top: "res74_conv1" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res74_conv1_scale" | |
type: "Scale" | |
bottom: "res74_conv1" | |
top: "res74_conv1" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res74_conv1_relu" | |
type: "ReLU" | |
bottom: "res74_conv1" | |
top: "res74_conv1" | |
} | |
layer { | |
name: "res74_conv2" | |
type: "Convolution" | |
bottom: "res74_conv1" | |
top: "res74_conv2" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 256 | |
pad: 1 | |
kernel_size: 3 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res74_conv2_bn" | |
type: "BatchNorm" | |
bottom: "res74_conv2" | |
top: "res74_conv2" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res74_conv2_scale" | |
type: "Scale" | |
bottom: "res74_conv2" | |
top: "res74_conv2" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res74_conv2_relu" | |
type: "ReLU" | |
bottom: "res74_conv2" | |
top: "res74_conv2" | |
} | |
layer { | |
name: "res74_conv3" | |
type: "Convolution" | |
bottom: "res74_conv2" | |
top: "res74_conv3" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 1024 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res74_eletwise" | |
type: "Eltwise" | |
bottom: "res73_eletwise" | |
bottom: "res74_conv3" | |
top: "res74_eletwise" | |
eltwise_param { | |
operation: SUM | |
} | |
} | |
layer { | |
name: "res75_bn" | |
type: "BatchNorm" | |
bottom: "res74_eletwise" | |
top: "res75_bn" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res75_scale" | |
type: "Scale" | |
bottom: "res75_bn" | |
top: "res75_bn" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res75_relu" | |
type: "ReLU" | |
bottom: "res75_bn" | |
top: "res75_bn" | |
} | |
layer { | |
name: "res75_conv1" | |
type: "Convolution" | |
bottom: "res75_bn" | |
top: "res75_conv1" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 256 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res75_conv1_bn" | |
type: "BatchNorm" | |
bottom: "res75_conv1" | |
top: "res75_conv1" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res75_conv1_scale" | |
type: "Scale" | |
bottom: "res75_conv1" | |
top: "res75_conv1" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res75_conv1_relu" | |
type: "ReLU" | |
bottom: "res75_conv1" | |
top: "res75_conv1" | |
} | |
layer { | |
name: "res75_conv2" | |
type: "Convolution" | |
bottom: "res75_conv1" | |
top: "res75_conv2" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 256 | |
pad: 1 | |
kernel_size: 3 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res75_conv2_bn" | |
type: "BatchNorm" | |
bottom: "res75_conv2" | |
top: "res75_conv2" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res75_conv2_scale" | |
type: "Scale" | |
bottom: "res75_conv2" | |
top: "res75_conv2" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res75_conv2_relu" | |
type: "ReLU" | |
bottom: "res75_conv2" | |
top: "res75_conv2" | |
} | |
layer { | |
name: "res75_conv3" | |
type: "Convolution" | |
bottom: "res75_conv2" | |
top: "res75_conv3" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 1024 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res75_eletwise" | |
type: "Eltwise" | |
bottom: "res74_eletwise" | |
bottom: "res75_conv3" | |
top: "res75_eletwise" | |
eltwise_param { | |
operation: SUM | |
} | |
} | |
layer { | |
name: "res76_bn" | |
type: "BatchNorm" | |
bottom: "res75_eletwise" | |
top: "res76_bn" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res76_scale" | |
type: "Scale" | |
bottom: "res76_bn" | |
top: "res76_bn" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res76_relu" | |
type: "ReLU" | |
bottom: "res76_bn" | |
top: "res76_bn" | |
} | |
layer { | |
name: "res76_conv1" | |
type: "Convolution" | |
bottom: "res76_bn" | |
top: "res76_conv1" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 256 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res76_conv1_bn" | |
type: "BatchNorm" | |
bottom: "res76_conv1" | |
top: "res76_conv1" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res76_conv1_scale" | |
type: "Scale" | |
bottom: "res76_conv1" | |
top: "res76_conv1" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res76_conv1_relu" | |
type: "ReLU" | |
bottom: "res76_conv1" | |
top: "res76_conv1" | |
} | |
layer { | |
name: "res76_conv2" | |
type: "Convolution" | |
bottom: "res76_conv1" | |
top: "res76_conv2" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 256 | |
pad: 1 | |
kernel_size: 3 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res76_conv2_bn" | |
type: "BatchNorm" | |
bottom: "res76_conv2" | |
top: "res76_conv2" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res76_conv2_scale" | |
type: "Scale" | |
bottom: "res76_conv2" | |
top: "res76_conv2" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res76_conv2_relu" | |
type: "ReLU" | |
bottom: "res76_conv2" | |
top: "res76_conv2" | |
} | |
layer { | |
name: "res76_conv3" | |
type: "Convolution" | |
bottom: "res76_conv2" | |
top: "res76_conv3" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 1024 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res76_eletwise" | |
type: "Eltwise" | |
bottom: "res75_eletwise" | |
bottom: "res76_conv3" | |
top: "res76_eletwise" | |
eltwise_param { | |
operation: SUM | |
} | |
} | |
layer { | |
name: "res77_bn" | |
type: "BatchNorm" | |
bottom: "res76_eletwise" | |
top: "res77_bn" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res77_scale" | |
type: "Scale" | |
bottom: "res77_bn" | |
top: "res77_bn" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res77_relu" | |
type: "ReLU" | |
bottom: "res77_bn" | |
top: "res77_bn" | |
} | |
layer { | |
name: "res77_conv1" | |
type: "Convolution" | |
bottom: "res77_bn" | |
top: "res77_conv1" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 256 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res77_conv1_bn" | |
type: "BatchNorm" | |
bottom: "res77_conv1" | |
top: "res77_conv1" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res77_conv1_scale" | |
type: "Scale" | |
bottom: "res77_conv1" | |
top: "res77_conv1" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res77_conv1_relu" | |
type: "ReLU" | |
bottom: "res77_conv1" | |
top: "res77_conv1" | |
} | |
layer { | |
name: "res77_conv2" | |
type: "Convolution" | |
bottom: "res77_conv1" | |
top: "res77_conv2" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 256 | |
pad: 1 | |
kernel_size: 3 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res77_conv2_bn" | |
type: "BatchNorm" | |
bottom: "res77_conv2" | |
top: "res77_conv2" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res77_conv2_scale" | |
type: "Scale" | |
bottom: "res77_conv2" | |
top: "res77_conv2" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res77_conv2_relu" | |
type: "ReLU" | |
bottom: "res77_conv2" | |
top: "res77_conv2" | |
} | |
layer { | |
name: "res77_conv3" | |
type: "Convolution" | |
bottom: "res77_conv2" | |
top: "res77_conv3" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 1024 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res77_eletwise" | |
type: "Eltwise" | |
bottom: "res76_eletwise" | |
bottom: "res77_conv3" | |
top: "res77_eletwise" | |
eltwise_param { | |
operation: SUM | |
} | |
} | |
layer { | |
name: "res78_bn" | |
type: "BatchNorm" | |
bottom: "res77_eletwise" | |
top: "res78_bn" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res78_scale" | |
type: "Scale" | |
bottom: "res78_bn" | |
top: "res78_bn" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res78_relu" | |
type: "ReLU" | |
bottom: "res78_bn" | |
top: "res78_bn" | |
} | |
layer { | |
name: "res78_conv1" | |
type: "Convolution" | |
bottom: "res78_bn" | |
top: "res78_conv1" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 256 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res78_conv1_bn" | |
type: "BatchNorm" | |
bottom: "res78_conv1" | |
top: "res78_conv1" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res78_conv1_scale" | |
type: "Scale" | |
bottom: "res78_conv1" | |
top: "res78_conv1" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res78_conv1_relu" | |
type: "ReLU" | |
bottom: "res78_conv1" | |
top: "res78_conv1" | |
} | |
layer { | |
name: "res78_conv2" | |
type: "Convolution" | |
bottom: "res78_conv1" | |
top: "res78_conv2" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 256 | |
pad: 1 | |
kernel_size: 3 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res78_conv2_bn" | |
type: "BatchNorm" | |
bottom: "res78_conv2" | |
top: "res78_conv2" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res78_conv2_scale" | |
type: "Scale" | |
bottom: "res78_conv2" | |
top: "res78_conv2" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res78_conv2_relu" | |
type: "ReLU" | |
bottom: "res78_conv2" | |
top: "res78_conv2" | |
} | |
layer { | |
name: "res78_conv3" | |
type: "Convolution" | |
bottom: "res78_conv2" | |
top: "res78_conv3" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 1024 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res78_eletwise" | |
type: "Eltwise" | |
bottom: "res77_eletwise" | |
bottom: "res78_conv3" | |
top: "res78_eletwise" | |
eltwise_param { | |
operation: SUM | |
} | |
} | |
layer { | |
name: "res79_bn" | |
type: "BatchNorm" | |
bottom: "res78_eletwise" | |
top: "res79_bn" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res79_scale" | |
type: "Scale" | |
bottom: "res79_bn" | |
top: "res79_bn" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res79_relu" | |
type: "ReLU" | |
bottom: "res79_bn" | |
top: "res79_bn" | |
} | |
layer { | |
name: "res79_conv1" | |
type: "Convolution" | |
bottom: "res79_bn" | |
top: "res79_conv1" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 256 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res79_conv1_bn" | |
type: "BatchNorm" | |
bottom: "res79_conv1" | |
top: "res79_conv1" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res79_conv1_scale" | |
type: "Scale" | |
bottom: "res79_conv1" | |
top: "res79_conv1" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res79_conv1_relu" | |
type: "ReLU" | |
bottom: "res79_conv1" | |
top: "res79_conv1" | |
} | |
layer { | |
name: "res79_conv2" | |
type: "Convolution" | |
bottom: "res79_conv1" | |
top: "res79_conv2" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 256 | |
pad: 1 | |
kernel_size: 3 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res79_conv2_bn" | |
type: "BatchNorm" | |
bottom: "res79_conv2" | |
top: "res79_conv2" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res79_conv2_scale" | |
type: "Scale" | |
bottom: "res79_conv2" | |
top: "res79_conv2" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res79_conv2_relu" | |
type: "ReLU" | |
bottom: "res79_conv2" | |
top: "res79_conv2" | |
} | |
layer { | |
name: "res79_conv3" | |
type: "Convolution" | |
bottom: "res79_conv2" | |
top: "res79_conv3" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 1024 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res79_eletwise" | |
type: "Eltwise" | |
bottom: "res78_eletwise" | |
bottom: "res79_conv3" | |
top: "res79_eletwise" | |
eltwise_param { | |
operation: SUM | |
} | |
} | |
layer { | |
name: "res80_bn" | |
type: "BatchNorm" | |
bottom: "res79_eletwise" | |
top: "res80_bn" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res80_scale" | |
type: "Scale" | |
bottom: "res80_bn" | |
top: "res80_bn" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res80_relu" | |
type: "ReLU" | |
bottom: "res80_bn" | |
top: "res80_bn" | |
} | |
layer { | |
name: "res80_conv1" | |
type: "Convolution" | |
bottom: "res80_bn" | |
top: "res80_conv1" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 256 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res80_conv1_bn" | |
type: "BatchNorm" | |
bottom: "res80_conv1" | |
top: "res80_conv1" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res80_conv1_scale" | |
type: "Scale" | |
bottom: "res80_conv1" | |
top: "res80_conv1" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res80_conv1_relu" | |
type: "ReLU" | |
bottom: "res80_conv1" | |
top: "res80_conv1" | |
} | |
layer { | |
name: "res80_conv2" | |
type: "Convolution" | |
bottom: "res80_conv1" | |
top: "res80_conv2" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 256 | |
pad: 1 | |
kernel_size: 3 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res80_conv2_bn" | |
type: "BatchNorm" | |
bottom: "res80_conv2" | |
top: "res80_conv2" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res80_conv2_scale" | |
type: "Scale" | |
bottom: "res80_conv2" | |
top: "res80_conv2" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res80_conv2_relu" | |
type: "ReLU" | |
bottom: "res80_conv2" | |
top: "res80_conv2" | |
} | |
layer { | |
name: "res80_conv3" | |
type: "Convolution" | |
bottom: "res80_conv2" | |
top: "res80_conv3" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 1024 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res80_eletwise" | |
type: "Eltwise" | |
bottom: "res79_eletwise" | |
bottom: "res80_conv3" | |
top: "res80_eletwise" | |
eltwise_param { | |
operation: SUM | |
} | |
} | |
layer { | |
name: "res81_bn" | |
type: "BatchNorm" | |
bottom: "res80_eletwise" | |
top: "res81_bn" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res81_scale" | |
type: "Scale" | |
bottom: "res81_bn" | |
top: "res81_bn" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res81_relu" | |
type: "ReLU" | |
bottom: "res81_bn" | |
top: "res81_bn" | |
} | |
layer { | |
name: "res81_conv1" | |
type: "Convolution" | |
bottom: "res81_bn" | |
top: "res81_conv1" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 256 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res81_conv1_bn" | |
type: "BatchNorm" | |
bottom: "res81_conv1" | |
top: "res81_conv1" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res81_conv1_scale" | |
type: "Scale" | |
bottom: "res81_conv1" | |
top: "res81_conv1" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res81_conv1_relu" | |
type: "ReLU" | |
bottom: "res81_conv1" | |
top: "res81_conv1" | |
} | |
layer { | |
name: "res81_conv2" | |
type: "Convolution" | |
bottom: "res81_conv1" | |
top: "res81_conv2" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 256 | |
pad: 1 | |
kernel_size: 3 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res81_conv2_bn" | |
type: "BatchNorm" | |
bottom: "res81_conv2" | |
top: "res81_conv2" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res81_conv2_scale" | |
type: "Scale" | |
bottom: "res81_conv2" | |
top: "res81_conv2" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res81_conv2_relu" | |
type: "ReLU" | |
bottom: "res81_conv2" | |
top: "res81_conv2" | |
} | |
layer { | |
name: "res81_conv3" | |
type: "Convolution" | |
bottom: "res81_conv2" | |
top: "res81_conv3" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 1024 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res81_eletwise" | |
type: "Eltwise" | |
bottom: "res80_eletwise" | |
bottom: "res81_conv3" | |
top: "res81_eletwise" | |
eltwise_param { | |
operation: SUM | |
} | |
} | |
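# res82 opens the final stage: the bottleneck widens to 512/2048 channels, the 3x3
# conv (res82_conv2) downsamples with stride 2, and the shortcut branch is projected
# through a 1x1, stride-2 "res82_match_conv" so both eltwise inputs have matching
# shapes. res83 through res89 then repeat the 512/2048 bottleneck with identity
# shortcuts.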
layer { | |
name: "res82_bn" | |
type: "BatchNorm" | |
bottom: "res81_eletwise" | |
top: "res82_bn" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res82_scale" | |
type: "Scale" | |
bottom: "res82_bn" | |
top: "res82_bn" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res82_relu" | |
type: "ReLU" | |
bottom: "res82_bn" | |
top: "res82_bn" | |
} | |
layer { | |
name: "res82_conv1" | |
type: "Convolution" | |
bottom: "res82_bn" | |
top: "res82_conv1" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 512 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res82_conv1_bn" | |
type: "BatchNorm" | |
bottom: "res82_conv1" | |
top: "res82_conv1" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res82_conv1_scale" | |
type: "Scale" | |
bottom: "res82_conv1" | |
top: "res82_conv1" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res82_conv1_relu" | |
type: "ReLU" | |
bottom: "res82_conv1" | |
top: "res82_conv1" | |
} | |
layer { | |
name: "res82_conv2" | |
type: "Convolution" | |
bottom: "res82_conv1" | |
top: "res82_conv2" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 512 | |
pad: 1 | |
kernel_size: 3 | |
stride: 2 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res82_conv2_bn" | |
type: "BatchNorm" | |
bottom: "res82_conv2" | |
top: "res82_conv2" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res82_conv2_scale" | |
type: "Scale" | |
bottom: "res82_conv2" | |
top: "res82_conv2" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res82_conv2_relu" | |
type: "ReLU" | |
bottom: "res82_conv2" | |
top: "res82_conv2" | |
} | |
layer { | |
name: "res82_conv3" | |
type: "Convolution" | |
bottom: "res82_conv2" | |
top: "res82_conv3" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 2048 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res82_match_conv" | |
type: "Convolution" | |
bottom: "res82_bn" | |
top: "res82_match_conv" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 2048 | |
pad: 0 | |
kernel_size: 1 | |
stride: 2 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res82_eletwise" | |
type: "Eltwise" | |
bottom: "res82_match_conv" | |
bottom: "res82_conv3" | |
top: "res82_eletwise" | |
eltwise_param { | |
operation: SUM | |
} | |
} | |
layer { | |
name: "res83_bn" | |
type: "BatchNorm" | |
bottom: "res82_eletwise" | |
top: "res83_bn" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res83_scale" | |
type: "Scale" | |
bottom: "res83_bn" | |
top: "res83_bn" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res83_relu" | |
type: "ReLU" | |
bottom: "res83_bn" | |
top: "res83_bn" | |
} | |
layer { | |
name: "res83_conv1" | |
type: "Convolution" | |
bottom: "res83_bn" | |
top: "res83_conv1" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 512 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res83_conv1_bn" | |
type: "BatchNorm" | |
bottom: "res83_conv1" | |
top: "res83_conv1" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res83_conv1_scale" | |
type: "Scale" | |
bottom: "res83_conv1" | |
top: "res83_conv1" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res83_conv1_relu" | |
type: "ReLU" | |
bottom: "res83_conv1" | |
top: "res83_conv1" | |
} | |
layer { | |
name: "res83_conv2" | |
type: "Convolution" | |
bottom: "res83_conv1" | |
top: "res83_conv2" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 512 | |
pad: 1 | |
kernel_size: 3 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res83_conv2_bn" | |
type: "BatchNorm" | |
bottom: "res83_conv2" | |
top: "res83_conv2" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res83_conv2_scale" | |
type: "Scale" | |
bottom: "res83_conv2" | |
top: "res83_conv2" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res83_conv2_relu" | |
type: "ReLU" | |
bottom: "res83_conv2" | |
top: "res83_conv2" | |
} | |
layer { | |
name: "res83_conv3" | |
type: "Convolution" | |
bottom: "res83_conv2" | |
top: "res83_conv3" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 2048 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res83_eletwise" | |
type: "Eltwise" | |
bottom: "res82_eletwise" | |
bottom: "res83_conv3" | |
top: "res83_eletwise" | |
eltwise_param { | |
operation: SUM | |
} | |
} | |
layer { | |
name: "res84_bn" | |
type: "BatchNorm" | |
bottom: "res83_eletwise" | |
top: "res84_bn" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res84_scale" | |
type: "Scale" | |
bottom: "res84_bn" | |
top: "res84_bn" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res84_relu" | |
type: "ReLU" | |
bottom: "res84_bn" | |
top: "res84_bn" | |
} | |
layer { | |
name: "res84_conv1" | |
type: "Convolution" | |
bottom: "res84_bn" | |
top: "res84_conv1" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 512 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res84_conv1_bn" | |
type: "BatchNorm" | |
bottom: "res84_conv1" | |
top: "res84_conv1" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res84_conv1_scale" | |
type: "Scale" | |
bottom: "res84_conv1" | |
top: "res84_conv1" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res84_conv1_relu" | |
type: "ReLU" | |
bottom: "res84_conv1" | |
top: "res84_conv1" | |
} | |
layer { | |
name: "res84_conv2" | |
type: "Convolution" | |
bottom: "res84_conv1" | |
top: "res84_conv2" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 512 | |
pad: 1 | |
kernel_size: 3 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res84_conv2_bn" | |
type: "BatchNorm" | |
bottom: "res84_conv2" | |
top: "res84_conv2" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res84_conv2_scale" | |
type: "Scale" | |
bottom: "res84_conv2" | |
top: "res84_conv2" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res84_conv2_relu" | |
type: "ReLU" | |
bottom: "res84_conv2" | |
top: "res84_conv2" | |
} | |
layer { | |
name: "res84_conv3" | |
type: "Convolution" | |
bottom: "res84_conv2" | |
top: "res84_conv3" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 2048 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res84_eletwise" | |
type: "Eltwise" | |
bottom: "res83_eletwise" | |
bottom: "res84_conv3" | |
top: "res84_eletwise" | |
eltwise_param { | |
operation: SUM | |
} | |
} | |
layer { | |
name: "res85_bn" | |
type: "BatchNorm" | |
bottom: "res84_eletwise" | |
top: "res85_bn" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res85_scale" | |
type: "Scale" | |
bottom: "res85_bn" | |
top: "res85_bn" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res85_relu" | |
type: "ReLU" | |
bottom: "res85_bn" | |
top: "res85_bn" | |
} | |
layer { | |
name: "res85_conv1" | |
type: "Convolution" | |
bottom: "res85_bn" | |
top: "res85_conv1" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 512 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res85_conv1_bn" | |
type: "BatchNorm" | |
bottom: "res85_conv1" | |
top: "res85_conv1" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res85_conv1_scale" | |
type: "Scale" | |
bottom: "res85_conv1" | |
top: "res85_conv1" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res85_conv1_relu" | |
type: "ReLU" | |
bottom: "res85_conv1" | |
top: "res85_conv1" | |
} | |
layer { | |
name: "res85_conv2" | |
type: "Convolution" | |
bottom: "res85_conv1" | |
top: "res85_conv2" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 512 | |
pad: 1 | |
kernel_size: 3 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res85_conv2_bn" | |
type: "BatchNorm" | |
bottom: "res85_conv2" | |
top: "res85_conv2" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res85_conv2_scale" | |
type: "Scale" | |
bottom: "res85_conv2" | |
top: "res85_conv2" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res85_conv2_relu" | |
type: "ReLU" | |
bottom: "res85_conv2" | |
top: "res85_conv2" | |
} | |
layer { | |
name: "res85_conv3" | |
type: "Convolution" | |
bottom: "res85_conv2" | |
top: "res85_conv3" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 2048 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res85_eletwise" | |
type: "Eltwise" | |
bottom: "res84_eletwise" | |
bottom: "res85_conv3" | |
top: "res85_eletwise" | |
eltwise_param { | |
operation: SUM | |
} | |
} | |
layer { | |
name: "res86_bn" | |
type: "BatchNorm" | |
bottom: "res85_eletwise" | |
top: "res86_bn" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res86_scale" | |
type: "Scale" | |
bottom: "res86_bn" | |
top: "res86_bn" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res86_relu" | |
type: "ReLU" | |
bottom: "res86_bn" | |
top: "res86_bn" | |
} | |
layer { | |
name: "res86_conv1" | |
type: "Convolution" | |
bottom: "res86_bn" | |
top: "res86_conv1" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 512 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res86_conv1_bn" | |
type: "BatchNorm" | |
bottom: "res86_conv1" | |
top: "res86_conv1" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res86_conv1_scale" | |
type: "Scale" | |
bottom: "res86_conv1" | |
top: "res86_conv1" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res86_conv1_relu" | |
type: "ReLU" | |
bottom: "res86_conv1" | |
top: "res86_conv1" | |
} | |
layer { | |
name: "res86_conv2" | |
type: "Convolution" | |
bottom: "res86_conv1" | |
top: "res86_conv2" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 512 | |
pad: 1 | |
kernel_size: 3 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res86_conv2_bn" | |
type: "BatchNorm" | |
bottom: "res86_conv2" | |
top: "res86_conv2" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res86_conv2_scale" | |
type: "Scale" | |
bottom: "res86_conv2" | |
top: "res86_conv2" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res86_conv2_relu" | |
type: "ReLU" | |
bottom: "res86_conv2" | |
top: "res86_conv2" | |
} | |
layer { | |
name: "res86_conv3" | |
type: "Convolution" | |
bottom: "res86_conv2" | |
top: "res86_conv3" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 2048 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res86_eletwise" | |
type: "Eltwise" | |
bottom: "res85_eletwise" | |
bottom: "res86_conv3" | |
top: "res86_eletwise" | |
eltwise_param { | |
operation: SUM | |
} | |
} | |
layer { | |
name: "res87_bn" | |
type: "BatchNorm" | |
bottom: "res86_eletwise" | |
top: "res87_bn" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res87_scale" | |
type: "Scale" | |
bottom: "res87_bn" | |
top: "res87_bn" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res87_relu" | |
type: "ReLU" | |
bottom: "res87_bn" | |
top: "res87_bn" | |
} | |
layer { | |
name: "res87_conv1" | |
type: "Convolution" | |
bottom: "res87_bn" | |
top: "res87_conv1" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 512 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res87_conv1_bn" | |
type: "BatchNorm" | |
bottom: "res87_conv1" | |
top: "res87_conv1" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res87_conv1_scale" | |
type: "Scale" | |
bottom: "res87_conv1" | |
top: "res87_conv1" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res87_conv1_relu" | |
type: "ReLU" | |
bottom: "res87_conv1" | |
top: "res87_conv1" | |
} | |
layer { | |
name: "res87_conv2" | |
type: "Convolution" | |
bottom: "res87_conv1" | |
top: "res87_conv2" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 512 | |
pad: 1 | |
kernel_size: 3 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res87_conv2_bn" | |
type: "BatchNorm" | |
bottom: "res87_conv2" | |
top: "res87_conv2" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res87_conv2_scale" | |
type: "Scale" | |
bottom: "res87_conv2" | |
top: "res87_conv2" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res87_conv2_relu" | |
type: "ReLU" | |
bottom: "res87_conv2" | |
top: "res87_conv2" | |
} | |
layer { | |
name: "res87_conv3" | |
type: "Convolution" | |
bottom: "res87_conv2" | |
top: "res87_conv3" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 2048 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res87_eletwise" | |
type: "Eltwise" | |
bottom: "res86_eletwise" | |
bottom: "res87_conv3" | |
top: "res87_eletwise" | |
eltwise_param { | |
operation: SUM | |
} | |
} | |
layer { | |
name: "res88_bn" | |
type: "BatchNorm" | |
bottom: "res87_eletwise" | |
top: "res88_bn" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res88_scale" | |
type: "Scale" | |
bottom: "res88_bn" | |
top: "res88_bn" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res88_relu" | |
type: "ReLU" | |
bottom: "res88_bn" | |
top: "res88_bn" | |
} | |
layer { | |
name: "res88_conv1" | |
type: "Convolution" | |
bottom: "res88_bn" | |
top: "res88_conv1" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 512 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res88_conv1_bn" | |
type: "BatchNorm" | |
bottom: "res88_conv1" | |
top: "res88_conv1" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res88_conv1_scale" | |
type: "Scale" | |
bottom: "res88_conv1" | |
top: "res88_conv1" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res88_conv1_relu" | |
type: "ReLU" | |
bottom: "res88_conv1" | |
top: "res88_conv1" | |
} | |
layer { | |
name: "res88_conv2" | |
type: "Convolution" | |
bottom: "res88_conv1" | |
top: "res88_conv2" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 512 | |
pad: 1 | |
kernel_size: 3 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res88_conv2_bn" | |
type: "BatchNorm" | |
bottom: "res88_conv2" | |
top: "res88_conv2" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res88_conv2_scale" | |
type: "Scale" | |
bottom: "res88_conv2" | |
top: "res88_conv2" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res88_conv2_relu" | |
type: "ReLU" | |
bottom: "res88_conv2" | |
top: "res88_conv2" | |
} | |
layer { | |
name: "res88_conv3" | |
type: "Convolution" | |
bottom: "res88_conv2" | |
top: "res88_conv3" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 2048 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res88_eletwise" | |
type: "Eltwise" | |
bottom: "res87_eletwise" | |
bottom: "res88_conv3" | |
top: "res88_eletwise" | |
eltwise_param { | |
operation: SUM | |
} | |
} | |
layer { | |
name: "res89_bn" | |
type: "BatchNorm" | |
bottom: "res88_eletwise" | |
top: "res89_bn" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res89_scale" | |
type: "Scale" | |
bottom: "res89_bn" | |
top: "res89_bn" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res89_relu" | |
type: "ReLU" | |
bottom: "res89_bn" | |
top: "res89_bn" | |
} | |
layer { | |
name: "res89_conv1" | |
type: "Convolution" | |
bottom: "res89_bn" | |
top: "res89_conv1" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 512 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res89_conv1_bn" | |
type: "BatchNorm" | |
bottom: "res89_conv1" | |
top: "res89_conv1" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res89_conv1_scale" | |
type: "Scale" | |
bottom: "res89_conv1" | |
top: "res89_conv1" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res89_conv1_relu" | |
type: "ReLU" | |
bottom: "res89_conv1" | |
top: "res89_conv1" | |
} | |
layer { | |
name: "res89_conv2" | |
type: "Convolution" | |
bottom: "res89_conv1" | |
top: "res89_conv2" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 512 | |
pad: 1 | |
kernel_size: 3 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res89_conv2_bn" | |
type: "BatchNorm" | |
bottom: "res89_conv2" | |
top: "res89_conv2" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res89_conv2_scale" | |
type: "Scale" | |
bottom: "res89_conv2" | |
top: "res89_conv2" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res89_conv2_relu" | |
type: "ReLU" | |
bottom: "res89_conv2" | |
top: "res89_conv2" | |
} | |
layer { | |
name: "res89_conv3" | |
type: "Convolution" | |
bottom: "res89_conv2" | |
top: "res89_conv3" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
convolution_param { | |
bias_term: false | |
num_output: 2048 | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
std: 0.01 | |
} | |
} | |
} | |
layer { | |
name: "res89_eletwise" | |
type: "Eltwise" | |
bottom: "res88_eletwise" | |
bottom: "res89_conv3" | |
top: "res89_eletwise" | |
eltwise_param { | |
operation: SUM | |
} | |
} | |
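# Final pre-activation epilogue: the last residual sum receives one more
# BN/Scale/ReLU, since no later block would otherwise normalize or activate it.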
layer { | |
name: "res89_eletwise_bn" | |
type: "BatchNorm" | |
bottom: "res89_eletwise" | |
top: "res89_eletwise_bn" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "res89_eletwise_scale" | |
type: "Scale" | |
bottom: "res89_eletwise_bn" | |
top: "res89_eletwise_bn" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res89_eletwise_relu" | |
type: "ReLU" | |
bottom: "res89_eletwise_bn" | |
top: "res89_eletwise_bn" | |
} | |
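# Classification head: global average pooling over the 2048-channel feature map,
# a 1000-way fully connected layer ("classifier"), and a Softmax producing "prob".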
layer { | |
name: "pool5" | |
type: "Pooling" | |
bottom: "res89_eletwise_bn" | |
top: "pool5" | |
pooling_param { | |
pool: AVE | |
global_pooling: true | |
} | |
} | |
layer { | |
name: "classifier" | |
type: "InnerProduct" | |
bottom: "pool5" | |
top: "classifier" | |
param { | |
lr_mult: 1 | |
decay_mult: 1 | |
} | |
inner_product_param { | |
num_output: 1000 | |
weight_filler { | |
type: "xavier" | |
} | |
} | |
} | |
layer { | |
name: "prob" | |
type: "Softmax" | |
bottom: "classifier" | |
top: "prob" | |
} |