
How to understand the upsample layers of SegNet

SegNet has upsample layers. The original images are 480 * 360, but when I use images of size 565 * 584 I get the following error:
I0929 03:58:06.238135 22750 net.cpp:368] upsample4 -> pool4_D 
I0929 03:58:06.238142 22750 net.cpp:120] Setting up upsample4 
F0929 03:58:06.238164 22750 upsample_layer.cpp:63] Check failed: bottom[0]->height() == bottom[1]->height() (38 vs. 37) 

Here is the definition of upsample4. I think I have to change upsample_w and upsample_h:

layer { 
    name: "upsample4" 
    type: "Upsample" 
    bottom: "conv5_1_D" 
    top: "pool4_D" 
    bottom: "pool4_mask" 
    upsample_param { 
    scale: 2 
    upsample_w: 60 
    upsample_h: 45 
    } 
} 

But I do not know the exact values. Can anybody tell me the relationship between scale, upsample_w, upsample_h and the image size, or how to calculate them?

The full definition of the net is:

name: "VGG_ILSVRC_16_layer" 
layer { 
    name: "data" 
    type: "DenseImageData" 
    top: "data" 
    top: "label" 
    dense_image_data_param { 
    source: "/home/zhaimo/SegNet/CamVid/mytrain.txt" # Change this to the absolute path to your data file 
    batch_size: 4    # Change this number to a batch size that will fit on your GPU 
    shuffle: true 
    } 
} 
layer { 
    bottom: "data" 
    top: "conv1_1" 
    name: "conv1_1" 
    type: "Convolution" 
    param { 
    lr_mult: 1 
    decay_mult: 1 
    } 
    param { 
    lr_mult: 2 
    decay_mult: 0 
    } 
    convolution_param { 
    weight_filler { 
     type: "msra" 
    } 
    bias_filler { 
     type: "constant" 
    } 
    num_output: 64 
    pad: 1 
    kernel_size: 3 
    } 
} 
layer { 
    bottom: "conv1_1" 
    top: "conv1_1" 
    name: "conv1_1_bn" 
    type: "BN" 
    param { 
    lr_mult: 1 
    decay_mult: 1 
    } 
    param { 
    lr_mult: 1 
    decay_mult: 0 
    } 
    bn_param { 
    scale_filler { 
     type: "constant" 
     value: 1 
    } 
    shift_filler { 
     type: "constant" 
     value: 0.001 
    } 
} 
} 
layer { 
    bottom: "conv1_1" 
    top: "conv1_1" 
    name: "relu1_1" 
    type: "ReLU" 
} 
layer { 
    bottom: "conv1_1" 
    top: "conv1_2" 
    name: "conv1_2" 
    type: "Convolution" 
    param { 
    lr_mult: 1 
    decay_mult: 1 
    } 
    param { 
    lr_mult: 2 
    decay_mult: 0 
    } 
    convolution_param { 
    weight_filler { 
     type: "msra" 
    } 
    bias_filler { 
     type: "constant" 
    } 
    num_output: 64 
    pad: 1 
    kernel_size: 3 
    } 
} 
layer { 
    bottom: "conv1_2" 
    top: "conv1_2" 
    name: "conv1_2_bn" 
    type: "BN" 
    param { 
    lr_mult: 1 
    decay_mult: 1 
    } 
    param { 
    lr_mult: 1 
    decay_mult: 0 
    } 
    bn_param { 
    scale_filler { 
     type: "constant" 
     value: 1 
    } 
    shift_filler { 
     type: "constant" 
     value: 0.001 
    } 
} 
} 
layer { 
    bottom: "conv1_2" 
    top: "conv1_2" 
    name: "relu1_2" 
    type: "ReLU" 
} 
layer { 
    bottom: "conv1_2" 
    top: "pool1" 
    top: "pool1_mask" 
    name: "pool1" 
    type: "Pooling" 
    pooling_param { 
    pool: MAX 
    kernel_size: 2 
    stride: 2 
    } 
} 
layer { 
    bottom: "pool1" 
    top: "conv2_1" 
    name: "conv2_1" 
    type: "Convolution" 
    param { 
    lr_mult: 1 
    decay_mult: 1 
    } 
    param { 
    lr_mult: 2 
    decay_mult: 0 
    } 
    convolution_param { 
    weight_filler { 
     type: "msra" 
    } 
    bias_filler { 
     type: "constant" 
    } 
    num_output: 128 
    pad: 1 
    kernel_size: 3 
    } 
} 
layer { 
    bottom: "conv2_1" 
    top: "conv2_1" 
    name: "conv2_1_bn" 
    type: "BN" 
    param { 
    lr_mult: 1 
    decay_mult: 1 
    } 
    param { 
    lr_mult: 1 
    decay_mult: 0 
    } 
    bn_param { 
    scale_filler { 
     type: "constant" 
     value: 1 
    } 
    shift_filler { 
     type: "constant" 
     value: 0.001 
    } 
} 
} 
layer { 
    bottom: "conv2_1" 
    top: "conv2_1" 
    name: "relu2_1" 
    type: "ReLU" 
} 
layer { 
    bottom: "conv2_1" 
    top: "conv2_2" 
    name: "conv2_2" 
    type: "Convolution" 
    param { 
    lr_mult: 1 
    decay_mult: 1 
    } 
    param { 
    lr_mult: 2 
    decay_mult: 0 
    } 
    convolution_param { 
    weight_filler { 
     type: "msra" 
    } 
    bias_filler { 
     type: "constant" 
    } 
    num_output: 128 
    pad: 1 
    kernel_size: 3 
    } 
} 
layer { 
    bottom: "conv2_2" 
    top: "conv2_2" 
    name: "conv2_2_bn" 
    type: "BN" 
    param { 
    lr_mult: 1 
    decay_mult: 1 
    } 
    param { 
    lr_mult: 1 
    decay_mult: 0 
    } 
    bn_param { 
    scale_filler { 
     type: "constant" 
     value: 1 
    } 
    shift_filler { 
     type: "constant" 
     value: 0.001 
    } 
} 
} 
layer { 
    bottom: "conv2_2" 
    top: "conv2_2" 
    name: "relu2_2" 
    type: "ReLU" 
} 
layer { 
    bottom: "conv2_2" 
    top: "pool2" 
    top: "pool2_mask" 
    name: "pool2" 
    type: "Pooling" 
    pooling_param { 
    pool: MAX 
    kernel_size: 2 
    stride: 2 
    } 
} 
layer { 
    bottom: "pool2" 
    top: "conv3_1" 
    name: "conv3_1" 
    type: "Convolution" 
    param { 
    lr_mult: 1 
    decay_mult: 1 
    } 
    param { 
    lr_mult: 2 
    decay_mult: 0 
    } 
    convolution_param { 
    weight_filler { 
     type: "msra" 
    } 
    bias_filler { 
     type: "constant" 
    } 
    num_output: 256 
    pad: 1 
    kernel_size: 3 
    } 
} 
layer { 
    bottom: "conv3_1" 
    top: "conv3_1" 
    name: "conv3_1_bn" 
    type: "BN" 
    param { 
    lr_mult: 1 
    decay_mult: 1 
    } 
    param { 
    lr_mult: 1 
    decay_mult: 0 
    } 
    bn_param { 
    scale_filler { 
     type: "constant" 
     value: 1 
    } 
    shift_filler { 
     type: "constant" 
     value: 0.001 
    } 
} 
} 
layer { 
    bottom: "conv3_1" 
    top: "conv3_1" 
    name: "relu3_1" 
    type: "ReLU" 
} 
layer { 
    bottom: "conv3_1" 
    top: "conv3_2" 
    name: "conv3_2" 
    type: "Convolution" 
    param { 
    lr_mult: 1 
    decay_mult: 1 
    } 
    param { 
    lr_mult: 2 
    decay_mult: 0 
    } 
    convolution_param { 
    weight_filler { 
     type: "msra" 
    } 
    bias_filler { 
     type: "constant" 
    } 
    num_output: 256 
    pad: 1 
    kernel_size: 3 
    } 
} 
layer { 
    bottom: "conv3_2" 
    top: "conv3_2" 
    name: "conv3_2_bn" 
    type: "BN" 
    param { 
    lr_mult: 1 
    decay_mult: 1 
    } 
    param { 
    lr_mult: 1 
    decay_mult: 0 
    } 
    bn_param { 
    scale_filler { 
     type: "constant" 
     value: 1 
    } 
    shift_filler { 
     type: "constant" 
     value: 0.001 
    } 
} 
} 
layer { 
    bottom: "conv3_2" 
    top: "conv3_2" 
    name: "relu3_2" 
    type: "ReLU" 
} 
layer { 
    bottom: "conv3_2" 
    top: "conv3_3" 
    name: "conv3_3" 
    type: "Convolution" 
    param { 
    lr_mult: 1 
    decay_mult: 1 
    } 
    param { 
    lr_mult: 2 
    decay_mult: 0 
    } 
    convolution_param { 
    weight_filler { 
     type: "msra" 
    } 
    bias_filler { 
     type: "constant" 
    } 
    num_output: 256 
    pad: 1 
    kernel_size: 3 
    } 
} 
layer { 
    bottom: "conv3_3" 
    top: "conv3_3" 
    name: "conv3_3_bn" 
    type: "BN" 
    param { 
    lr_mult: 1 
    decay_mult: 1 
    } 
    param { 
    lr_mult: 1 
    decay_mult: 0 
    } 
    bn_param { 
    scale_filler { 
     type: "constant" 
     value: 1 
    } 
    shift_filler { 
     type: "constant" 
     value: 0.001 
    } 
} 
} 
layer { 
    bottom: "conv3_3" 
    top: "conv3_3" 
    name: "relu3_3" 
    type: "ReLU" 
} 
layer { 
    bottom: "conv3_3" 
    top: "pool3" 
    top: "pool3_mask" 
    name: "pool3" 
    type: "Pooling" 
    pooling_param { 
    pool: MAX 
    kernel_size: 2 
    stride: 2 
    } 
} 
layer { 
    bottom: "pool3" 
    top: "conv4_1" 
    name: "conv4_1" 
    type: "Convolution" 
    param { 
    lr_mult: 1 
    decay_mult: 1 
    } 
    param { 
    lr_mult: 2 
    decay_mult: 0 
    } 
    convolution_param { 
    weight_filler { 
     type: "msra" 
    } 
    bias_filler { 
     type: "constant" 
    } 
    num_output: 512 
    pad: 1 
    kernel_size: 3 
    } 
} 
layer { 
    bottom: "conv4_1" 
    top: "conv4_1" 
    name: "conv4_1_bn" 
    type: "BN" 
    param { 
    lr_mult: 1 
    decay_mult: 1 
    } 
    param { 
    lr_mult: 1 
    decay_mult: 0 
    } 
    bn_param { 
    scale_filler { 
     type: "constant" 
     value: 1 
    } 
    shift_filler { 
     type: "constant" 
     value: 0.001 
    } 
} 
} 
layer { 
    bottom: "conv4_1" 
    top: "conv4_1" 
    name: "relu4_1" 
    type: "ReLU" 
} 
layer { 
    bottom: "conv4_1" 
    top: "conv4_2" 
    name: "conv4_2" 
    type: "Convolution" 
    param { 
    lr_mult: 1 
    decay_mult: 1 
    } 
    param { 
    lr_mult: 2 
    decay_mult: 0 
    } 
    convolution_param { 
    weight_filler { 
     type: "msra" 
    } 
    bias_filler { 
     type: "constant" 
    } 
    num_output: 512 
    pad: 1 
    kernel_size: 3 
    } 
} 
layer { 
    bottom: "conv4_2" 
    top: "conv4_2" 
    name: "conv4_2_bn" 
    type: "BN" 
    param { 
    lr_mult: 1 
    decay_mult: 1 
    } 
    param { 
    lr_mult: 1 
    decay_mult: 0 
    } 
    bn_param { 
    scale_filler { 
     type: "constant" 
     value: 1 
    } 
    shift_filler { 
     type: "constant" 
     value: 0.001 
    } 
} 
} 
layer { 
    bottom: "conv4_2" 
    top: "conv4_2" 
    name: "relu4_2" 
    type: "ReLU" 
} 
layer { 
    bottom: "conv4_2" 
    top: "conv4_3" 
    name: "conv4_3" 
    type: "Convolution" 
    param { 
    lr_mult: 1 
    decay_mult: 1 
    } 
    param { 
    lr_mult: 2 
    decay_mult: 0 
    } 
    convolution_param { 
    weight_filler { 
     type: "msra" 
    } 
    bias_filler { 
     type: "constant" 
    } 
    num_output: 512 
    pad: 1 
    kernel_size: 3 
    } 
} 
layer { 
    bottom: "conv4_3" 
    top: "conv4_3" 
    name: "conv4_3_bn" 
    type: "BN" 
    param { 
    lr_mult: 1 
    decay_mult: 1 
    } 
    param { 
    lr_mult: 1 
    decay_mult: 0 
    } 
    bn_param { 
    scale_filler { 
     type: "constant" 
     value: 1 
    } 
    shift_filler { 
     type: "constant" 
     value: 0.001 
    } 
} 
} 
layer { 
    bottom: "conv4_3" 
    top: "conv4_3" 
    name: "relu4_3" 
    type: "ReLU" 
} 
layer { 
    bottom: "conv4_3" 
    top: "pool4" 
    top: "pool4_mask" 
    name: "pool4" 
    type: "Pooling" 
    pooling_param { 
    pool: MAX 
    kernel_size: 2 
    stride: 2 
    } 
} 
layer { 
    bottom: "pool4" 
    top: "conv5_1" 
    name: "conv5_1" 
    type: "Convolution" 
    param { 
    lr_mult: 1 
    decay_mult: 1 
    } 
    param { 
    lr_mult: 2 
    decay_mult: 0 
    } 
    convolution_param { 
    weight_filler { 
     type: "msra" 
    } 
    bias_filler { 
     type: "constant" 
    } 
    num_output: 512 
    pad: 1 
    kernel_size: 3 
    } 
} 
layer { 
    bottom: "conv5_1" 
    top: "conv5_1" 
    name: "conv5_1_bn" 
    type: "BN" 
    param { 
    lr_mult: 1 
    decay_mult: 1 
    } 
    param { 
    lr_mult: 1 
    decay_mult: 0 
    } 
    bn_param { 
    scale_filler { 
     type: "constant" 
     value: 1 
    } 
    shift_filler { 
     type: "constant" 
     value: 0.001 
    } 
} 
} 
layer { 
    bottom: "conv5_1" 
    top: "conv5_1" 
    name: "relu5_1" 
    type: "ReLU" 
} 
layer { 
    bottom: "conv5_1" 
    top: "conv5_2" 
    name: "conv5_2" 
    type: "Convolution" 
    param { 
    lr_mult: 1 
    decay_mult: 1 
    } 
    param { 
    lr_mult: 2 
    decay_mult: 0 
    } 
    convolution_param { 
    weight_filler { 
     type: "msra" 
    } 
    bias_filler { 
     type: "constant" 
    } 
    num_output: 512 
    pad: 1 
    kernel_size: 3 
    } 
} 
layer { 
    bottom: "conv5_2" 
    top: "conv5_2" 
    name: "conv5_2_bn" 
    type: "BN" 
    param { 
    lr_mult: 1 
    decay_mult: 1 
    } 
    param { 
    lr_mult: 1 
    decay_mult: 0 
    } 
    bn_param { 
    scale_filler { 
     type: "constant" 
     value: 1 
    } 
    shift_filler { 
     type: "constant" 
     value: 0.001 
    } 
} 
} 
layer { 
    bottom: "conv5_2" 
    top: "conv5_2" 
    name: "relu5_2" 
    type: "ReLU" 
} 
layer { 
    bottom: "conv5_2" 
    top: "conv5_3" 
    name: "conv5_3" 
    type: "Convolution" 
    param { 
    lr_mult: 1 
    decay_mult: 1 
    } 
    param { 
    lr_mult: 2 
    decay_mult: 0 
    } 
    convolution_param { 
    weight_filler { 
     type: "msra" 
    } 
    bias_filler { 
     type: "constant" 
    } 
    num_output: 512 
    pad: 1 
    kernel_size: 3 
    } 
} 
layer { 
    bottom: "conv5_3" 
    top: "conv5_3" 
    name: "conv5_3_bn" 
    type: "BN" 
    param { 
    lr_mult: 1 
    decay_mult: 1 
    } 
    param { 
    lr_mult: 1 
    decay_mult: 0 
    } 
    bn_param { 
    scale_filler { 
     type: "constant" 
     value: 1 
    } 
    shift_filler { 
     type: "constant" 
     value: 0.001 
    } 
} 
} 
layer { 
    bottom: "conv5_3" 
    top: "conv5_3" 
    name: "relu5_3" 
    type: "ReLU" 
} 
layer { 
    bottom: "conv5_3" 
    top: "pool5" 
    top: "pool5_mask" 
    name: "pool5" 
    type: "Pooling" 
    pooling_param { 
    pool: MAX 
    kernel_size: 2 
    stride: 2 
    } 
} 
layer { 
    name: "upsample5" 
    type: "Upsample" 
    bottom: "pool5" 
    top: "pool5_D" 
    bottom: "pool5_mask" 
    upsample_param { 
    scale: 2 
    upsample_w: 30 
    upsample_h: 23 
    } 
} 
....(The rest is omitted) 

The shape of 'conv5_1_D' differs from the shape of 'pool4_mask', hence the difference in height. – Shai
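
For the 565 * 584 input you can check this by hand (assuming Caffe's ceil-based pooling output size, ceil((size - kernel) / stride) + 1, for the kernel 2 / stride 2 pool layers above):

height: 584 -> pool1 292 -> pool2 146 -> pool3 73 -> pool4 37 -> pool5 19

So pool4_mask has height 37, while upsampling pool5 by scale: 2 gives 19 * 2 = 38, which is exactly the "38 vs. 37" mismatch reported at upsample4.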

Answer


You should change upsample_w and upsample_h in segnet_train.prototxt. Every Pool layer shrinks the image by a factor of 2, so you have to count how many pooling layers come before each upsample layer and work out the upsample size from your image dimensions.
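
A minimal sketch of that calculation in Python (the helper names are made up; it assumes Caffe's pooling output formula, ceil((size + 2*pad - kernel) / stride) + 1, and the kernel 2 / stride 2 / pad 0 pool layers defined above):

import math

def pooled_size(size, kernel=2, stride=2, pad=0):
    # Caffe Pooling output size: ceil((size + 2*pad - kernel) / stride) + 1
    return int(math.ceil((size + 2.0 * pad - kernel) / stride)) + 1

def upsample_sizes(width, height, num_pools=5):
    # upsampleN has to restore the size that existed *before* poolN,
    # i.e. the size produced by pool(N-1) (the input image for upsample1).
    sizes = [(width, height)]
    for _ in range(num_pools):
        w, h = sizes[-1]
        sizes.append((pooled_size(w), pooled_size(h)))
    return list(reversed(sizes[:-1]))  # upsample5, upsample4, ..., upsample1

for i, (w, h) in zip(range(5, 0, -1), upsample_sizes(565, 584)):
    print("upsample%d: upsample_w: %d upsample_h: %d" % (i, w, h))

With a 480 * 360 input this reproduces the values already in the prototxt (upsample5: 30/23, upsample4: 60/45); with 565 * 584 it should give upsample5: 36/37, upsample4: 71/73, and so on, matching the pool4_mask size that the failing check expects.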