3
Windows에서 C++로 Caffe를 사용하여 이미지 분할 문제를 풀고 있습니다. "ImageData" 입력 타입으로 네트워크를 학습시켰는데, 테스트 시 빈(blank) 출력이 나옵니다. 어디가 문제인지 분석을 도와주실 수 있을까요? (입력 레이어 타입: ImageData, Windows Caffe C++, 빈 출력)
********** solver.prototxt ***************
# SGD solver configuration for the training snippet below.
test_initialization: false
# Initial learning rate; multiplied by `gamma` every `stepsize` iterations ("step" policy).
base_lr: 0.01
# Print training loss every 51 iterations.
display: 51
max_iter: 50000
lr_policy: "step"
gamma: 0.1
momentum: 0.9
weight_decay: 0.0001
# NOTE(review): 4069 looks like a typo for 4096 — confirm the intended step size.
stepsize: 4069
# Writes snapshot_iter_<N>.caffemodel / .solverstate every 10000 iterations
# (the test snippet loads snapshot_iter_50000.caffemodel).
snapshot: 10000
snapshot_prefix: "snapshot"
solver_mode: GPU
net: "train.prototxt"
solver_type: SGD
File_triangle.txt와 File_label_triangle.txt에는 각 줄마다 이미지의 절대 경로와 더미 레이블이 들어 있습니다. 학습이 끝난 뒤에는 저장된 .caffemodel을 사용해 C++에서 네트워크를 테스트합니다. 학습은 아래 코드로 수행합니다.
// --- Training driver ---
// Select the compute device FIRST, so everything constructed afterwards
// (solver and its nets) is set up under the intended mode.
Caffe::set_mode(Caffe::GPU);

caffe::SolverParameter solver_param;
caffe::ReadSolverParamsFromTextFileOrDie("solver.prototxt", &solver_param);

// NOTE(review): the original snippet also built a standalone
// Net<float>("train.prototxt", TRAIN) here, but it was never used — the
// solver constructs its own training net from solver_param.net(). The extra
// net only duplicated GPU/CPU memory, so it has been removed.
boost::shared_ptr<caffe::Solver<float> > solver(
    caffe::SolverRegistry<float>::CreateSolver(solver_param));
solver->Solve();
목록 파일의 한 줄 예시: D:\train\00000032.png 0
**************** train.prototxt ********************
# Input layer: 32x32 single-channel images listed in File_triangle.txt.
# The second top ("xx") carries the dummy label column of the list file and is
# not consumed by any layer below.
layer {
name: "data"
type: "ImageData"
top: "data"
top: "xx"
include {
phase: TRAIN
}
image_data_param {
source: "File_triangle.txt"
batch_size: 1
new_height: 32
new_width: 32
# NOTE(review): prototxt booleans are conventionally lowercase ("false") —
# confirm "False" parses as intended with your protobuf version.
is_color: False
}
}
# Ground-truth layer: the segmentation targets are themselves 32x32 grayscale
# images (File_label_triangle.txt), loaded via a second ImageData layer.
# Pixel values are fed to the loss unscaled (0-255). Top "yy" is that file's
# dummy label column, also unused.
layer {
name: "label"
type: "ImageData"
top: "label"
top: "yy"
image_data_param {
source: "File_label_triangle.txt"
batch_size: 1
new_height: 32
new_width: 32
is_color: False
}
include {
phase: TRAIN
}
}
# conv1: 3x3 kernel, stride 1, pad 1 -> spatial size preserved (32x32), 32 maps.
layer {
name: "conv1"
type: "Convolution"
bottom: "data"
top: "conv1"
param {
lr_mult: 1.0
}
# Bias learning rate is 0.1x the weight learning rate.
param {
lr_mult: 0.10000000149
}
convolution_param {
num_output: 32
pad: 1
kernel_size: 3
stride: 1
weight_filler {
type: "gaussian"
std: 0.0010000000475
}
bias_filler {
type: "constant"
value: 0.0
}
}
}
layer {
name: "relu1"
type: "ReLU"
bottom: "conv1"
top: "conv1"
}
# conv2: 16x16 kernel, stride 16 on the 32x32 input -> 2x2 spatial output,
# 1024 channels (a strided, fully-connected-like encoding).
layer {
name: "conv2"
type: "Convolution"
bottom: "conv1"
top: "conv2"
param {
lr_mult: 1.0
}
param {
lr_mult: 0.10000000149
}
convolution_param {
num_output: 1024
pad: 0
kernel_size: 16
stride: 16
weight_filler {
type: "gaussian"
std: 0.0010000000475
}
bias_filler {
type: "constant"
value: 0.0
}
}
}
layer {
name: "relu2"
type: "ReLU"
bottom: "conv2"
top: "conv2"
}
# upsample: 16x16/stride-16 deconvolution maps the 2x2 encoding back to 32x32,
# one output channel.
# NOTE(review): no weight_filler is specified, so the deconvolution weights
# start from Caffe's default (constant 0) while only the bias is set to 128 —
# confirm this initialization is intentional; it can stall learning.
layer {
name: "upsample"
type: "Deconvolution"
bottom: "conv2"
top: "upsample"
param {
lr_mult: 1.0
}
convolution_param {
num_output: 1
pad: 0
kernel_size: 16
stride: 16
bias_filler {
type: "constant"
value: 128.0
}
}
}
# Per-pixel SmoothL1 regression between the upsampled prediction and the raw
# (unscaled) label image pixels.
layer {
name: "lossL1"
type: "SmoothL1Loss"
bottom: "upsample"
bottom: "label"
top: "lossL1"
loss_weight: 1.0
}
위까지가 학습에 사용한 코드와 설정입니다.
******************** test.prototxt **********************
# Deploy-time input: a single 1-channel 32x32 image (N=1, C=1, H=32, W=32).
# The C++ test code must therefore feed GRAYSCALE data — a 3-channel imread
# result will not match this blob layout.
layer {
name: "data"
type: "Input"
top: "data"
input_param { shape: { dim: 1 dim: 1 dim: 32 dim: 32 } }
}
# The layers below mirror train.prototxt (same names), so
# CopyTrainedLayersFrom() overwrites every filler with the trained weights.
# conv1: 3x3/stride 1/pad 1 -> 32x32, 32 maps.
layer {
name: "conv1"
type: "Convolution"
bottom: "data"
top: "conv1"
param {
lr_mult: 1.0
}
param {
lr_mult: 0.10000000149
}
convolution_param {
num_output: 32
pad: 1
kernel_size: 3
stride: 1
weight_filler {
type: "gaussian"
std: 0.0010000000475
}
bias_filler {
type: "constant"
value: 0.0
}
}
}
layer {
name: "relu1"
type: "ReLU"
bottom: "conv1"
top: "conv1"
}
# conv2: 16x16/stride 16 -> 2x2 spatial output, 1024 channels.
layer {
name: "conv2"
type: "Convolution"
bottom: "conv1"
top: "conv2"
param {
lr_mult: 1.0
}
param {
lr_mult: 0.10000000149
}
convolution_param {
num_output: 1024
pad: 0
kernel_size: 16
stride: 16
weight_filler {
type: "gaussian"
std: 0.0010000000475
}
bias_filler {
type: "constant"
value: 0.0
}
}
}
layer {
name: "relu2"
type: "ReLU"
bottom: "conv2"
top: "conv2"
}
# upsample: deconvolution back to 32x32, 1 channel — this blob is read out
# by name in the C++ test snippet.
layer {
name: "upsample"
type: "Deconvolution"
bottom: "conv2"
top: "upsample"
param {
lr_mult: 1.0
}
convolution_param {
num_output: 1
pad: 0
kernel_size: 16
stride: 16
bias_filler {
type: "constant"
value: 128.0
}
}
}
아래 코드 스니펫은 테스트에 사용됩니다.
Caffe::set_mode(Caffe::GPU);
boost::shared_ptr<caffe::Net<float> > net_;
net_.reset(new Net<float>("test.prototxt", caffe::TEST));
net_->CopyTrainedLayersFrom("snapshot_iter_50000.caffemodel");
cv::Mat matInput = cv::imread("input image path");
matInput.convertTo(matInput, CV_32F);
int height = matInput.rows;
int width = matInput.cols;
Blob<float>* input_layer = net_->input_blobs()[0];
float* input_data = input_layer->mutable_cpu_data();
int layer_index = height * width;
for (size_t i = 0; i < height; i++)
{
for (size_t j = 0; j < width; j++)
{
input_data[i*width + j] = matInput.at<float>(i, j);
}
}
net_->Forward();
const shared_ptr<Blob<float> >& concat_blob = net_->blob_by_name("upsample");
const float* concat_out = concat_blob->cpu_data();
cv::Mat matout(height, width, CV_8UC1);
for (size_t i = 0; i < height*width; i++)
{
matout.data[i] = concat_out[i];
}
cv::imwrite(output_str, matout);