1

Foreground detection with background subtraction using a Kalman filter

I need to separate the background from the foreground in a video using a Kalman filter. Can someone give me resources or code examples to follow?

Update: here is a good example: Traffic detection. It worked very well for traffic detection, but I want to adapt it to extract people instead.

I found a few variables in the example that will need to be adapted, namely the following:
1. alpha = learning constant 
2. K = number of Gaussians in the mixture 
3. T = minimum portion of the data accounted for by the background 
4. initVariance = initial variance 
5. pixelThresh = threshold condition for running the adaptive process on a pixel 

Here is an outline of the main file (in case you want an overview):


function foregroundEstimation(movie) 

%Load video into memory and prepare output video for writing 

v = VideoReader(movie); 
numFrames = 150; 
foregroundVideo = VideoWriter('foreground.avi'); 
open(foregroundVideo); 

%video constants 

initFrame = read(v,1); 
global height; 
global width; 
height = size(initFrame,1); 
width = size(initFrame,2); 



%initialize GMM parameters: (based upon Stauffer notation) 
%http://www.ai.mit.edu/projects/vsam/Publications/stauffer_cvpr98_track.pdf 

% alpha = learning constant 
% K = number of Gaussians in the mixture 
% T = min. portion of background 
% initVariance = initial variance 
% pixelThresh = threshold condition for computing adaptive process on pixel 

alpha=0.05; % 0.05 
K=5;   % 5 
global T; % T = 0.8 
T=0.8; 

global initVariance; 
initVariance=75; % 75 
pixelThresh=45; % 45 
referenceDistance = 40; % 40 % shortcut to speed up processing time: compare the current pixel to the pixel referenceDistance frames back and skip the adaptive process if they are similar. Downside: background evidence is not collected as well. 
sideWeight = (K-1)/(1-T); 
global matchThres; 
matchThres = sqrt(3); 
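%presumably: a pixel matches a mixture component when it lies within matchThres 
%standard deviations of that component's mean (see the matchingCriterion sketch after the outline) 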
global ccThreshold; 
ccThreshold = 9000; % 5000 
global deltaT; 
deltaT = 1; 
global numParticles; 
numParticles = 100; 

trackingOn = 0; % set to 1 to superimpose a tracking color marker on detected vehicles in the output video (trackingOn should equal 1 or 0) 

prevCentSize = 0; 


%structures to pass information between frames for detection purposes. 
field = 'f'; 
filterValue = {[];[];}; 
prevFilter = struct(field,filterValue); 
modelValue = {[];prevFilter}; 
prevModel = struct(field,modelValue); 

%initialize video processing components 
initColorBoxes(); 


foreFrame = zeros(height,width,3); 
backFrame = zeros(height,width,3); 

%map represents pixels at a given frame to perform adaptive process 
pixThreshMap = ones(height,width); 


%individual pixel process components 
pixel = zeros(3,1); 
pixMean = zeros(3,1,K); 
pixVar = ones(1,K); 
pixWeight = zeros(1,K); 

%global pixel process components 
globalWeight = (ones(height,width,K)/sideWeight); 
globalWeight(:,:,1) = T; 
%globalWeight = (ones(height,width,K)/K); 
globalMean = zeros(height,width,3,K); 
globalVar = ones(height,width,K); 
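%so each pixel (i,j) carries K Gaussians: per-channel means globalMean(i,j,1:3,k), 
%a shared variance globalVar(i,j,k), and weights globalWeight(i,j,k) summing to 1, 
%with the dominant component initialized at weight T 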




%===================================================== 
%Extract Foreground and Background by K-mixture model 
%===================================================== 
%initialize g-mixture model 
globalVar = globalVar*initVariance; 

for k=1:K 
globalMean(:,:,1,k)=initFrame(:,:,1); 
globalMean(:,:,2,k)=initFrame(:,:,2); 
globalMean(:,:,3,k)=initFrame(:,:,3); 
end; 


distVec = zeros(numFrames,1); 

%adaptive g-mixture background segmentation 
for frameIndex=2:numFrames 
%get the current frame and the reference frame 
%tic; 
frameIndex %no semicolon: prints the current frame index as a progress indicator 
currentFrame = double(read(v,frameIndex)); 



if (frameIndex<=referenceDistance) 
referenceFrame= double(read(v,1)); 
else 
referenceFrame= double(read(v,frameIndex-referenceDistance)); 
end; 

frameDifference = abs(currentFrame - referenceFrame); 

%creates map of pixel locations where we will perform adaptive process. Based 
%upon threshold that detects low change regions based on previous frame in 
%order to save computation. 
pixThreshMap = min(sum(+(frameDifference(:,:,:)>pixelThresh),3),1); 



%extract the parts considered "stable background" from the current frame 
for index=1:3 
    backFrame(:,:,index)=(+(pixThreshMap(:,:)==0)).*currentFrame(:,:,index); 
end 

%reset foreground frame 
foreFrame = ones(height,width,3)*255; 

%gaussian mixture matching & model updating 



[i,j]=find(pixThreshMap(:,:)==1); 


%loop through every pixel location where adaptive process should be performed 
for k = 1:size(i,1) 

    pixel = reshape(currentFrame(i(k),j(k),:),3,1); 
    pixMean = reshape(globalMean(i(k),j(k),:,:),3,1,K); 
    pixVar = reshape(globalVar(i(k),j(k),:,:),1,K); 
    pixWeight=reshape(globalWeight(i(k),j(k),:),1,K); 

    %update gaussian mixture according to new pix value 
    match=matchingCriterion(pixMean,pixVar,pixel); 
    matchWeight = 0; 

    if(match>0) 
     %match found so update weights/normalize 
     pixWeight = (1-alpha)*pixWeight; 
     pixWeight(match)= pixWeight(match) + alpha; 
     pixWeight = pixWeight/sum(pixWeight); 
     matchWeight = pixWeight(1,match); 

     %NOTE: ALPHA SHOULD BE REPLACED WITH SOME KIND OF RHO EVENTUALLY, 
     %WHERE RHO IS THE PRODUCT OF ALPHA AND A CONDITIONAL PROBABILITY MEASURE 

     %update variance 
     pixVar(:,match) = (1-alpha)*pixVar(:,match) + ... 
     alpha*(pixel - pixMean(:,:,match))'*(pixel-pixMean(:,:,match)); 

     %update mean 
     pixMean(:,:,match) = (1-alpha)*pixMean(:,:,match) + alpha*pixel; 



    else 
     %no match found: replace the component with the lowest w/sigma rank 
     rankVector = pixWeight./sqrt(pixVar(1,:)); 
     [~, minIndex] = min(rankVector); 

     pixMean(:,:,minIndex) = pixel; 
     pixVar(:,minIndex) = initVariance; 
     %give the new component a low weight and renormalize (per Stauffer & Grimson) 
     pixWeight(minIndex) = alpha; 
     pixWeight = pixWeight/sum(pixWeight); 
    end 

    %re-rank all mixture components by w/sigma, best first (per Stauffer & Grimson) 
    rankCriterionVector = pixWeight./sqrt(pixVar(1,:)); 
    [~, rankIndex] = sort(rankCriterionVector,'descend'); 

    pixMean = pixMean(:,:,rankIndex); 
    pixVar = pixVar(:,rankIndex); 
    pixWeight = pixWeight(rankIndex); 


    %repopulate global structures with updated values 
    globalWeight(i(k),j(k),:) = pixWeight; 
    globalMean(i(k),j(k),:,:) = pixMean; 
    globalVar(i(k),j(k),:) = pixVar; 

    %now need to perform the background segmentation based upon weight 
    %threshold 

    bgIndex = segmentBackground(pixWeight); 
    if(ismember(matchWeight, pixWeight)) 
     matchIndex = find(pixWeight == matchWeight,1); 
    else 
     matchIndex = 0; 
    end 

    if((matchIndex>=bgIndex) || (matchIndex == 0)) 

     %also check against initFrame for match 
     %NOTE CHANGE 
     if(initMatch(initFrame(i(k),j(k),:),pixel) == 0) 
      foreFrame(i(k),j(k),:) = pixel; 
     end 

    end 


end 


%Now write foreground frame to foreground estimation video 
contrastFrame = foreFrame/max(abs(foreFrame(:))); 

%remove all connected components associated with foreground objects that are smaller than what we expect a vehicle should be. 
[cleanFrame centroids]= connectedComponentCleanup(contrastFrame); 

if(trackingOn == 1) 
if(size(centroids,1) > prevCentSize) 
    prevModel = addModel(prevModel, centroids, height, width); 
elseif (size(centroids,1) < prevCentSize) 
    prevModel = removeModel(prevModel, centroids, height, width); 
end 


if(size(centroids,1) > 0) 
    %implies there is a car in frame for tracking 
    [curModel orderedCentroids] = vehicleTracking(centroids, prevModel); 
    prevModel = curModel; 
    trackFrame = colorBox(cleanFrame, curModel,orderedCentroids, height, width); 
else 
    trackFrame = cleanFrame; 
end 

else 
    trackFrame = cleanFrame; 
end 

writeVideo(foregroundVideo,trackFrame); 
prevCentSize = size(centroids,1); 
end 

%close the output video file 
close(foregroundVideo); 
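The outline calls two helpers, matchingCriterion and segmentBackground, that are not shown. Here is a minimal sketch of what they might look like, assuming the matching rule is "within matchThres standard deviations of a component mean" and the background segmentation follows Stauffer's rule (the smallest prefix of components whose weights sum past T is background). This is inferred from how they are used above, not the actual code:

function match = matchingCriterion(pixMean, pixVar, pixel) 
%returns the index of the first mixture component the pixel falls within, 
%or 0 if none matches (assumed contract, inferred from the calls above) 
global matchThres; 
match = 0; 
for k = 1:size(pixMean,3) 
    %distance from the component mean, measured in standard deviations 
    d = norm(pixel - pixMean(:,:,k))/sqrt(pixVar(1,k)); 
    if (d < matchThres) 
        match = k; 
        return; 
    end 
end 

function bgIndex = segmentBackground(pixWeight) 
%returns the index of the first component NOT counted as background; 
%components 1..bgIndex-1 are the smallest prefix (sorted by w/sigma) 
%whose weights sum past T, as in Stauffer & Grimson 
global T; 
B = find(cumsum(pixWeight) > T, 1); 
if isempty(B) 
    bgIndex = length(pixWeight) + 1; 
else 
    bgIndex = B + 1; 
end 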


Thanks.

+1

**You could look at this related example, which you can then use to track objects between frames:** http://www.mathworks.com/help/vision/examples/using-kalman-filter-for-object-tracking.html –
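For reference, a minimal sketch of the predict/correct loop that example is built around. Here initialCentroid and detectedCentroid are placeholders for whatever your foreground detection produces per frame, and the noise parameters are illustrative guesses:

%configure a constant-velocity Kalman filter from the first detection 
kf = configureKalmanFilter('ConstantVelocity', initialCentroid, ... 
                           [200, 50], [100, 25], 100); 

for frameIndex = 2:numFrames 
    %predict where the object should be in this frame 
    predictedLocation = predict(kf); 

    %detectedCentroid: [x y] centroid for this frame from your foreground mask 
    if ~isempty(detectedCentroid) 
        %a detection is available: correct the estimate with it 
        trackedLocation = correct(kf, detectedCentroid); 
    else 
        %detection missed: fall back on the prediction 
        trackedLocation = predictedLocation; 
    end 
end 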

Answer

4

Basically this is not a job for a Kalman filter; look at background subtraction in OpenCV. For the Kalman filter there is an example here: http://studentdavestutorials.weebly.com/kalman-filter-with-matlab-code.html
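If you stay in MATLAB rather than OpenCV, the Computer Vision Toolbox has a built-in mixture-of-Gaussians background subtractor, vision.ForegroundDetector, which implements the same Stauffer-style model as the outline above. A minimal sketch; the parameter values are illustrative and correspond roughly to K, T, and alpha:

%built-in Stauffer-style mixture-of-Gaussians background subtractor 
detector = vision.ForegroundDetector('NumGaussians', 5, ...             % K 
                                     'MinimumBackgroundRatio', 0.8, ... % T 
                                     'LearningRate', 0.05, ...          % alpha 
                                     'NumTrainingFrames', 50); 

v = VideoReader('visiontraffic.avi'); %demo video shipped with the toolbox 
while hasFrame(v) 
    frame = readFrame(v); 
    %logical mask: true where the pixel is classified as foreground 
    foregroundMask = step(detector, frame); 
end 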

+0

Yes, since I am dealing with a video file, I want to use the Kalman filter to track the objects in the foreground. –

+1

Btw, in this case you are not limited to the Kalman filter; it is actually a fairly basic method. You can look up more advanced approaches at the visual object tracking challenge: http://www.votchallenge.net/challenges.html –