// This is a more complete example that computes geometric and photometric
// calibration and augmentation with a single camera.
#include <iostream>
#include "cv.h"
#include "highgui.h"
#ifdef HAVE_CONFIG_H
#endif
#include "calibmodel.h"
int nbImages, bool cache);
//! Print the command-line synopsis for this program to stderr and exit.
/*! \param progname the program name to show (typically argv[0]).
    This function never returns: it terminates the process with status 1. */
void usage(const char *progname) {
	// Adjacent string literals are concatenated at compile time, so the
	// option summary is emitted as one block after the program name.
	static const char *const options =
		"[<cam number>|<video file>] [-m <model image>] [-r]\n"
		" -m specifies model image\n"
		" -r do not load any data\n"
		" -t train a new classifier\n"
		" -g recompute geometric calibration\n"
		" -l rebuild irradiance map from scratch\n";
	std::cerr << "usage:\n" << progname << options;
	std::exit(1);
}
/*! Program entry point: parses command-line flags, opens the capture
 *  source (camera index or video file) and drives model building and
 *  calibration.
 *  NOTE(review): this region appears truncated — the statement whose
 *  failure branch prints "Geometric calibration failed." (presumably a
 *  geometric-calibration call) is missing between the two cout/cerr
 *  lines below, so the braces as shown do not balance. Recover the
 *  missing lines from version control before compiling.
 */
int main(
int argc,
char** argv )
{
CvCapture* capture = 0;
// Default capture source: camera index 0.
const char *captureSrc = "0";
// Flags selecting which cached calibration stages must be redone.
bool redo_geom=false;
bool redo_training=false;
bool redo_lighting=false;
// Command-line parsing.
for (int i=1; i<argc; i++) {
if (strcmp(argv[i], "-m") ==0) {
if (i==argc-1)
usage(argv[0]);
// NOTE(review): modelFile is not declared in this fragment — presumably a
// global defined elsewhere in the file; confirm.
modelFile = argv[i+1];
i++;
} else if (strcmp(argv[i], "-r")==0) {
// -r: discard all cached data and recompute everything.
redo_geom=redo_training=redo_lighting=true;
} else if (strcmp(argv[i], "-g")==0) {
// Recomputing geometry invalidates the lighting map too.
redo_geom=redo_lighting=true;
} else if (strcmp(argv[i], "-l")==0) {
redo_lighting=true;
} else if (strcmp(argv[i], "-t")==0) {
redo_training=true;
} else if (argv[i][0]=='-') {
// NOTE(review): unknown option falls through silently — the original
// likely called usage(argv[0]) here; confirm against upstream source.
} else {
// Bare argument: the capture source (camera number or video file).
captureSrc = argv[i];
}
}
// A single digit selects a camera; anything else is treated as a video file.
if(strlen(captureSrc) == 1 && isdigit(captureSrc[0]))
capture = cvCaptureFromCAM( captureSrc[0]-'0');
else
capture = cvCaptureFromAVI( captureSrc );
if( !capture )
{
cerr <<"Could not initialize capturing from " << captureSrc << " ...\n";
return -1;
}
// Build (or load from cache) the planar model used for tracking.
if (!model.buildCached(capture, !redo_training)) {
cout << "model.buildCached() failed.\n";
return 1;
}
cout << "Model build. Starting geometric calibration.\n";
// NOTE(review): the calibration call guarding this error branch is missing.
cerr << "Geometric calibration failed.\n";
return 2;
}
cout << "Geometric calibration OK. Calibrating light...\n";
}
/* NOTE(review): fragment of what appears to be the geometric-calibration
 * routine — its signature and several interior lines are missing, so the
 * braces below do not balance. Code left byte-identical; recover the
 * missing lines before compiling. */
{
return true;
}
// Main calibration loop: grab frames, accumulate homographies, then solve.
const char *win = "BazAR";
IplImage*gray=0;
cvNamedWindow(win, CV_WINDOW_AUTOSIZE);
// Grab one frame to learn the camera resolution.
IplImage* frame = cvQueryFrame(capture);
calib.
AddCamera(frame->width, frame->height);
IplImage* display=cvCloneImage(frame);
bool success=false;
int nbHomography =0;
while (1)
{
frame = cvQueryFrame( capture );
if( !frame )
break;
// Detection works on a single-channel image; convert only when needed.
if (frame->nChannels >1) {
if( !gray )
gray = cvCreateImage( cvGetSize(frame), IPL_DEPTH_8U, 1 );
cvCvtColor(frame, gray, CV_RGB2GRAY);
} else {
gray = frame;
}
nbHomography++;
cout << nbHomography << " homographies.\n";
// Once enough homographies are collected, run the bundle solver.
if (nbHomography >=70) {
// NOTE(review): the call these numeric arguments belong to (presumably
// calib.Calibrate(...) or similar) is missing from this fragment —
// confirm the parameter meanings against the CamCalibration API.
50,
2,
3,
3,
0,
0,
0.0078125,
0.9,
0.001953125,
12,
0.05,
3
))
{
success=true;
break;
}
}
}
cvShowImage(win, display);
int k=cvWaitKey(10);
// Quit on 'q' or ESC.
if (k=='q' || k== 27)
break;
}
// Release resources; gray was only allocated for multi-channel input.
cvReleaseImage(&display);
if (frame->nChannels > 1)
cvReleaseImage(&gray);
return true;
}
return false;
}
/* NOTE(review): fragment of the photometric-calibration routine — the
 * head of the signature (return type, name, leading parameters) is
 * missing; only the trailing parameters survive below. Interior lines
 * are also missing (e.g. the statement producing `mat`, and the branch
 * computing gain/bias). Code left byte-identical. */
int nbImages, bool cache)
{
const char *win = "BazAR";
IplImage*gray=0;
cvNamedWindow(win, CV_WINDOW_AUTOSIZE);
cvNamedWindow("LightMap", CV_WINDOW_AUTOSIZE);
IplImage* frame = 0;
IplImage* display=cvCloneImage(cvQueryFrame(capture));
int nbHomography =0;
// Irradiance map display buffer, sized from the model's light map.
IplImage *lightmap = cvCreateImage(cvGetSize(model.
map.
map.
getIm()), IPL_DEPTH_8U,
lc.avgChannels);
while (1)
{
frame = cvQueryFrame( capture );
if( !frame )
break;
// Detection works on a single-channel image; convert only when needed.
if (frame->nChannels >1) {
if( !gray )
gray = cvCreateImage( cvGetSize(frame), IPL_DEPTH_8U, 1 );
cvCvtColor(frame, gray, CV_RGB2GRAY);
} else {
gray = frame;
}
nbHomography++;
// Extract the plane normal (3rd column of the pose matrix).
// NOTE(review): the statement producing `mat` is missing from this fragment.
float normal[3];
for (int j=0;j<3;j++) normal[j] = cvGet2D(mat, j, 2).val[0];
cvReleaseMat(&mat);
// Once enough frames have been seen, report the recovered camera response.
if (!model.
map.
isReady() && nbHomography >= nbImages) {
cout << "Gain: " << gain[0] << ", " << gain[1] << ", " << gain[2] << endl;
cout << "Bias: " << bias[0] << ", " << bias[1] << ", " << bias[2] << endl;
}
}
}
// Display the current light map, scaled for an 8-bit window.
double min, max;
cvSetImageCOI(map, 2);
cvMinMaxLoc(map, &min, &max);
cvSetImageCOI(map, 0);
assert(map->nChannels == lightmap->nChannels);
cvConvertScale(map, lightmap, 128, 0);
cvShowImage("LightMap", lightmap);
} else {
cvCopy(frame,display);
}
cvShowImage(win, display);
int k=cvWaitKey(10);
// Quit on 'q' or ESC.
if (k=='q' || k== 27)
break;
}
cvReleaseImage(&lightmap);
cvReleaseImage(&display);
if (frame->nChannels > 1)
cvReleaseImage(&gray);
return 0;
}
/* NOTE(review): fragment of a drawing routine — signature missing.
 * Copies the video frame into the display buffer and draws small filled
 * green circles (radius 3) at points whose coordinates come from lines
 * missing here. Code left byte-identical. */
{
cvCopy(video, dst);
cvCircle(dst,
3, CV_RGB(0,255,0), -1, 8,0);
}
}
}
}
/* NOTE(review): fragment of the scene-augmentation routine — signature
 * and several interior lines are missing (the early `return;` followed by
 * live code suggests a conditional was dropped; the statements producing
 * `m`, `o2w`, `g`, `b` and `irradiance` are not visible). Projects the
 * model's corners with the 3x4 projection `m` and fills the quad with a
 * photometrically-corrected color. Code left byte-identical. */
{
cvCopy(frame, display);
return;
if (!m) return;
// Homogeneous model corner points (z=0 plane) and their projections.
double pts[4][4];
double proj[4][4];
CvMat ptsMat, projMat;
cvInitMatHeader(&ptsMat, 4, 4, CV_64FC1, pts);
cvInitMatHeader(&projMat, 3, 4, CV_64FC1, proj);
for (int i=0; i<4; i++) {
// NOTE(review): the lines filling pts[0][i]/pts[1][i] are missing.
pts[2][i] = 0;
pts[3][i] = 1;
}
cvMatMul(m, &ptsMat, &projMat);
cvReleaseMat(&m);
// Perspective divide to get pixel coordinates of the 4 corners.
CvPoint projPts[4];
for (int i=0;i<4; i++) {
projPts[i].x = cvRound(proj[0][i]/proj[2][i]);
projPts[i].y = cvRound(proj[1][i]/proj[2][i]);
}
// Plane normal: 3rd column of the object-to-world transform.
float normal[3];
for (int j=0;j<3;j++)
normal[j] = cvGet2D(o2w, j, 2).val[0];
cvReleaseMat(&o2w);
// Sample the model color at its center pixel.
CvScalar color = cvGet2D(model.
image, model.
image->height/2, model.
image->width/2);
// Apply gain/bias and irradiance per channel (values normalized to [0,1]).
for (int i=0; i<3; i++) {
color.val[i] = 255.0*(g[i]*(color.val[i]/255.0)*irradiance.val[i] + b[i]);
}
cvFillConvexPoly(display, projPts, 4, color);
}
/* NOTE(review): two truncated calibration-callback bodies — their
 * signatures and the loops that fill `pts` are missing. Each reuses a
 * static scratch vector of CamCalibration correspondence points,
 * cleared on every call. Code left byte-identical. */
{
static std::vector<CamCalibration::s_struct_points> pts;
pts.clear();
}
}
}
{
static std::vector<CamCalibration::s_struct_points> pts;
pts.clear();
}
}
return true;
}