// A multi-camera geometric and photometric calibration example which depends only on OpenCV.
#include <cstdlib>
#include <cstring>
#include <iostream>
#include <vector>

#include <cv.h>
#include <highgui.h>

#ifdef HAVE_CONFIG_H
#endif
// Print command-line usage to stderr and terminate the program.
//
// @param s  the program name (argv[0]), echoed in the usage line.
//
// Never returns: exits with status 1.
void usage(
const char *s) {
// Fix: the original printed the program name and "[-m ..." with no
// separating space; also qualify cerr/exit explicitly instead of
// relying on a using-directive that is not visible in this file.
std::cerr << "usage:\n" << s
<< " [-m <model image>] [-r]\n"
" -m specifies model image\n"
" -r do not load any data\n"
" -t train a new classifier\n"
" -g recompute geometric calibration\n"
" -l rebuild irradiance map from scratch\n";
std::exit(1);
}
// Entry point: parse command-line flags selecting which calibration stages
// to recompute, then initialize the multi-camera grabber.
//
// NOTE(review): this chunk appears to be missing lines — `modelFile` and
// `multi` are used but never declared in the visible code, and the
// "Geometric calibration failed" branch below executes unconditionally;
// presumably a guarding call to the geometric-calibration routine was
// lost. TODO: confirm against the original sample.
int main(
int argc,
char** argv )
{
bool redo_geom=false;      // -g or -r: redo geometric calibration
bool redo_training=false;  // -t or -r: retrain the classifier
bool redo_lighting=false;  // -l, -g or -r: rebuild the irradiance map
for (int i=1; i<argc; i++) {
if (strcmp(argv[i], "-m") ==0) {
// -m requires a value; reject a trailing -m with no argument.
if (i==argc-1)
usage(argv[0]);
// NOTE(review): modelFile is not declared anywhere in the visible code.
modelFile = argv[i+1];
i++;  // skip the consumed value
} else if (strcmp(argv[i], "-r")==0) {
// -r: redo everything from scratch.
redo_geom=redo_training=redo_lighting=true;
} else if (strcmp(argv[i], "-g")==0) {
// Redoing geometry invalidates the lighting calibration as well.
redo_geom=redo_lighting=true;
} else if (strcmp(argv[i], "-l")==0) {
redo_lighting=true;
} else if (strcmp(argv[i], "-t")==0) {
redo_training=true;
} else if (argv[i][0]=='-') {
// Unknown option: silently ignored.
// NOTE(review): probably intended to call usage() here — confirm.
}
}
// Initialize cameras/detector; reuse cached training data unless -t/-r.
// NOTE(review): `multi` is not declared in the visible code.
if( multi.
init(!redo_training) ==0 )
{
cerr <<"Initialization error.\n";
return -1;
}
cout << "Starting geometric calibration.\n";
// NOTE(review): the failure message and `return 2` below run
// unconditionally — the call they were meant to guard is missing.
cerr << "Geometric calibration failed.\n";
return 2;
}
// NOTE(review): orphaned fragment — the enclosing function header(s) were
// lost. The cout line looks like the tail of main after a successful
// geometric calibration; the `{ return true; }` looks like an early-return
// cache path of a calibration routine whose `if (...)` line is missing.
// TODO: restore the missing lines before this file can compile.
cout << "Geometric calibration OK. Calibrating light...\n";
}
{
return true;
}
// Interactive geometric-calibration loop. NOTE(review): the function header
// is not visible in this chunk — judging by the `return true/false` tail,
// this is the body of a bool-returning calibration routine. `multi` is not
// declared in the visible code.
const char *win = "BazAR";
cvNamedWindow(win, CV_WINDOW_AUTOSIZE);
// NOTE(review): empty per-camera loop — its body appears to have been lost.
for (
int i=0; i<multi.
cams.size(); ++i) {
}
IplImage *display=0;   // lazily (re)created copy of the displayed frame
bool success=false;    // set once the optimization below succeeds
bool end=false;        // set when the user presses 'q' or ESC
int dispCam=0;         // index of the camera currently shown
int nbHomography =0;   // frames in which at least one camera detected the target
while (!end)
{
// Run the planar-target detector on every camera for this frame.
int nbdet=0;
for (
int i=0; i<multi.
cams.size(); ++i) {
if (multi.
cams[i]->detect()) nbdet++;
}
if (nbdet>0) {
for (
int i=0; i<multi.
cams.size(); ++i) {
if (multi.
cams[i]->detector.object_is_detected) {
// NOTE(review): homography-recording call appears to be missing here.
} else {
}
}
nbHomography++;
}
// Once enough views are gathered, run the calibration optimization.
// NOTE(review): the function call that takes the numeric tuning
// parameters below is missing — only its argument list survived.
if (nbHomography >=200) {
120,
(multi.
cams.size() > 1 ? 1:2),
3,
.5,
0,
0,
0.0078125,
0.9,
0.001953125,
12,
0.05,
3
))
{
success=true;
break;
}
}
// Create the display buffer at the current camera's frame size on demand
// (cvReleaseImage nulls the pointer, forcing re-creation after a switch).
if (display==0) display = cvCreateImage(cvGetSize(multi.
cams[dispCam]->frame), IPL_DEPTH_8U, 3);
cvShowImage(win, display);
// Keyboard handling: q/ESC quits, n/p cycles the displayed camera.
int k=cvWaitKey(10);
switch (k) {
case 'q':
case 27: end=true; break;
case 'n':
if(dispCam < multi.
cams.size()-1) {
cvReleaseImage(&display);
++dispCam;
}
cout << "Current cam: " << dispCam << endl;
break;
case 'p': if(dispCam > 0) {
cvReleaseImage(&display);
--dispCam;
}
cout << "Current cam: " << dispCam << endl;
break;
case -1: break;  // no key pressed within the 10 ms wait
default: cout << (char)k <<": What ?\n";
}
}
if (display) cvReleaseImage(&display);
return true;
}
// NOTE(review): orphaned tail — likely belonged after an `if (success ...)`
// block whose opening line was lost.
return false;
}
// Lighting/photometric-calibration loop. NOTE(review): the function header
// is not visible — this is the body of a bool-returning routine taking at
// least a `cache` flag. `multi`, `model`, `frameOK` and `nbLightMeasures`
// are not declared in the visible code.
{
if (cache) model.map.load();  // reuse a previously saved irradiance map
const char *win = "BazAR";
cvNamedWindow(win, CV_WINDOW_AUTOSIZE);
IplImage *display=0;   // lazily (re)created copy of the displayed frame
// NOTE(review): `success` is never set to true in the visible code, so the
// final LoadOptimalStructureFromFile branch is unreachable as written.
bool success=false;
bool end=false;        // set when the user presses 'q' or ESC
int dispCam=0;         // index of the camera currently shown
while (!end)
{
// Run the detector on every camera for this frame.
int nbdet=0;
for (
int i=0; i<multi.
cams.size(); ++i) {
if (multi.
cams[i]->detect()) nbdet++;
}
if (nbdet>0) {
model.augm.Clear();
for (
int i=0; i<multi.
cams.size(); ++i) {
if (multi.
cams[i]->detector.object_is_detected) {
// NOTE(review): the detected-case body appears to have been lost.
} else {
model.augm.AddHomography();
}
}
frameOK = model.augm.Accomodate(4, 1e-4);
}
if (display==0) display = cvCreateImage(cvGetSize(multi.
cams[dispCam]->frame), IPL_DEPTH_8U, 3);
if (frameOK) {
// Read column 2 of the object-to-world matrix — presumably the
// plane normal (z axis of the object frame); confirm with the
// CamAugmentation API.
CvMat *mat = model.augm.GetObjectToWorld();
float normal[3];
for (int j=0;j<3;j++) normal[j] = cvGet2D(mat, j, 2).val[0];
cvReleaseMat(&mat);
// Accumulate one lighting measurement per detecting camera.
for (
int i=0; i<multi.
cams.size();++i) {
if (multi.
cams[i]->detector.object_is_detected) {
nbLightMeasures++;
model.map.addNormal(normal, *multi.
cams[i]->lc, i);
}
}
// After enough measurements, solve for the light parameters once
// and persist them.
if (!model.map.isReady() && nbLightMeasures > 40) {
if (model.map.computeLightParams()) {
model.map.save();
}
}
} else {
// No usable pose this frame: just show the raw camera image.
cvCopy( multi.
cams[dispCam]->frame, display);
}
cvShowImage(win, display);
// Keyboard handling: q/ESC quits, n/p cycles the displayed camera.
int k=cvWaitKey(10);
switch (k) {
case 'q':
case 27: end=true; break;
case 'n':
if(dispCam < multi.
cams.size()-1) {
cvReleaseImage(&display);
++dispCam;
}
cout << "Current cam: " << dispCam << endl;
break;
case 'p': if(dispCam > 0) {
cvReleaseImage(&display);
--dispCam;
}
cout << "Current cam: " << dispCam << endl;
break;
case -1: break;  // no key pressed within the 10 ms wait
default: cout << (char)k <<": What ?\n";
}
}
if (display) cvReleaseImage(&display);
if (success && model.augm.LoadOptimalStructureFromFile("camera_c.txt", "camera_r_t.txt")) {
return true;
}
return false;
// NOTE(review): unreachable duplicate return — likely mangling residue.
return false;
}
// NOTE(review): orphaned drawing fragment — the enclosing function header
// and surrounding loop(s) were lost (the trailing braces close scopes whose
// openers are missing). The cvCircle call is also missing its center-point
// and radius arguments; `video` and `dst` are not declared in the visible
// code. TODO: restore the missing lines.
{
cvCopy(video, dst);
cvCircle(dst,
3, CV_RGB(0,255,0), -1, 8,0);
}
}
}
}
// Scene-augmentation/drawing fragment: projects a 4-corner quad with a
// 3x4 projection matrix and fills it shaded by the estimated lighting.
// NOTE(review): the function header is missing; `frame`, `display`, `m`,
// `o2w`, `g`, `b` and `irradiance` are not declared in the visible code.
{
cvCopy(frame, display);
if (!m) return;  // no projection available: leave the raw frame
// Homogeneous object-space corners (columns). NOTE(review): rows 0 and 1
// (the x/y coordinates of the four corners) are never written in the
// visible code — the initializing lines were presumably lost.
double pts[4][4];
double proj[4][4];
CvMat ptsMat, projMat;
cvInitMatHeader(&ptsMat, 4, 4, CV_64FC1, pts);
// projMat is a 3x4 view over the first 12 doubles of `proj`.
cvInitMatHeader(&projMat, 3, 4, CV_64FC1, proj);
for (int i=0; i<4; i++) {
pts[2][i] = 0;  // quad lies in the z=0 plane
pts[3][i] = 1;  // homogeneous coordinate
}
cvMatMul(m, &ptsMat, &projMat);
cvReleaseMat(&m);
// Perspective divide to pixel coordinates.
CvPoint projPts[4];
for (int i=0;i<4; i++) {
projPts[i].x = cvRound(proj[0][i]/proj[2][i]);
projPts[i].y = cvRound(proj[1][i]/proj[2][i]);
}
CvScalar color = cvScalar(128,128,128,128);  // base gray, modulated below
// Column 2 of the object-to-world matrix — presumably the plane normal.
float normal[3];
for (int j=0;j<3;j++)
normal[j] = cvGet2D(o2w, j, 2).val[0];
cvReleaseMat(&o2w);
// Apply per-channel gain/bias and irradiance to the base color.
for (int i=0; i<3; i++) {
color.val[i] = 255.0*(g[i]*(color.val[i]/255.0)*irradiance.val[i] + b[i]);
}
}
// NOTE(review): mismatched braces — the `}` above appears to close the
// function, leaving this draw call orphaned; lines were likely lost.
cvFillConvexPoly(display, projPts, 4, color);
}