// NOTE(review): extraction-garbled fragment. The leading integer on each line
// is the ORIGINAL source file's line number fused into the text, and the gaps
// in that numbering (39->40->42->50...) show that the enclosing function
// header and several interior lines are missing from this chunk. Do not
// compile as-is; restore from the original source before editing logic.
//
// What is visible: constructor-style default initialization -- pointers are
// zeroed and matching/RANSAC tuning parameters are given defaults.
39 object_input_view = 0;
40 model_and_input_images = 0;
42 homography_estimator=0;
// Matching / RANSAC tuning defaults (presumably: minimum match score,
// inlier distance in pixels, early-stop support count, iteration cap,
// reprojection threshold for nonlinear refinement -- TODO confirm).
50 match_score_threshold = .05f;
51 ransac_dist_threshold = 10;
52 ransac_stop_support = 50;
53 max_ransac_iterations = 500;
54 non_linear_refine_threshold = 3;
61 dont_use_bins_when_creating_model_points();
62 dont_use_bins_when_detecting_input_image_points();
// -1 presumably means "no model point selected for debugging" -- TODO confirm.
64 index_of_model_point_to_debug = -1;
66 sample_number_for_refining = 1000;
68 max_detected_pts = 1000;
71 keypoint_distance_threshold = 1.5;
// Fragment of a separate setter (original line 77): overrides the default
// max_detected_pts with a caller-supplied `max` (parameter not visible here).
77 max_detected_pts = max;
// NOTE(review): extraction-garbled fragments of several small setter methods
// (original lines 82-122); each method's signature is missing. All of them
// forward configuration into `new_images_generator` (the synthetic-view
// generator used for training). Leading integers are original line numbers.
// Enable orientation correction (original line 82).
82 new_images_generator.set_use_orientation_correction(
true);
// Disable orientation correction (original line 87).
87 new_images_generator.set_use_orientation_correction(
false);
// Viewpoint-angle ranges for generated views (theta / phi).
92 new_images_generator.set_range_variation_for_theta(min_theta, max_theta);
97 new_images_generator.set_range_variation_for_phi(min_phi, max_phi);
// Independent scaling ranges along the two axes (lambda1 / lambda2);
// the min_lambda1/max_lambda1 parameter lines are missing from this chunk.
101 float min_lambda2,
float max_lambda2)
103 new_images_generator.independent_scaling(min_lambda1, max_lambda1, min_lambda2, max_lambda2);
// Constrained scaling: adds a bound on the lambda1/lambda2 ratio (l1_l2).
107 float min_lambda2,
float max_lambda2,
108 float min_l1_l2,
float max_l1_l2)
110 new_images_generator.constrained_scaling(min_lambda1, max_lambda1,
111 min_lambda2, max_lambda2,
112 min_l1_l2, max_l1_l2);
// Background / noise controls for generated training views.
117 new_images_generator.set_use_random_background(use_random_background);
122 new_images_generator.set_noise_level(noise_level);
// NOTE(review): extraction-garbled fragment of a build/train entry point
// (original lines 129-159). The function name and the `model_image`
// parameter declaration are missing; leading integers are original line
// numbers. Visible flow: configure patch size, normalize the model image to
// single-channel grayscale, hand it to the view generator, then train.
129 int max_point_number_on_model,
int patch_size,
130 int yape_radius,
int tree_number,
int nbLev,
133 new_images_generator.set_patch_size(patch_size);
// If the input is not already 1-channel, convert to 8-bit grayscale.
// (The branch structure is incomplete here -- an if/else around lines
// 136-150 was lost in extraction.)
136 if (model_image->nChannels == 1)
139 model = cvCreateImage(cvGetSize(model_image), IPL_DEPTH_8U, 1);
140 cvCvtColor(model_image, model, CV_RGB2GRAY);
144 new_images_generator.set_original_image(model,
150 new_images_generator.set_original_image(model);
// Release the temporary grayscale copy only if one was actually created.
152 if (model != model_image) cvReleaseImage(&model);
154 new_images_generator.set_level_number(nbLev);
155 new_images_generator.set_gaussian_smoothing_kernel_size(yape_radius);
159 learn(max_point_number_on_model, patch_size, yape_radius, tree_number, nbLev, LearnProgress);
// NOTE(review): extraction-garbled fragment of a second build entry point
// (original lines 166-197) that works from a `filename` (parameter not
// visible). Visible flow: try a cached "<filename>.classifier" directory
// first; otherwise read an optional "<filename>.roi" corner file and train
// from scratch. Leading integers are original line numbers.
166 int max_point_number_on_model,
int patch_size,
167 int yape_radius,
int tree_number,
int nbLev,
170 new_images_generator.set_patch_size(patch_size);
// Fast path: a previously saved classifier directory.
172 string dirname(filename +
".classifier");
173 if (load(dirname))
return true;
// `model` is presumably loaded between original lines 173 and 176 (missing).
176 if (model == 0 )
return false;
// Optional region-of-interest file holding 4 corner points.
178 string roifn(filename +
".roi");
179 ifstream roif(roifn.data());
// Corners default to -1 (sentinel for "no ROI") -- TODO confirm.
180 int u[4] = {-1, -1, -1, -1};
181 int v[4] = {-1, -1, -1, -1};
183 cout <<
"Reading ROI from file " << roifn <<
".\n";
184 for (
int i = 0; i < 4; i++) {
191 new_images_generator.set_original_image(model, u[0], v[0], u[1], v[1], u[2], v[2], u[3], v[3]);
192 new_images_generator.set_level_number(nbLev);
193 new_images_generator.set_gaussian_smoothing_kernel_size(yape_radius);
197 learn(max_point_number_on_model, patch_size, yape_radius, tree_number, nbLev, LearnProgress);
// NOTE(review): extraction-garbled fragment of load(directory_name)
// (original lines 209-300) plus a one-line wrapper at original line 209.
// Visible flow: read parameters.txt, original_image.bmp, corners.txt and
// keypoints.txt from the directory, rebuild the view generator, classifier
// forest, point detector and input-view buffers. Leading integers are
// original line numbers; many interior lines (error branches, loop bodies,
// allocations) are missing. Do not compile as-is.
209 load(directory_name);
// --- parameters.txt: yape radius and (presumably) pyramid level count ---
215 char parameter_filename[1000];
216 sprintf(parameter_filename,
"%s/parameters.txt", directory_name.data());
217 ifstream param_f(parameter_filename);
221 if (!param_f.good())
return false;
222 param_f >> yape_radius;
226 new_images_generator.set_level_number(nbLev);
227 new_images_generator.set_gaussian_smoothing_kernel_size(yape_radius);
// --- original_image.bmp: grayscale model image (second arg 0 to loader) ---
230 char image_name[1000];
231 sprintf(image_name,
"%s/original_image.bmp", directory_name.data());
232 IplImage * original_image =
mcvLoadImage(image_name, 0);
233 if (original_image == 0)
return false;
// --- corners.txt: four ROI corners (u,v pairs) ---
236 char corner_filename[1000];
237 sprintf(corner_filename,
"%s/corners.txt", directory_name.data());
238 ifstream cf(corner_filename);
240 if (!cf.good())
return false;
241 int u_corner1, v_corner1, u_corner2, v_corner2, u_corner3, v_corner3, u_corner4, v_corner4;
242 cf >> u_corner1 >> v_corner1;
243 cf >> u_corner2 >> v_corner2;
244 cf >> u_corner3 >> v_corner3;
245 cf >> u_corner4 >> v_corner4;
247 new_images_generator.set_original_image(original_image,
248 u_corner1, v_corner1, u_corner2, v_corner2, u_corner3, v_corner3, u_corner4, v_corner4);
// Generator presumably keeps its own copy; the loaded image is released here.
249 cvReleaseImage(&original_image);
// --- keypoints.txt: model point count then per-point M[0..2]/scale ---
252 char point_filename[1000];
253 sprintf(point_filename,
"%s/keypoints.txt", directory_name.data());
254 ifstream pf(point_filename);
255 if (!pf.good())
return false;
256 pf >> model_point_number;
259 for(
int i = 0; i < model_point_number; i++) {
260 pf >> model_points[i].
M[0] >> model_points[i].M[1] >> model_points[i].M[2] >> model_points[i].scale;
261 model_points[i].class_index = i;
// Scale indexes a pyramid level, so it must stay below the level count.
262 assert(model_points[i].scale < nbLev);
// Error path (stream-failure check around it is missing from this chunk).
264 cerr <<
"problem while reading keypoint #" << i <<
" in " << point_filename << endl;
270 new_images_generator.set_object_keypoints(model_points, model_point_number);
// --- classifier forest + per-point buffers + detector + input view ---
275 if ( !forest->load(directory_name) )
284 detected_point_views[i].alloc(forest->image_width);
285 match_probabilities[i] =
new float[model_point_number];
288 point_detector =
new pyr_yape(new_images_generator.original_image->width, new_images_generator.original_image->height, nbLev);
289 point_detector->set_radius(yape_radius);
298 object_input_view =
new object_view(new_images_generator.original_image->width,
299 new_images_generator.original_image->height,
300 new_images_generator.level_number);
// NOTE(review): extraction-garbled fragment (original lines 309-332);
// looks like the tail of an initializer (line 309) followed by a
// destructor-style cleanup that releases every owned allocation. The
// enclosing signatures and the loop around match_probabilities are missing.
309 model_and_input_images = 0;
// Guarded deletes of all owned resources. Raw new/delete throughout --
// legacy style; would be unique_ptr in a modern rewrite, but logic is too
// incomplete here to restructure safely.
314 if (object_input_view)
delete object_input_view;
315 if (point_detector)
delete point_detector;
317 if (model_points != 0)
318 delete [] model_points;
// Per-model-point probability arrays (enclosing loop lost in extraction).
324 if (match_probabilities[i])
325 delete [] match_probabilities[i];
328 if (detected_points)
delete[] detected_points;
329 if (detected_point_views)
delete[] detected_point_views;
330 if (affine_motion)
delete affine_motion;
332 if (homography_estimator)
delete homography_estimator;
// NOTE(review): extraction-garbled fragment of learn(...) (original lines
// 336-360). The function name, several parameters, and the forest
// construction are missing. Visible flow: (re)build the pyramid point
// detector, select stable model points, build and refine the classifier
// forest, then allocate per-point buffers.
336 int yape_radius,
int tree_number,
int nbLev,
// Replace any existing detector with one sized to the model image.
339 if (point_detector)
delete point_detector;
340 point_detector =
new pyr_yape(new_images_generator.original_image->width, new_images_generator.original_image->height, nbLev);
341 point_detector->set_radius(yape_radius);
342 point_detector->set_use_bins(use_bins_for_model_points);
344 detect_most_stable_model_points(max_point_number_on_model, patch_size, views_number, min_view_rate, LearnProgress);
346 save_image_of_model_points(patch_size);
// Classifier training: random trees, refinement passes, then a test run.
351 forest->create_trees_at_random();
354 forest->refine( &new_images_generator, sample_number_for_refining);
355 forest->test( &new_images_generator, 50);
// Per-point buffers (enclosing loop lost in extraction).
359 detected_point_views[i].alloc(patch_size);
360 match_probabilities[i] =
new float[model_point_number];
// NOTE(review): extraction-garbled fragment of save(directory_name)
// (original lines 367-402). The signature and the platform #ifdef around
// mkdir/_mkdir are missing. Visible flow: create the directory, then write
// original_image.bmp, corners.txt, parameters.txt, keypoints.txt and the
// classifier forest. The fixed 1000-byte filename buffers risk truncation/
// overflow for long directory names -- would use std::string formatting in a
// rewrite, but the block is too incomplete to change safely.
// POSIX branch (presumably under #ifndef WIN32 -- the guard is missing).
367 mkdir(directory_name.data(), S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH | S_IWOTH);
// Windows branch.
369 _mkdir(directory_name.data());
// --- original_image.bmp ---
372 char image_name[1000];
373 sprintf(image_name,
"%s/original_image.bmp", directory_name.data());
374 mcvSaveImage(image_name, new_images_generator.original_image);
// --- corners.txt: one "u v" pair per line ---
376 char corner_filename[1000];
377 sprintf(corner_filename,
"%s/corners.txt", directory_name.data());
378 ofstream cf(corner_filename);
379 cf << new_images_generator.u_corner1 <<
" " << new_images_generator.v_corner1 << endl;
380 cf << new_images_generator.u_corner2 <<
" " << new_images_generator.v_corner2 << endl;
381 cf << new_images_generator.u_corner3 <<
" " << new_images_generator.v_corner3 << endl;
382 cf << new_images_generator.u_corner4 <<
" " << new_images_generator.v_corner4 << endl;
// --- parameters.txt: radius then level count (mirrors the load path) ---
385 char parameter_filename[1000];
386 sprintf(parameter_filename,
"%s/parameters.txt", directory_name.data());
387 ofstream param_f(parameter_filename);
388 param_f << point_detector->get_radius() << endl;
389 param_f << new_images_generator.level_number << endl;
// --- keypoints.txt: count then per-point M[0] M[1] M[2] scale ---
392 char point_filename[1000];
393 sprintf(point_filename,
"%s/keypoints.txt", directory_name.data());
394 ofstream pf(point_filename);
395 pf << model_point_number << endl;
396 for(
int i = 0; i < model_point_number; i++)
397 pf << model_points[i].M[0] <<
" " << model_points[i].M[1]
398 <<
" " << model_points[i].M[2]
399 <<
" " << model_points[i].scale << endl;
402 forest->save(directory_name);
// NOTE(review): extraction-garbled fragments of three related functions
// (original lines 407-474): point detection on the input image,
// preprocessing of each detected point view, and per-point classification.
// Signatures and loop/brace structure are partly missing.
// --- detect_points(input_image): run pyramid detector into detected_points ---
407 check_target_size(input_image);
408 point_detector->set_use_bins(use_bins_for_input_image);
// Fixed detector threshold; meaning of tau=10 not visible here -- TODO confirm.
409 point_detector->set_tau(10);
411 detected_point_number = point_detector->pyramidBlurDetect(input_image,
412 detected_points, max_detected_pts,
413 &object_input_view->image);
// --- preprocess pass: build a view for each point far enough from borders ---
418 int patch_size = forest->image_width;
420 object_input_view->comp_gradient();
422 for(
int i = 0; i < detected_point_number; i++)
425 pv->
point2d = &(detected_points[i]);
// Border guard: keep only points whose patch fits inside level s.
429 if (u > (patch_size/2) && u < object_input_view->image[s]->width - (patch_size/2) &&
430 v > (patch_size/2) && v < object_input_view->image[s]->height - (patch_size/2))
432 new_images_generator.preprocess_point_view(pv, object_input_view);
// --- classification pass: posterior probabilities per model point ---
443 int patch_size = forest->image_width;
445 for(
int i = 0; i < detected_point_number; i++)
// Same border guard as above.
451 if (u > (patch_size/2) && u < object_input_view->image[s]->width - (patch_size/2) &&
452 v > (patch_size/2) && v < object_input_view->image[s]->height - (patch_size/2))
454 forest->posterior_probabilities(pv, match_probabilities[i]);
// Optionally record the best-scoring model point as a match struct.
456 if (fill_match_struct) {
457 int model_point_index = 0;
458 float score = match_probabilities[i][model_point_index];
// Linear argmax over model points.
459 for(
int j = 1; j < model_point_number; j++)
460 if (match_probabilities[i][j] > score)
462 score = match_probabilities[i][j];
463 model_point_index = j;
467 match->
object_point = &(model_points[model_point_index]);
468 match->
score = score;
// Points failing the border guard get zeroed probabilities.
474 memset(match_probabilities[i], 0,
sizeof(
float) * model_point_number);
// NOTE(review): extraction-garbled fragment of the top-level detect()
// pipeline (original lines 480-526). Visible flow: detect points, estimate
// an affine transformation (RANSAC), project the four model corners, then
// refine to a homography and re-project the corners. Signature, match step
// and intermediate braces are missing.
480 detect_points(input_image);
485 object_is_detected =
false;
487 object_is_detected = estimate_affine_transformation();
// Project the four model corners through the affine estimate.
489 affine_motion->transform_point(
float(new_images_generator.u_corner1),
490 float(new_images_generator.v_corner1), &detected_u_corner1, &detected_v_corner1);
491 affine_motion->transform_point(
float(new_images_generator.u_corner2),
492 float(new_images_generator.v_corner2), &detected_u_corner2, &detected_v_corner2);
493 affine_motion->transform_point(
float(new_images_generator.u_corner3),
494 float(new_images_generator.v_corner3), &detected_u_corner3, &detected_v_corner3);
495 affine_motion->transform_point(
float(new_images_generator.u_corner4),
496 float(new_images_generator.v_corner4), &detected_u_corner4, &detected_v_corner4);
// Only refine to a full homography if the affine stage succeeded.
498 if (object_is_detected)
500 object_is_detected = estimate_homographic_transformation_nonlinear_method();
502 if (object_is_detected)
// Re-project the corners through the refined homography H.
504 H->transform_point(
float(new_images_generator.u_corner1),
505 float(new_images_generator.v_corner1), &detected_u_corner1, &detected_v_corner1);
506 H->transform_point(
float(new_images_generator.u_corner2),
507 float(new_images_generator.v_corner2), &detected_u_corner2, &detected_v_corner2);
508 H->transform_point(
float(new_images_generator.u_corner3),
509 float(new_images_generator.v_corner3), &detected_u_corner3, &detected_v_corner3);
510 H->transform_point(
float(new_images_generator.u_corner4),
511 float(new_images_generator.v_corner4), &detected_u_corner4, &detected_v_corner4);
517 object_is_detected =
true;
526 return object_is_detected;
// NOTE(review): extraction-garbled fragment of three_random_correspondences
// (original lines 536-561): draws three distinct random match indices whose
// scores pass match_score_threshold, giving up after 100 attempts per index.
// The do{...} halves of the do/while loops are missing from this chunk.
536 if (match_number == 0)
// First index: any match above the score threshold.
542 *n1 =
rand() % match_number;
543 shot++;
if (shot > 100)
return false;
545 while(matches[*n1].score < match_score_threshold);
// Second index: above threshold and distinct from *n1.
550 *n2 =
rand() % match_number;
551 shot++;
if (shot > 100)
return false;
553 while(matches[*n2].score < match_score_threshold || *n2 == *n1);
// Third index: above threshold and distinct from both previous picks.
558 *n3 =
rand() % match_number;
559 shot++;
if (shot > 100)
return false;
561 while(matches[*n3].score < match_score_threshold || *n3 == *n1 || *n3 == *n2);
// NOTE(review): extraction-garbled fragments of the affine RANSAC stage
// (original lines 566-675): support computation plus the sampling loop.
// Most of the estimation math (building matrix A from three
// correspondences) is missing from this chunk.
// Macro aliasing the member threshold (legacy; a const ref would be cleaner,
// but the usage sites are not visible here).
566 #define RANSAC_DIST_THRESHOLD ransac_dist_threshold
// Reset inlier flags before scoring a candidate transformation.
574 for(
int i = 0; i < match_number; i++)
575 matches[i].inlier =
false;
580 for(
int i = 0; i < match_number; i++)
// Degeneracy guard on the candidate's determinant: reject mirror/extreme
// scale solutions (bounds 0..16; rationale not visible -- TODO confirm).
609 if (det < 0. || det > 4 * 4)
// RANSAC main loop: sample 3 correspondences, score support, keep the best,
// stop early once support exceeds ransac_stop_support.
620 int best_support = -1;
628 if (!three_random_correspondences(&n1, &n2, &n3))
660 int support = compute_support_for_affine_transformation(A);
662 if (support > best_support)
664 best_support = support;
665 cvmCopy(A, affine_motion);
666 if (best_support > ransac_stop_support)
669 }
while(iteration < max_ransac_iterations);
// Final support/inlier pass with the winning transformation.
675 int support = compute_support_for_affine_transformation(affine_motion);
// NOTE(review): extraction-garbled fragments (original lines 682-739):
// a linear homography estimation from affine inliers, then the
// homography_error model function used by the nonlinear optimizer.
// --- linear stage: require at least 10 inliers, feed them to H ---
682 int inlier_number = 0;
685 for(i = 0; i < match_number; i++)
686 if (matches[i].inlier)
// Fewer than 10 inliers is treated as a failed detection.
689 if (inlier_number < 10)
691 object_is_detected =
false;
695 H->set_match_number(inlier_number);
696 for(i = 0; i < match_number; i++)
697 if (matches[i].inlier)
// convCoordf maps a coordinate from its pyramid level down to level 0.
699 PyrImage::convCoordf(
float(matches[i].object_point->M[0]),
int(matches[i].object_point->scale), 0),
700 PyrImage::convCoordf(
float(matches[i].object_point->M[1]),
int(matches[i].object_point->scale), 0),
// --- homography_error: apply 8-parameter homography `state` to point d,
// writing the projected point into b (and, below, its Jacobian into J) ---
716 double den = 1. / (state[6] * d[0] + state[7] * d[1] + 1.);
718 b[0] = (state[0] * d[0] + state[1] * d[1] + state[2]) * den;
719 b[1] = (state[3] * d[0] + state[4] * d[1] + state[5]) * den;
// Tail of the Jacobian computation (J[0..13] lost in extraction).
738 J[14] = -b[1] * J[11];
739 J[15] = -b[1] * J[12];
// NOTE(review): extraction-garbled fragment of
// estimate_homographic_transformation_nonlinear_method (original lines
// 745-821). Visible flow: require >=10 affine inliers, seed a
// Levenberg-Marquardt homography fit from the affine estimate, then run two
// re-weighting passes that re-classify inliers by reprojection error
// (non_linear_refine_threshold) and re-fit. Many brace/else lines missing.
745 int inlier_number = 0;
747 for(
int i = 0; i < match_number; i++)
748 if (matches[i].inlier)
751 if (inlier_number < 10)
753 object_is_detected =
false;
757 homography_estimator->set_verbose_level(0);
// First fit: observations are the current inliers.
759 homography_estimator->reset_observations();
760 for(
int i = 0; i < match_number; i++)
761 if (matches[i].inlier) {
// Model point mapped to pyramid level 0 (d); image point goes into b
// (the b-side lines are missing from this chunk).
763 PyrImage::convCoordf(
float(matches[i].object_point->M[0]),
int(matches[i].object_point->scale), 0),
764 PyrImage::convCoordf(
float(matches[i].object_point->M[1]),
int(matches[i].object_point->scale), 0)};
768 homography_estimator->add_observation_2data_2measures(
homography_error, d, b);
// Seed state = the affine estimate; state[6..7] (projective terms)
// presumably start at 0 (their initializer line is missing).
771 double initial_state[8] = {affine_motion->cvmGet(0, 0), affine_motion->cvmGet(0, 1), affine_motion->cvmGet(0, 2),
772 affine_motion->cvmGet(1, 0), affine_motion->cvmGet(1, 1), affine_motion->cvmGet(1, 2),
774 homography_estimator->minimize_using_levenberg_marquardt_from(initial_state);
776 double * state = homography_estimator->state;
// Copy the optimized 8 parameters into H (H[2][2] fixed at 1).
778 H->cvmSet(0, 0, state[0]); H->cvmSet(0, 1, state[1]); H->cvmSet(0, 2, state[2]);
779 H->cvmSet(1, 0, state[3]); H->cvmSet(1, 1, state[4]); H->cvmSet(1, 2, state[5]);
780 H->cvmSet(2, 0, state[6]); H->cvmSet(2, 1, state[7]); H->cvmSet(2, 2, 1.);
// Two refinement passes: re-classify all matches by reprojection error.
782 homography_estimator->reset_observations();
783 for (
int iter=0; iter<2; iter++) {
785 for (
int i=0; i<match_number; i++) {
// Image-point coordinates at level 0.
786 double tu =
PyrImage::convCoordf(matches[i].image_point->u,
int(matches[i].image_point->scale), 0);
787 double tv =
PyrImage::convCoordf(matches[i].image_point->v,
int(matches[i].image_point->scale), 0);
// Model-point coordinates at level 0.
788 double mu =
PyrImage::convCoordf(
float(matches[i].object_point->M[0]),
int(matches[i].object_point->scale), 0);
789 double mv =
PyrImage::convCoordf(
float(matches[i].object_point->M[1]),
int(matches[i].object_point->scale), 0);
791 H->transform_point(mu, mv, &hmu, &hmv);
// Inlier iff reprojection error is under the refine threshold.
795 if (sqrt(du*du+dv*dv)< non_linear_refine_threshold) {
796 matches[i].inlier=
true;
801 homography_estimator->add_observation_2data_2measures(
homography_error, d, b);
804 matches[i].inlier=
false;
// Re-fit after the first pass only, if enough inliers survived.
807 if (iter==0 && inlier_number>10){
808 homography_estimator->minimize_using_levenberg_marquardt_from(state);
809 state = homography_estimator->state;
811 H->cvmSet(0, 0, state[0]); H->cvmSet(0, 1, state[1]); H->cvmSet(0, 2, state[2]);
812 H->cvmSet(1, 0, state[3]); H->cvmSet(1, 1, state[4]); H->cvmSet(1, 2, state[5]);
813 H->cvmSet(2, 0, state[6]); H->cvmSet(2, 1, state[7]); H->cvmSet(2, 2, 1.);
816 if (inlier_number < 10){
817 object_is_detected =
false;
821 homography_estimator->reset_observations();
// NOTE(review): extraction-garbled fragments (original lines 827-867):
// a debug patch-saving helper, search_for_existing_model_point, and a
// comparator used to sort candidate model points by detection count.
// --- debug helper: cut a patch around (u,v) and save it as a BMP ---
827 int u,
int v,
int patch_size,
834 IplImage * p =
mcvGetPatch(image_before_smoothing, u, v, patch_size, patch_size);
835 sprintf(name,
"patches/kp_%03d_%04d.bmp", point_index, call_number);
836 printf(
"Saving image in %s\n", name);
// Second file for the orientation-corrected patch (write call missing).
840 sprintf(name,
"patches/kp_%03d_%04d_corrected.bmp", point_index, call_number);
// --- search_for_existing_model_point: find a tracked candidate point at
// the same pyramid scale within keypoint_distance_threshold of (cu,cv) ---
847 float cu,
float cv,
int scale)
849 for(vector< pair<object_keypoint, int> >::iterator it = tmp_model_points->begin();
850 it < tmp_model_points->end();
// Only candidates on the same pyramid level are comparable.
852 if (
int(it->first.scale) == scale)
// Squared euclidean distance, avoiding a sqrt.
855 (float(it->first.M[0]) - cu) * (
float(it->first.M[0]) - cu) +
856 (
float(it->first.M[1]) - cv) * (
float(it->first.M[1]) - cv);
858 if (dist2 < keypoint_distance_threshold * keypoint_distance_threshold)
// --- comparator: sort candidates by descending detection count ---
867 return p1.second > p2.second;
// NOTE(review): extraction-garbled fragment of
// detect_most_stable_model_points (original lines 873-1032). Visible flow:
// detect keypoints on the original model image, seed a candidate list, then
// generate `view_nb` random affine views, re-detect points in each, vote
// for candidates that keep re-appearing (running-average position), and
// finally keep the most frequently detected points as model points.
// Many loop bodies, braces and K's definition are missing.
873 int view_nb,
double min_view_rate,
876 cout <<
"Determining most stable points:" << endl;
// Candidate pool: (keypoint, times-detected) pairs.
878 vector< pair<object_keypoint, int> > tmp_model_points;
// Initial detection on the un-warped model image (K oversampling factor
// defined outside this chunk).
884 int model_point2d_number = point_detector->pyramidBlurDetect(new_images_generator.original_image, model_points_2d,
885 K * max_point_number_on_model);
// First pass: take ROI-interior points directly as provisional model points.
888 model_point_number = 0;
890 for(
int i = 0; i < model_point2d_number; i++)
893 if (new_images_generator.inside_roi(
897 model_points[model_point_number].M[0] = k->
u;
898 model_points[model_point_number].M[1] = k->
v;
899 model_points[model_point_number].M[2] = 0;
900 model_points[model_point_number].scale = k->
scale;
902 model_point_number++;
904 if (model_point_number >= max_point_number_on_model)
909 save_image_of_model_points(patch_size,
"initial_model_points.bmp");
910 delete [] model_points;
// Seed the voting pool from the same initial detection: merge points that
// fall within keypoint_distance_threshold at the same scale, averaging
// their positions; otherwise start a new candidate with count 1.
914 for(
int i = 0; i < model_point2d_number; i++)
918 if (new_images_generator.inside_roi(
922 pair<object_keypoint, int> * mp = search_for_existing_model_point(&tmp_model_points, k->
u, k->
v,
int(k->
scale));
// Running average of the merged point's position.
926 double n = double(++mp->second);
927 mp->first.M[0] = (mp->first.M[0]*(n-1) + k->
u)/n;
928 mp->first.M[1] = (mp->first.M[1]*(n-1) + k->
v)/n;
937 tmp_model_points.push_back(pair<object_keypoint, int>(op, 1));
// Voting loop over synthetic views; random background disabled during it
// and restored afterwards.
944 bool use_random_background = new_images_generator.use_random_background;
945 new_images_generator.set_use_random_background(
false);
947 for(
int j = 0; j < view_nb; j++)
949 if (LearnProgress!=0)
952 cout <<
"Generating views: " << view_nb - j <<
" \r" << flush;
954 new_images_generator.generate_random_affine_transformation();
955 new_images_generator.generate_object_view();
957 int current_detected_point_number = point_detector->detect(&(new_images_generator.smoothed_generated_object_view->image),
958 model_points_2d, K * max_point_number_on_model);
960 for(
int i = 0; i < current_detected_point_number; i++)
// (nu, nv) presumably = detected point mapped back into model frame;
// the inverse-transform lines are missing -- TODO confirm.
975 if (new_images_generator.inside_roi(
979 pair<object_keypoint, int> * mp = search_for_existing_model_point(&tmp_model_points, nu, nv,
int(k->
scale));
985 double n = double(++mp->second);
986 mp->first.M[0] = (mp->first.M[0]*(n-1) + nu)/n;
987 mp->first.M[1] = (mp->first.M[1]*(n-1) + nv)/n;
996 tmp_model_points.push_back(pair<object_keypoint, int>(op, 1));
1002 new_images_generator.set_use_random_background(use_random_background);
// Selection: keep candidates seen in at least min_view_rate of the views,
// up to max_point_number_on_model (list presumably sorted by count first;
// the sort call is missing from this chunk).
1006 int min_views = int(min_view_rate * (
double)view_nb);
1009 vector< pair<object_keypoint, int> >::iterator it;
1011 for( it = tmp_model_points.begin(), i = 0;
1012 (it < tmp_model_points.end()) && (i < max_point_number_on_model) && (it->second >= min_views);
1015 model_points[i].
M[0] = it->first.M[0];
1016 model_points[i].M[1] = it->first.M[1];
1017 model_points[i].M[2] = it->first.M[2];
1018 model_points[i].scale = it->first.scale;
1019 model_points[i].class_index = i;
1022 cout <<
"Point " << i <<
" detected " << it->second <<
" times." << endl;
// Log the first rejected candidate for diagnostics.
1024 cout <<
"... Point " << i <<
" detected " << it->second <<
" times ("
1025 << view_nb <<
" generated views, "
1026 << min_view_rate*100 <<
"% minimum rate)." << endl;
1028 model_point_number = i;
1030 new_images_generator.set_object_keypoints(model_points, model_point_number);
1032 delete [] model_points_2d;
// NOTE(review): extraction-garbled fragments (original lines 1041-1099):
// save_image_of_model_points, the detect_and_draw-style entry point, and
// the corner-outline drawing routine. Signatures and several statements
// are missing; leading integers are original line numbers.
// --- save_image_of_model_points: circle each model point on a color copy ---
1041 IplImage* model_image =
mcvGrayToColor(new_images_generator.original_image);
1043 for(
int i = 0; i < model_point_number; i++)
1044 cvCircle(model_image,
1055 cvReleaseImage(&model_image);
// --- draw entry point: optional layers over a model+input composite ---
1060 bool p_draw_matches,
1062 bool p_draw_model_image,
1065 concat_model_and_input_images(input_image, p_draw_model_image);
1067 if (p_draw_points) draw_points(line_width);
1068 if (p_draw_model_image)
1070 if (p_draw_matches && object_is_detected) draw_matches(line_width);
1071 if (p_draw_object && object_is_detected) draw_model();
1074 if (p_draw_object && object_is_detected) draw_model();
// Hand the composite to the caller; ownership transferred by nulling the
// member before (presumably) returning `im`.
1076 IplImage *im= model_and_input_images;
1077 model_and_input_images=0;
// --- draw_model: detected quadrilateral, thick black under thin white ---
1083 CvPoint C1 =
cvPoint(
int(u_input_image + detected_u_corner1),
int(v_input_image + detected_v_corner1));
1084 CvPoint C2 =
cvPoint(
int(u_input_image + detected_u_corner2),
int(v_input_image + detected_v_corner2));
1085 CvPoint C3 =
cvPoint(
int(u_input_image + detected_u_corner3),
int(v_input_image + detected_v_corner3));
1086 CvPoint C4 =
cvPoint(
int(u_input_image + detected_u_corner4),
int(v_input_image + detected_v_corner4));
// Black outline (width 6) first, for contrast...
1089 col = cvScalar(0, 0, 0);
1090 cvLine(model_and_input_images, C1, C2, col, 6);
1091 cvLine(model_and_input_images, C2, C3, col, 6);
1092 cvLine(model_and_input_images, C3, C4, col, 6);
1093 cvLine(model_and_input_images, C4, C1, col, 6);
// ...then the white quad (width 2) on top.
1095 col = cvScalar(255, 255, 255);
1096 cvLine(model_and_input_images, C1, C2, col, 2);
1097 cvLine(model_and_input_images, C2, C3, col, 2);
1098 cvLine(model_and_input_images, C3, C4, col, 2);
1099 cvLine(model_and_input_images, C4, C1, col, 2);
// NOTE(review): extraction-garbled fragments of three debug visualization
// routines (original lines 1104-1260) that each write per-point/per-match
// BMPs under `matches_dir`. Loop bodies and several drawing arguments are
// missing; leading integers are original line numbers.
// --- per detected input point: lines to every model point whose match
// probability exceeds 0.05, brightness proportional to probability ---
1104 int patch_size = forest->image_width;
1106 for(
int i = 0; i < detected_point_number; i++)
1108 concat_model_and_input_images(input_image);
1109 draw_input_image_points();
// Border guard, same form as in the matching pass.
1115 if (u > (patch_size/2) && u < object_input_view->image[s]->width - (patch_size/2) &&
1116 v > (patch_size/2) && v < object_input_view->image[s]->height - (patch_size/2))
1121 cvCircle(model_and_input_images, ip,
1124 float best_P = match_probabilities[i][0];
1125 int index_best_P = 0;
1126 for(
int j = 0; j < model_point_number; j++)
1128 if (match_probabilities[i][j] > 0.05)
// Gray level scaled so P==0.2 saturates at 255.
1131 int level = MIN(255,
int(match_probabilities[i][j] / 0.2 * 255));
1132 CvScalar col = cvScalar(level, level, level);
1139 cvLine(model_and_input_images, ip, mp, col, 1);
1141 if (best_P < match_probabilities[i][j])
1143 best_P = match_probabilities[i][j];
// Re-draw the best match last so it stays visible.
1150 int level = MIN(255,
int(match_probabilities[i][index_best_P] / 0.2 * 255));
1151 CvScalar col = cvScalar(level, level, level);
1157 cvLine(model_and_input_images, ip, mp, col, 1);
1161 char filename[10000];
1162 sprintf(filename,
"%s/match%04d.bmp", matches_dir, i);
// --- per model point: best matching detected point, black-under-white ---
1170 int patch_size = forest->image_width;
1172 for(
int j = 0; j < model_point_number; j++)
1174 concat_model_and_input_images(input_image);
1180 cvCircle(model_and_input_images, mp,
1182 cvCircle(model_and_input_images, mp,
1185 float best_P = match_probabilities[0][j];
1186 int index_best_P = 0;
1187 for(
int i = 0; i < detected_point_number; i++)
1193 if (u > (patch_size / 2) && u < object_input_view->image[s]->width - (patch_size / 2) &&
1194 v > (patch_size / 2) && v < object_input_view->image[s]->height - (patch_size / 2))
1196 if (best_P < match_probabilities[i][j])
1198 best_P = match_probabilities[i][j];
1212 cvCircle(model_and_input_images, ip,
1214 cvScalar(0, 0, 0), 3);
1215 cvLine(model_and_input_images, ip, mp, cvScalar(0, 0, 0), 3);
1217 cvCircle(model_and_input_images, ip,
1219 cvScalar(255, 255, 255), 2);
1220 cvLine(model_and_input_images, ip, mp, cvScalar(255, 255, 255), 2);
1223 char image_name[1000];
1224 sprintf(image_name,
"%s/match%04d.bmp", matches_dir, j);
// --- per match: endpoints circled on the composite, one BMP per match ---
1232 int patch_size = forest->image_width;
1234 for(
int i = 0; i < match_number; i++)
1241 concat_model_and_input_images(input_image);
1246 cvCircle(model_and_input_images,
1248 cvCircle(model_and_input_images,
1254 cvCircle(model_and_input_images,
1256 cvCircle(model_and_input_images,
1259 char image_name[1000];
1260 sprintf(image_name,
"%s/match%04d.bmp", matches_dir, i);
// NOTE(review): extraction-garbled fragments (original lines 1268-1359):
// concat_model_and_input_images plus the draw_points / draw_model_points /
// draw_input_image_points / draw_matches helpers. Signatures, circle
// centers and several arguments are missing.
// --- concat: build composite canvas (model stacked above input, 10px gap,
// gray background) or just clone the input when the model isn't drawn ---
1268 if (model_and_input_images != 0)
1269 cvReleaseImage(&model_and_input_images);
1271 if (draw_model_image)
1273 model_and_input_images =
1274 cvCreateImage(cvSize(max(new_images_generator.original_image->width, input_image->width),
1275 new_images_generator.original_image->height + input_image->height + 10), IPL_DEPTH_8U, 3);
1277 cvSet(model_and_input_images, cvScalar(128, 128, 128));
1281 mcvPut(model_and_input_images, new_images_generator.original_image, x0, y0);
// Input pasted below the model image, offset by its height + 10px gap.
1284 v_input_image = new_images_generator.original_image->height + 10;
1285 mcvPut(model_and_input_images, input_image, u_input_image, v_input_image);
1289 model_and_input_images = cvCloneImage(input_image);
// --- draw_points: both point sets ---
1301 draw_model_points(line_width);
1302 draw_input_image_points(line_width);
// --- draw_model_points: blue circle per model point, radius scaled from
// its pyramid level to level 0 ---
1307 int patch_size = forest->image_width;
1309 for(
int i = 0; i < model_point_number; i++)
1310 cvCircle(model_and_input_images,
1313 (
int)
PyrImage::convCoordf(patch_size/2.f,
int(model_points[i].scale), 0), CV_RGB(0, 0, 255), line_width);
// --- draw_input_image_points: circles in the input half of the composite
// (v offset by v_input_image) ---
1318 int patch_size = forest->image_width;
1320 for(
int i = 0; i < detected_point_number; i++)
1321 cvCircle(model_and_input_images,
1323 v_input_image + (
int)
PyrImage::convCoordf(detected_points[i].v,
int(detected_points[i].scale), 0)),
// --- draw_matches: white line per match; inlier variant draws red ---
1330 for(
int i = 0; i < match_number; i++)
1336 CvScalar col = cvScalar(255, 255, 255);
1338 cvLine(model_and_input_images,
1350 for(
int i = 0; i < match_number; i++)
1356 cvLine(model_and_input_images,
1359 CV_RGB(255,0,0), line_width);
// NOTE(review): extraction-garbled fragment of check_target_size (original
// lines 1369-1391): if the incoming image's size differs from the current
// input-view buffers, rebuild the orientation corrector, the input view,
// and the point detector for the new dimensions (preserving the detector's
// radius). The early-return body and some construction lines are missing.
1369 int w = image->width;
1370 int h = image->height;
1371 int nbLev = new_images_generator.level_number;
// Fast path: buffers already match the incoming size (return is missing
// from this chunk but implied by the comparison).
1373 if (object_input_view->image[0]->width == w &&
1374 object_input_view->image[0]->height ==h)
// Rebuild size-dependent helpers.
1381 if (new_images_generator.orientation_corrector != 0)
1382 delete new_images_generator.orientation_corrector;
1384 w,h, new_images_generator.patch_size, nbLev);
1386 delete object_input_view;
// Recreate the detector at the new size, keeping the configured radius.
1388 int yape_radius = point_detector->get_radius();
1389 delete point_detector;
1390 point_detector =
new pyr_yape(w,h,nbLev);
1391 point_detector->set_radius(yape_radius);