<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<!-- Fix: "text/xhtml" is not a registered MIME type; XHTML delivered as HTML
     is declared with "text/html" (see W3C XHTML 1.0, Appendix C). -->
<meta http-equiv="Content-Type" content="text/html;charset=UTF-8"/>
<title>GSoC2011SfM: D:/Travail/These/Determination caracteristiques camera/GSoC/SfM/src/EuclideanEstimator.cpp Source File</title>

<link href="tabs.css" rel="stylesheet" type="text/css"/>
<link href="doxygen.css" rel="stylesheet" type="text/css"/>

<link href="search/search.css" rel="stylesheet" type="text/css"/>
<script type="text/javascript" src="jquery.js"></script>
<script type="text/javascript" src="search/search.js"></script>
<script type="text/javascript">
  /* Runs after DOM ready, so the global searchBox created later in the body
     already exists; selects the first search scope as the default. */
  $(document).ready(function() { searchBox.OnSelectItem(0); });
</script>

</head>
<body>
<div id="top"><!-- do not remove this div! -->


<!-- Page banner: project logo, project name, version number and one-line
     description. Structure and ids are generated by Doxygen and matched by
     selectors in doxygen.css; do not restructure. -->
<div id="titlearea">
<table cellspacing="0" cellpadding="0">
 <tbody>
 <tr style="height: 56px;">
  
  <td id="projectlogo"><img alt="Logo" src="logo.png"/></td>
  
  
  <td style="padding-left: 0.5em;">
   <div id="projectname">GSoC2011SfM
   &#160;<span id="projectnumber">0.1</span>
   </div>
   <div id="projectbrief">Google Summer of Code 2011: Structure from motion</div>
  </td>
  
  
  
 </tr>
 </tbody>
</table>
</div>

<!-- Generated by Doxygen 1.7.5.1 -->
<!-- Creates the global SearchBox instance (defined in search/search.js).
     The variable name "searchBox" is referenced by the inline handlers of the
     search widget and by the head script, so it must stay global.
     Arguments: element id, search results path, external-window flag, label. -->
<script type="text/javascript">
var searchBox = new SearchBox("searchBox", "search",false,'Search');
</script>
  <!-- Primary tab bar: Main Page / Classes / Files plus the interactive
       search widget. The inline on* handlers and element ids (MSearchBox,
       MSearchSelect, MSearchField, MSearchClose) are the contract expected
       by search/search.js; keep them in sync with that script. -->
  <div id="navrow1" class="tabs">
    <ul class="tablist">
      <li><a href="index.html"><span>Main&#160;Page</span></a></li>
      <li><a href="annotated.html"><span>Classes</span></a></li>
      <li class="current"><a href="files.html"><span>Files</span></a></li>
      <li>
        <div id="MSearchBox" class="MSearchBoxInactive">
        <span class="left">
          <img id="MSearchSelect" src="search/mag_sel.png"
               onmouseover="return searchBox.OnSearchSelectShow()"
               onmouseout="return searchBox.OnSearchSelectHide()"
               alt=""/>
          <input type="text" id="MSearchField" value="Search" accesskey="S"
               onfocus="searchBox.OnSearchFieldFocus(true)" 
               onblur="searchBox.OnSearchFieldFocus(false)" 
               onkeyup="searchBox.OnSearchFieldChange(event)"/>
          </span><span class="right">
            <a id="MSearchClose" href="javascript:searchBox.CloseResultsWindow()"><img id="MSearchCloseImg" border="0" src="search/close.png" alt=""/></a>
          </span>
        </div>
      </li>
    </ul>
  </div>
  <!-- Secondary tab bar: sub-navigation shown while the "Files" tab is active. -->
  <div id="navrow2" class="tabs2">
    <ul class="tablist">
      <li><a href="files.html"><span>File&#160;List</span></a></li>
    </ul>
  </div>
<!-- Page header: absolute path of the C++ source file documented below.
     The Windows path is emitted verbatim by Doxygen from the build machine. -->
<div class="header">
  <div class="headertitle">
<div class="title">D:/Travail/These/Determination caracteristiques camera/GSoC/SfM/src/EuclideanEstimator.cpp</div>  </div>
</div>
<div class="contents">
<div class="fragment"><pre class="fragment"><a name="l00001"></a>00001 
<a name="l00002"></a>00002 
<a name="l00003"></a>00003 <span class="preprocessor">#include &lt;pcl/point_types.h&gt;</span>
<a name="l00004"></a>00004 <span class="preprocessor">#include &quot;libmv/multiview/five_point.h&quot;</span>
<a name="l00005"></a>00005 <span class="preprocessor">#include &quot;libmv/multiview/affine.h&quot;</span>
<a name="l00006"></a>00006 <span class="preprocessor">#include &quot;libmv/multiview/fundamental.h&quot;</span>
<a name="l00007"></a>00007 <span class="preprocessor">#include &quot;libmv/multiview/robust_fundamental.h&quot;</span>
<a name="l00008"></a>00008 
<a name="l00009"></a>00009 <span class="preprocessor">#include &lt;Eigen/Eigenvalues&gt;</span>
<a name="l00010"></a>00010 
<a name="l00011"></a>00011 <span class="preprocessor">#include &lt;pcl/io/vtk_io.h&gt;</span>
<a name="l00012"></a>00012 <span class="preprocessor">#include &lt;sstream&gt;</span>
<a name="l00013"></a>00013 
<a name="l00014"></a>00014 <span class="preprocessor">#include &quot;EuclideanEstimator.h&quot;</span>
<a name="l00015"></a>00015 <span class="preprocessor">#include &quot;StructureEstimator.h&quot;</span>
<a name="l00016"></a>00016 <span class="preprocessor">#include &quot;Camera.h&quot;</span>
<a name="l00017"></a>00017 <span class="preprocessor">#include &quot;Visualizer.h&quot;</span>
<a name="l00018"></a>00018 <span class="preprocessor">#include &quot;PCL_mapping.h&quot;</span>
<a name="l00019"></a>00019 <span class="preprocessor">#include &quot;bundle_related.h&quot;</span>
<a name="l00020"></a>00020 
<a name="l00021"></a>00021 <span class="keyword">using</span> std::vector;
<a name="l00022"></a>00022 <span class="keyword">using</span> cv::Ptr;
<a name="l00023"></a>00023 
<a name="l00024"></a>00024 <span class="keyword">namespace </span>OpencvSfM{
<a name="l00025"></a>00025   <span class="comment">//the next two functions are only for internal usage, no external interface...</span>
<a name="l00026"></a>00026 
<a name="l00030"></a>00030   <span class="keywordtype">double</span> SampsonDistance2( <span class="keyword">const</span> libmv::Mat &amp;F,
<a name="l00031"></a>00031     <span class="keyword">const</span> libmv::Mat2X &amp;x1, <span class="keyword">const</span> libmv::Mat2X &amp;x2 ) {
<a name="l00032"></a>00032     <span class="keywordtype">double</span> error_total= 0.0;
<a name="l00033"></a>00033     libmv::Vec2 x1_i,x2_i;
<a name="l00034"></a>00034     <span class="keywordtype">unsigned</span> <span class="keywordtype">int</span> n_points = x1.rows( );
<a name="l00035"></a>00035     <span class="keywordflow">for</span>( <span class="keywordtype">unsigned</span> <span class="keywordtype">int</span> i = 0; i &lt; n_points ; ++i )
<a name="l00036"></a>00036     {
<a name="l00037"></a>00037       x1_i = x1.col( i );
<a name="l00038"></a>00038       x2_i = x2.col( i );
<a name="l00039"></a>00039 
<a name="l00040"></a>00040       error_total += libmv::SampsonDistance( F, x1_i, x2_i );
<a name="l00041"></a>00041     }
<a name="l00042"></a>00042 
<a name="l00043"></a>00043     <span class="keywordflow">return</span> error_total;
<a name="l00044"></a>00044   }
<a name="l00045"></a>00045 
<a name="l00049"></a>00049   <span class="keywordtype">double</span> robust5Points( <span class="keyword">const</span> libmv::Mat2X &amp;x1, <span class="keyword">const</span> libmv::Mat2X &amp;x2,
<a name="l00050"></a>00050     <span class="keyword">const</span> libmv::Mat3 &amp;K1, <span class="keyword">const</span> libmv::Mat3 &amp;K2,
<a name="l00051"></a>00051     libmv::Mat3 &amp;E )
<a name="l00052"></a>00052   {
<a name="l00053"></a>00053     <span class="keywordtype">unsigned</span> <span class="keywordtype">int</span> nPoints = x1.cols( );
<a name="l00054"></a>00054     CV_DbgAssert( nPoints == x2.cols( ) );
<a name="l00055"></a>00055     CV_DbgAssert( nPoints &gt;= 5 );<span class="comment">//need 5 points!</span>
<a name="l00056"></a>00056 
<a name="l00057"></a>00057     cv::RNG&amp; rng = cv::theRNG( );
<a name="l00058"></a>00058     vector&lt;int&gt; masks( nPoints );
<a name="l00059"></a>00059     <span class="keywordtype">double</span> max_error = 1e9;
<a name="l00060"></a>00060 
<a name="l00061"></a>00061     <span class="keywordtype">int</span> num_iter=0, max_iter=1500;
<a name="l00062"></a>00062     <span class="keywordflow">for</span>( num_iter=0; num_iter&lt;max_iter; ++num_iter )
<a name="l00063"></a>00063     {
<a name="l00064"></a>00064       masks.assign( nPoints, 0 );
<a name="l00065"></a>00065       <span class="keywordtype">int</span> nb_vals=0;
<a name="l00066"></a>00066       <span class="comment">//choose 5 random points:</span>
<a name="l00067"></a>00067       <span class="keywordflow">while</span>( nb_vals &lt; 5 )
<a name="l00068"></a>00068       {
<a name="l00069"></a>00069         <span class="keywordtype">int</span> valTmp = rng( nPoints );
<a name="l00070"></a>00070         <span class="keywordflow">if</span>( masks[ valTmp ] == 0 )
<a name="l00071"></a>00071         {
<a name="l00072"></a>00072           masks[ valTmp ] = 1;
<a name="l00073"></a>00073           nb_vals++;
<a name="l00074"></a>00074         }
<a name="l00075"></a>00075       }
<a name="l00076"></a>00076       <span class="comment">//mix the random generator:</span>
<a name="l00077"></a>00077       rng( rng( nPoints ) );
<a name="l00078"></a>00078       <span class="comment">//create mask:</span>
<a name="l00079"></a>00079       libmv::Mat2X x1_tmp,x2_tmp;
<a name="l00080"></a>00080       x1_tmp.resize( 2,nb_vals );
<a name="l00081"></a>00081       x2_tmp.resize( 2,nb_vals );
<a name="l00082"></a>00082       nb_vals=0;
<a name="l00083"></a>00083       <span class="keywordtype">unsigned</span> <span class="keywordtype">int</span> i;
<a name="l00084"></a>00084       <span class="keywordflow">for</span>( i = 0; i&lt;nPoints; ++i )
<a name="l00085"></a>00085       {
<a name="l00086"></a>00086         <span class="keywordflow">if</span>( masks[ i ] != 0 )
<a name="l00087"></a>00087         {
<a name="l00088"></a>00088           x1_tmp( 0,nb_vals ) = x1( 0,i );
<a name="l00089"></a>00089           x1_tmp( 1,nb_vals ) = x1( 1,i );
<a name="l00090"></a>00090           x2_tmp( 0,nb_vals ) = x2( 0,i );
<a name="l00091"></a>00091           x2_tmp( 1,nb_vals ) = x2( 1,i );
<a name="l00092"></a>00092           nb_vals++;
<a name="l00093"></a>00093         }
<a name="l00094"></a>00094       }
<a name="l00095"></a>00095       libmv::vector&lt;libmv::Mat3, Eigen::aligned_allocator&lt;libmv::Mat3&gt; &gt; Es(10);
<a name="l00096"></a>00096       libmv::FivePointsRelativePose( x1_tmp,x2_tmp,&amp;Es );
<a name="l00097"></a>00097       <span class="keywordtype">unsigned</span> <span class="keywordtype">int</span> num_hyp = Es.size( );
<a name="l00098"></a>00098       <span class="keywordflow">for</span> ( i = 0; i &lt; num_hyp; i++ ) {
<a name="l00099"></a>00099 
<a name="l00100"></a>00100         libmv::Mat3 F;
<a name="l00101"></a>00101         libmv::FundamentalFromEssential( Es[ i ], K1, K2, &amp;F );
<a name="l00102"></a>00102         <span class="keywordtype">double</span> error = SampsonDistance2( F, x1, x2 );
<a name="l00103"></a>00103 
<a name="l00104"></a>00104         <span class="keywordflow">if</span> ( max_error &gt; error ) {
<a name="l00105"></a>00105           max_error = error;
<a name="l00106"></a>00106           E = Es[ i ];
<a name="l00107"></a>00107         }
<a name="l00108"></a>00108       }
<a name="l00109"></a>00109     }
<a name="l00110"></a>00110     <span class="keywordflow">return</span> max_error;
<a name="l00111"></a>00111   }
<a name="l00112"></a>00112 
<a name="l00113"></a>00113 
<a name="l00114"></a><a class="code" href="class_opencv_sf_m_1_1_euclidean_estimator.html#ad0a71587b4ad3ffa445db2a03aacbb31">00114</a>   <a class="code" href="class_opencv_sf_m_1_1_euclidean_estimator.html#ad0a71587b4ad3ffa445db2a03aacbb31">EuclideanEstimator::EuclideanEstimator</a>( <a class="code" href="class_opencv_sf_m_1_1_sequence_analyzer.html" title="This class tries to match points in the entire sequence. It follow ideas proposed by Noah Snavely: Mo...">SequenceAnalyzer</a> &amp;sequence,
<a name="l00115"></a>00115     vector&lt;PointOfView&gt;&amp; cameras )
<a name="l00116"></a>00116     :sequence_( sequence ),cameras_( cameras )
<a name="l00117"></a>00117   {
<a name="l00118"></a>00118     vector&lt;PointOfView&gt;::iterator itPoV=cameras.begin( );
<a name="l00119"></a>00119     <span class="keywordflow">while</span> ( itPoV!=cameras.end( ) )
<a name="l00120"></a>00120     {
<a name="l00121"></a>00121       <a class="code" href="class_opencv_sf_m_1_1_euclidean_estimator.html#a9cde85fe09d8dfb249a1c99ad8948e18">addNewPointOfView</a>( *itPoV );
<a name="l00122"></a>00122       itPoV++;
<a name="l00123"></a>00123     }
<a name="l00124"></a>00124     <a class="code" href="class_opencv_sf_m_1_1_euclidean_estimator.html#a4f7ae927e99fdb0b6bb3125208aa1663" title="index of camera set as origin...">index_origin</a> = 0;
<a name="l00125"></a>00125   }
<a name="l00126"></a>00126 
<a name="l00127"></a><a class="code" href="class_opencv_sf_m_1_1_euclidean_estimator.html#a4fc02c7a735abdb3985fc6439cd06f05">00127</a>   <a class="code" href="class_opencv_sf_m_1_1_euclidean_estimator.html#a4fc02c7a735abdb3985fc6439cd06f05">EuclideanEstimator::~EuclideanEstimator</a>( <span class="keywordtype">void</span> )
<a name="l00128"></a>00128   {
<a name="l00129"></a>00129     <span class="comment">//TODO!!!!</span>
<a name="l00130"></a>00130   }
<a name="l00131"></a>00131 
<a name="l00132"></a><a class="code" href="class_opencv_sf_m_1_1_euclidean_estimator.html#a9cde85fe09d8dfb249a1c99ad8948e18">00132</a>   <span class="keywordtype">void</span> <a class="code" href="class_opencv_sf_m_1_1_euclidean_estimator.html#a9cde85fe09d8dfb249a1c99ad8948e18">EuclideanEstimator::addNewPointOfView</a>( <span class="keyword">const</span> <a class="code" href="class_opencv_sf_m_1_1_point_of_view.html" title="This class represent the 3D position of the device which take the pictures. The role of the class is ...">PointOfView</a>&amp; camera )
<a name="l00133"></a>00133   {
<a name="l00134"></a>00134     libmv::Mat3 intra_param;
<a name="l00135"></a>00135     cv::Ptr&lt;Camera&gt; intra=camera.<a class="code" href="class_opencv_sf_m_1_1_point_of_view.html#ae5de2e16eac553ea1e1e7501e009bbbd">getIntraParameters</a>( );
<a name="l00136"></a>00136     <span class="comment">//transpose because libmv needs intra params this way...</span>
<a name="l00137"></a>00137     cv::cv2eigen( intra-&gt;getIntraMatrix( ).t(), intra_param );
<a name="l00138"></a>00138     <a class="code" href="class_opencv_sf_m_1_1_euclidean_estimator.html#a434eb8f3da74122c2dd7fd323569a629" title="Intra parameters of cameras (don&#39;t use them, they are strongly related to cameras_ attribut!...">intra_params_</a>.push_back( intra_param );
<a name="l00139"></a>00139     libmv::Mat3 rotation_mat;
<a name="l00140"></a>00140     cv::cv2eigen( camera.<a class="code" href="class_opencv_sf_m_1_1_point_of_view.html#af13aa49ebfdb95b056b20825b00d3866">getRotationMatrix</a>( ), rotation_mat );
<a name="l00141"></a>00141     <a class="code" href="class_opencv_sf_m_1_1_euclidean_estimator.html#a4649831fdd99cb15be351928dc69213b" title="rotations matrix of cameras (don&#39;t use them, they are strongly related to cameras_ attribut!...">rotations_</a>.push_back( rotation_mat );
<a name="l00142"></a>00142     libmv::Vec3 translation_vec;
<a name="l00143"></a>00143     cv::cv2eigen( camera.<a class="code" href="class_opencv_sf_m_1_1_point_of_view.html#a79b41aa3dc0ff0f7f5c0d821a8688f1f">getTranslationVector</a>( ), translation_vec );
<a name="l00144"></a>00144     <a class="code" href="class_opencv_sf_m_1_1_euclidean_estimator.html#a5b5ca96b398aad275d7ffd79c492d9ed" title="translation vectors of cameras (don&#39;t use them, they are strongly related to cameras_ attribut!...">translations_</a>.push_back( translation_vec );
<a name="l00145"></a>00145     <a class="code" href="class_opencv_sf_m_1_1_euclidean_estimator.html#a678b38f149b13435499e5e3f8bc83ca3" title="List of camera computed.">camera_computed_</a>.push_back( <span class="keyword">false</span> );
<a name="l00146"></a>00146   }
<a name="l00147"></a>00147 
<a name="l00148"></a><a class="code" href="class_opencv_sf_m_1_1_euclidean_estimator.html#a9790538fdb197f1cebd4f9f26f85b975">00148</a>   <span class="keywordtype">void</span> <a class="code" href="class_opencv_sf_m_1_1_euclidean_estimator.html#a9790538fdb197f1cebd4f9f26f85b975">EuclideanEstimator::bundleAdjustement</a>( )
<a name="l00149"></a>00149   {
<a name="l00150"></a>00150     <span class="comment">//wrap the lourakis SBA:</span>
<a name="l00151"></a>00151     
<a name="l00152"></a>00152     <span class="keywordtype">unsigned</span> <span class="keywordtype">int</span> n = <a class="code" href="class_opencv_sf_m_1_1_euclidean_estimator.html#a18a1079d31b360865cf58088f71e7a36" title="list of 3D points computed">point_computed_</a>.size( ),   <span class="comment">// number of points</span>
<a name="l00153"></a>00153       ncon = 0,<span class="comment">// number of points (starting from the 1st) whose parameters should not be modified.</span>
<a name="l00154"></a>00154       m = 0,   <span class="comment">// number of images (or camera)</span>
<a name="l00155"></a>00155       mcon = 1,<span class="comment">// number of cameras (starting from the 1st) whose parameters should not be modified.</span>
<a name="l00156"></a>00156       cnp = 6,<span class="comment">// number of parameters for ONE camera; e.g. 6 for Euclidean cameras</span>
<a name="l00157"></a>00157       <span class="comment">//use only vector part of quaternion to enforce the unit length...</span>
<a name="l00158"></a>00158       pnp = 3,<span class="comment">// number of parameters for ONE 3D point; e.g. 3 for Euclidean points</span>
<a name="l00159"></a>00159       mnp = 2;<span class="comment">// number of parameters for ONE projected point; e.g. 2 for Euclidean points</span>
<a name="l00160"></a>00160 
<a name="l00161"></a>00161     <span class="keywordtype">unsigned</span> <span class="keywordtype">int</span> i = 0, j = 0,
<a name="l00162"></a>00162       nb_cam = <a class="code" href="class_opencv_sf_m_1_1_euclidean_estimator.html#a678b38f149b13435499e5e3f8bc83ca3" title="List of camera computed.">camera_computed_</a>.size( );
<a name="l00163"></a>00163     vector&lt; Ptr&lt; PointsToTrack &gt; &gt; &amp;points_to_track = <a class="code" href="class_opencv_sf_m_1_1_euclidean_estimator.html#a6b9217240500126545a9b3696f9bdcbe" title="Object containing all 2D information of this sequence.">sequence_</a>.<a class="code" href="class_opencv_sf_m_1_1_sequence_analyzer.html#aefda449985e3013c6ac2adc5f61151d4">getPoints</a>( );
<a name="l00164"></a>00164 
<a name="l00165"></a>00165     <span class="comment">//because some points are sometime not visible:</span>
<a name="l00166"></a>00166     vector&lt;int&gt; idx_cameras;
<a name="l00167"></a>00167     idx_cameras.push_back( <a class="code" href="class_opencv_sf_m_1_1_euclidean_estimator.html#a4f7ae927e99fdb0b6bb3125208aa1663" title="index of camera set as origin...">index_origin</a> );
<a name="l00168"></a>00168 
<a name="l00169"></a>00169     libmv::vector&lt; libmv::Mat3 &gt; intra_p;
<a name="l00170"></a>00170     std::vector&lt;bool&gt; pointOK;
<a name="l00171"></a>00171     <span class="keywordtype">int</span> nbPoints = 0;
<a name="l00172"></a>00172     <span class="keywordflow">for</span> ( j = 0; j &lt; n; ++j )
<a name="l00173"></a>00173     {<span class="comment">//for each 3D point:</span>
<a name="l00174"></a>00174       <span class="comment">//test if at least 2 views see this point:</span>
<a name="l00175"></a>00175       <span class="keywordtype">int</span> nbCam = 0;
<a name="l00176"></a>00176       <span class="keywordflow">for</span>(<span class="keywordtype">size_t</span> k =0; k&lt;nb_cam; ++k)
<a name="l00177"></a>00177       {
<a name="l00178"></a>00178         <span class="keywordflow">if</span>(<a class="code" href="class_opencv_sf_m_1_1_euclidean_estimator.html#a678b38f149b13435499e5e3f8bc83ca3" title="List of camera computed.">camera_computed_</a>[ k ] &amp;&amp; <a class="code" href="class_opencv_sf_m_1_1_euclidean_estimator.html#a18a1079d31b360865cf58088f71e7a36" title="list of 3D points computed">point_computed_</a>[ j ].containImage( k ))
<a name="l00179"></a>00179           nbCam++;
<a name="l00180"></a>00180       }
<a name="l00181"></a>00181       pointOK.push_back( nbCam&gt;=2 );
<a name="l00182"></a>00182       <span class="keywordflow">if</span>(pointOK[j])
<a name="l00183"></a>00183         nbPoints++;
<a name="l00184"></a>00184     }
<a name="l00185"></a>00185 
<a name="l00186"></a>00186     <span class="keywordtype">int</span> nz_count = 0;
<a name="l00187"></a>00187     <span class="keywordflow">for</span> ( i = 0; i &lt; nb_cam; ++i )
<a name="l00188"></a>00188     {<span class="comment">//for each camera:</span>
<a name="l00189"></a>00189       <span class="keywordflow">if</span>( <a class="code" href="class_opencv_sf_m_1_1_euclidean_estimator.html#a678b38f149b13435499e5e3f8bc83ca3" title="List of camera computed.">camera_computed_</a>[ i ] )
<a name="l00190"></a>00190       {
<a name="l00191"></a>00191         <span class="keywordflow">if</span>( i!=<a class="code" href="class_opencv_sf_m_1_1_euclidean_estimator.html#a4f7ae927e99fdb0b6bb3125208aa1663" title="index of camera set as origin...">index_origin</a> )<span class="comment">//index_origin is already added...</span>
<a name="l00192"></a>00192           idx_cameras.push_back(i);
<a name="l00193"></a>00193         m++;<span class="comment">//increment of camera count</span>
<a name="l00194"></a>00194 
<a name="l00195"></a>00195         <span class="keywordtype">int</span> nb_projection = 0;
<a name="l00196"></a>00196         <span class="keywordflow">for</span> ( j = 0; j &lt; n; ++j )
<a name="l00197"></a>00197         {<span class="comment">//for each 3D point:</span>
<a name="l00198"></a>00198           <span class="keywordflow">if</span>( pointOK[j])
<a name="l00199"></a>00199           {
<a name="l00200"></a>00200             <span class="keywordflow">if</span>( <a class="code" href="class_opencv_sf_m_1_1_euclidean_estimator.html#a18a1079d31b360865cf58088f71e7a36" title="list of 3D points computed">point_computed_</a>[ j ].containImage( i ) )
<a name="l00201"></a>00201               nb_projection++;
<a name="l00202"></a>00202           }
<a name="l00203"></a>00203         }
<a name="l00204"></a>00204         nz_count += nb_projection;
<a name="l00205"></a>00205       }
<a name="l00206"></a>00206     }
<a name="l00207"></a>00207     n=nbPoints;
<a name="l00208"></a>00208     
<a name="l00209"></a>00209     <span class="comment">//2D points:</span>
<a name="l00210"></a>00210     <span class="keywordtype">char</span> *vmask = <span class="keyword">new</span> <span class="keywordtype">char</span>[ n*m ];<span class="comment">//visibility mask: vmask[i, j]=1 if point i visible in image j, 0 otherwise.</span>
<a name="l00211"></a>00211     <span class="keywordtype">double</span> *p = <span class="keyword">new</span> <span class="keywordtype">double</span>[m*cnp + n*pnp];<span class="comment">//initial parameter vector p0: (a1, ..., am, b1, ..., bn).</span>
<a name="l00212"></a>00212                    <span class="comment">// aj are the image j parameters, bi are the i-th point parameters</span>
<a name="l00213"></a>00213 
<a name="l00214"></a>00214     <span class="keywordtype">double</span> *x = <span class="keyword">new</span> <span class="keywordtype">double</span>[ 2*nz_count ];<span class="comment">// measurements vector: (x_11^T, .. x_1m^T, ..., x_n1^T, .. x_nm^T)^T where</span>
<a name="l00215"></a>00215                    <span class="comment">// x_ij is the projection of the i-th point on the j-th image.</span>
<a name="l00216"></a>00216                    <span class="comment">// NOTE: some of the x_ij might be missing, if point i is not visible in image j;</span>
<a name="l00217"></a>00217                    <span class="comment">// see vmask[i, j], max. size n*m*mnp</span>
<a name="l00218"></a>00218 
<a name="l00219"></a>00219     libmv::vector&lt; Eigen::Quaterniond &gt; init_rotation;
<a name="l00220"></a>00220     libmv::vector&lt; libmv::Vec3 &gt; init_translat;
<a name="l00221"></a>00221     <span class="comment">//update each variable:</span>
<a name="l00222"></a>00222     <span class="keywordtype">int</span> idx_visible = 0;
<a name="l00223"></a>00223     <span class="keywordtype">double</span> *p_local = p;
<a name="l00224"></a>00224     <span class="keywordflow">for</span> ( i=0; i &lt; m; ++i )
<a name="l00225"></a>00225     {<span class="comment">//for each camera:</span>
<a name="l00226"></a>00226       <span class="keywordtype">int</span> idx_cam = idx_cameras[i];
<a name="l00227"></a>00227       intra_p.push_back( <a class="code" href="class_opencv_sf_m_1_1_euclidean_estimator.html#a434eb8f3da74122c2dd7fd323569a629" title="Intra parameters of cameras (don&#39;t use them, they are strongly related to cameras_ attribut!...">intra_params_</a>[idx_cam] );
<a name="l00228"></a>00228       <span class="comment">//extrinsic parameters only (intra are know in euclidean reconstruction)</span>
<a name="l00229"></a>00229       init_rotation.push_back( (Eigen::Quaterniond)<a class="code" href="class_opencv_sf_m_1_1_euclidean_estimator.html#a4649831fdd99cb15be351928dc69213b" title="rotations matrix of cameras (don&#39;t use them, they are strongly related to cameras_ attribut!...">rotations_</a>[ idx_cam ] );
<a name="l00230"></a>00230       init_translat.push_back( <a class="code" href="class_opencv_sf_m_1_1_euclidean_estimator.html#a5b5ca96b398aad275d7ffd79c492d9ed" title="translation vectors of cameras (don&#39;t use them, they are strongly related to cameras_ attribut!...">translations_</a>[ idx_cam ] );
<a name="l00231"></a>00231       <span class="comment">//add camera parameters to p:</span>
<a name="l00232"></a>00232       <span class="comment">//as this is rotation, the quaternion&#39;s length is unity. Only 3 values are needed.</span>
<a name="l00233"></a>00233       <span class="comment">//4th value equal:</span>
<a name="l00234"></a>00234       <span class="comment">//sqrt(1.0 - quat[0]*quat[0] - quat[1]*quat[1] - quat[2]*quat[2]));</span>
<a name="l00235"></a>00235 
<a name="l00236"></a>00236       p_local[0] = 0; p_local[1] = 0; p_local[2] = 0;
<a name="l00237"></a>00237 
<a name="l00238"></a>00238       p_local[3] = 0; p_local[4] = 0; p_local[5] = 0;
<a name="l00239"></a>00239 
<a name="l00240"></a>00240       p_local+=cnp;
<a name="l00241"></a>00241     }
<a name="l00242"></a>00242 
<a name="l00243"></a>00243     <span class="comment">//now add the projections and 3D points:</span>
<a name="l00244"></a>00244     idx_visible = 0;
<a name="l00245"></a>00245     <span class="keywordtype">int</span> j_real = 0;
<a name="l00246"></a>00246     <span class="keywordflow">for</span> ( j = 0; j &lt; <a class="code" href="class_opencv_sf_m_1_1_euclidean_estimator.html#a18a1079d31b360865cf58088f71e7a36" title="list of 3D points computed">point_computed_</a>.size(); ++j )
<a name="l00247"></a>00247     {<span class="comment">//for each 3D point:</span>
<a name="l00248"></a>00248       <span class="keywordflow">if</span>( pointOK[j])
<a name="l00249"></a>00249       {
<a name="l00250"></a>00250         <span class="keywordflow">for</span> ( i=0; i &lt; m; ++i )
<a name="l00251"></a>00251         {<span class="comment">//for each camera:</span>
<a name="l00252"></a>00252           <span class="keywordtype">int</span> idx_cam = idx_cameras[i];
<a name="l00253"></a>00253           vmask[ i+j_real*m ] = <a class="code" href="class_opencv_sf_m_1_1_euclidean_estimator.html#a18a1079d31b360865cf58088f71e7a36" title="list of 3D points computed">point_computed_</a>[ j ].containImage( idx_cam );
<a name="l00254"></a>00254           <span class="keywordflow">if</span>( vmask[ i+j_real*m ] )
<a name="l00255"></a>00255           {
<a name="l00256"></a>00256             cv::KeyPoint pt = points_to_track[ idx_cam ]-&gt;getKeypoint(
<a name="l00257"></a>00257               <a class="code" href="class_opencv_sf_m_1_1_euclidean_estimator.html#a18a1079d31b360865cf58088f71e7a36" title="list of 3D points computed">point_computed_</a>[ j ].getPointIndex( idx_cam ) );
<a name="l00258"></a>00258             x[ idx_visible++ ] = pt.pt.x;
<a name="l00259"></a>00259             x[ idx_visible++ ] = pt.pt.y;
<a name="l00260"></a>00260           }
<a name="l00261"></a>00261         }
<a name="l00262"></a>00262         j_real++;
<a name="l00263"></a>00263       }
<a name="l00264"></a>00264     }
<a name="l00265"></a>00265     <span class="keywordtype">double</span>* points3D_values = p_local;
<a name="l00266"></a>00266     <span class="keywordflow">for</span> ( j = 0; j &lt; <a class="code" href="class_opencv_sf_m_1_1_euclidean_estimator.html#a18a1079d31b360865cf58088f71e7a36" title="list of 3D points computed">point_computed_</a>.size(); ++j )
<a name="l00267"></a>00267     {<span class="comment">//for each 3D point:</span>
<a name="l00268"></a>00268       <span class="keywordflow">if</span>( pointOK[j])
<a name="l00269"></a>00269       {
<a name="l00270"></a>00270         cv::Ptr&lt;cv::Vec3d&gt; cv3DPoint = <a class="code" href="class_opencv_sf_m_1_1_euclidean_estimator.html#a18a1079d31b360865cf58088f71e7a36" title="list of 3D points computed">point_computed_</a>[ j ].get3DPosition();
<a name="l00271"></a>00271         <span class="keywordflow">if</span>(cv3DPoint.empty())
<a name="l00272"></a>00272         {
<a name="l00273"></a>00273           *(p_local++) = 0;
<a name="l00274"></a>00274           *(p_local++) = 0;
<a name="l00275"></a>00275           *(p_local++) = 0;
<a name="l00276"></a>00276         }
<a name="l00277"></a>00277         <span class="keywordflow">else</span>
<a name="l00278"></a>00278         {
<a name="l00279"></a>00279           *(p_local++) = (*cv3DPoint)[ 0 ];
<a name="l00280"></a>00280           *(p_local++) = (*cv3DPoint)[ 1 ];
<a name="l00281"></a>00281           *(p_local++) = (*cv3DPoint)[ 2 ];
<a name="l00282"></a>00282         }
<a name="l00283"></a>00283       }
<a name="l00284"></a>00284     }
<a name="l00285"></a>00285 
<a name="l00286"></a>00286     <a class="code" href="struct_opencv_sf_m_1_1bundle__datas.html">bundle_datas</a> data(intra_p,init_rotation,init_translat,
<a name="l00287"></a>00287       cnp, pnp, mnp,ncon, mcon);
<a name="l00288"></a>00288     data.<a class="code" href="struct_opencv_sf_m_1_1bundle__datas.html#a842e05defa44648188b6cc2a7bb64b95" title="List of 3d points.">points3D</a> = points3D_values;
<a name="l00289"></a>00289 
<a name="l00291"></a>00291 <span class="preprocessor">#define PRINT_DEBUG</span>
<a name="l00292"></a>00292 <span class="preprocessor"></span><span class="preprocessor">#ifdef PRINT_DEBUG</span>
<a name="l00293"></a>00293 <span class="preprocessor"></span>    <span class="comment">//Debug compare projected point vs estimated point:</span>
<a name="l00294"></a>00294     idx_visible = 0;
<a name="l00295"></a>00295     <span class="keywordtype">double</span> max_distance = 0;
<a name="l00296"></a>00296     <span class="keywordtype">double</span> max_depth = 0;
<a name="l00297"></a>00297     j_real = 0;
<a name="l00298"></a>00298     <span class="keywordflow">for</span> ( j = 0; j &lt; <a class="code" href="class_opencv_sf_m_1_1_euclidean_estimator.html#a18a1079d31b360865cf58088f71e7a36" title="list of 3D points computed">point_computed_</a>.size(); ++j )
<a name="l00299"></a>00299     {<span class="comment">//for each 3D point:</span>
<a name="l00300"></a>00300       <span class="keywordflow">if</span>( pointOK[j])
<a name="l00301"></a>00301       {
<a name="l00302"></a>00302         <span class="keywordflow">for</span> ( i=0; i &lt; m; ++i )
<a name="l00303"></a>00303         {<span class="comment">//for each camera:</span>
<a name="l00304"></a>00304           <span class="comment">//2D projected points</span>
<a name="l00305"></a>00305           <span class="keywordflow">if</span>( vmask[ i+j_real*m ] )
<a name="l00306"></a>00306           {
<a name="l00307"></a>00307             cv::Vec3d&amp; cv3DPoint = <a class="code" href="class_opencv_sf_m_1_1_euclidean_estimator.html#a18a1079d31b360865cf58088f71e7a36" title="list of 3D points computed">point_computed_</a>[ j ];
<a name="l00308"></a>00308             <span class="comment">//cout&lt;&lt;&quot;Vec3d : &quot;&lt;&lt; cv3DPoint[ 0 ]&lt;&lt;&quot;, &quot;&lt;&lt; cv3DPoint[ 1 ]&lt;&lt;&quot;, &quot;&lt;&lt; cv3DPoint[ 2 ]&lt;&lt;endl;</span>
<a name="l00309"></a>00309             <span class="keywordtype">int</span> idx_cam = idx_cameras[i];
<a name="l00310"></a>00310             cv::KeyPoint pt = points_to_track[ idx_cam ]-&gt;getKeypoint(
<a name="l00311"></a>00311               <a class="code" href="class_opencv_sf_m_1_1_euclidean_estimator.html#a18a1079d31b360865cf58088f71e7a36" title="list of 3D points computed">point_computed_</a>[ j ].getPointIndex( idx_cam ) );
<a name="l00312"></a>00312             cv::Vec2d proj = <a class="code" href="class_opencv_sf_m_1_1_euclidean_estimator.html#adcf96ff6971f5ccd6c77c83c8e76e7e9" title="List of cameras (intra and extern parameters...)">cameras_</a>[ idx_cam ].project3DPointIntoImage(cv3DPoint);<span class="comment">/*</span>
<a name="l00313"></a>00313 <span class="comment">            cout&lt;&lt;pt.pt.x&lt;&lt;&quot;,&quot;&lt;&lt;pt.pt.y&lt;&lt;&quot; -&gt; &quot;;</span>
<a name="l00314"></a>00314 <span class="comment">            cout&lt;&lt;proj[0]&lt;&lt;&quot;,&quot;&lt;&lt;proj[1]&lt;&lt;endl;*/</span>
<a name="l00315"></a>00315             max_distance += (pt.pt.x - proj[0])*(pt.pt.x - proj[0]) +
<a name="l00316"></a>00316               (pt.pt.y - proj[1])*(pt.pt.y - proj[1]);
<a name="l00317"></a>00317 
<a name="l00318"></a>00318             libmv::Vec3 X(cv3DPoint[0],cv3DPoint[1],cv3DPoint[2]);
<a name="l00319"></a>00319             max_depth += abs( (<a class="code" href="class_opencv_sf_m_1_1_euclidean_estimator.html#a4649831fdd99cb15be351928dc69213b" title="rotations matrix of cameras (don&#39;t use them, they are strongly related to cameras_ attribut!...">rotations_</a>[ idx_cam ]*X)(2) + <a class="code" href="class_opencv_sf_m_1_1_euclidean_estimator.html#a5b5ca96b398aad275d7ffd79c492d9ed" title="translation vectors of cameras (don&#39;t use them, they are strongly related to cameras_ attribut!...">translations_</a>[ idx_cam ](2) );
<a name="l00320"></a>00320           }
<a name="l00321"></a>00321         }
<a name="l00322"></a>00322         j_real++;
<a name="l00323"></a>00323       }
<a name="l00324"></a>00324       <span class="comment">//system(&quot;pause&quot;);</span>
<a name="l00325"></a>00325     }
<a name="l00326"></a>00326 <span class="preprocessor">#endif</span>
<a name="l00327"></a>00327 <span class="preprocessor"></span>
<a name="l00328"></a>00328 
<a name="l00329"></a>00329     <span class="comment">//TUNING PARAMETERS:</span>
<a name="l00330"></a>00330     <span class="keywordtype">int</span> itmax = 10000;        <span class="comment">//max iterations</span>
<a name="l00331"></a>00331     <span class="keywordtype">int</span> verbose = 1;
<a name="l00332"></a>00332     <span class="keywordtype">double</span> opts[SBA_OPTSSZ] = {
<a name="l00333"></a>00333       0.001,            <span class="comment">//Tau</span>
<a name="l00334"></a>00334       1e-20,            <span class="comment">//E1</span>
<a name="l00335"></a>00335       1e-20,            <span class="comment">//E2</span>
<a name="l00336"></a>00336       0,                <span class="comment">//E3 average reprojection error</span>
<a name="l00337"></a>00337       0         <span class="comment">//E4 relative reduction in the RMS reprojection error</span>
<a name="l00338"></a>00338     };
<a name="l00339"></a>00339 
<a name="l00340"></a>00340     <span class="keywordtype">double</span> info[SBA_INFOSZ];
<a name="l00341"></a>00341     
<a name="l00342"></a>00342     <span class="comment">//use sba library</span>
<a name="l00343"></a>00343     <span class="keywordtype">int</span> iter = sba_motstr_levmar_x(n, ncon, m, mcon, vmask, p, cnp, pnp, x, NULL, mnp,
<a name="l00344"></a>00344         img_projsRTS_x, img_projsRTS_jac_x, (<span class="keywordtype">void</span>*)&amp;data, itmax, 0, opts, info);
<a name="l00345"></a>00345 
<a name="l00346"></a>00346     std::cout&lt;&lt;<span class="stringliteral">&quot;SBA returned in &quot;</span>&lt;&lt;iter&lt;&lt;<span class="stringliteral">&quot; iter, reason &quot;</span>&lt;&lt;info[6]
<a name="l00347"></a>00347     &lt;&lt;<span class="stringliteral">&quot;, error &quot;</span>&lt;&lt;info[1]&lt;&lt;<span class="stringliteral">&quot; [initial &quot;</span>&lt;&lt; info[0]&lt;&lt;<span class="stringliteral">&quot;]\n&quot;</span>;
<a name="l00348"></a>00348     <span class="keywordflow">if</span>(iter&gt;1)
<a name="l00349"></a>00349     {
<a name="l00350"></a>00350     <span class="comment">//set new values:</span>
<a name="l00351"></a>00351     m = idx_cameras.size();
<a name="l00352"></a>00352     n = <a class="code" href="class_opencv_sf_m_1_1_euclidean_estimator.html#a18a1079d31b360865cf58088f71e7a36" title="list of 3D points computed">point_computed_</a>.size( );
<a name="l00353"></a>00353     idx_visible = 0;
<a name="l00354"></a>00354     p_local = p;
<a name="l00355"></a>00355     <span class="keywordflow">for</span> ( i=0; i &lt; m; ++i )
<a name="l00356"></a>00356     {<span class="comment">//for each camera:</span>
<a name="l00357"></a>00357       <span class="keywordtype">int</span> idx_cam = idx_cameras[i];
<a name="l00358"></a>00358       <span class="comment">//extrinsic parameters only (intra are known in Euclidean reconstruction)</span>
<a name="l00359"></a>00359 
<a name="l00360"></a>00360       Eigen::Quaterniond rot_init = data.rotations[i];
<a name="l00361"></a>00361       <span class="keywordtype">double</span> c1 = p_local[0];
<a name="l00362"></a>00362       <span class="keywordtype">double</span> c2 = p_local[1];
<a name="l00363"></a>00363       <span class="keywordtype">double</span> c3 = p_local[2];
<a name="l00364"></a>00364       <span class="keywordtype">double</span> coef=(1.0 - c1*c1 - c2*c2 - c3*c3 );
<a name="l00365"></a>00365       <span class="keywordflow">if</span>( coef&gt;0 )
<a name="l00366"></a>00366         coef = sqrt( coef );
<a name="l00367"></a>00367       <span class="keywordflow">else</span><span class="comment">//problem with this rotation...</span>
<a name="l00368"></a>00368       {
<a name="l00369"></a>00369         coef = 0;
<a name="l00370"></a>00370         Eigen::Quaterniond quat_delta( coef, c1, c2, c3 );
<a name="l00371"></a>00371         quat_delta.normalize();
<a name="l00372"></a>00372         c1=quat_delta.x(); c2=quat_delta.y(); c3=quat_delta.z();
<a name="l00373"></a>00373         coef = quat_delta.w();
<a name="l00374"></a>00374       }
<a name="l00375"></a>00375 
<a name="l00376"></a>00376       Eigen::Quaterniond quat_delta( coef, c1, c2, c3 );
<a name="l00377"></a>00377       Eigen::Quaterniond rot_total = quat_delta * rot_init;
<a name="l00378"></a>00378       <span class="comment">//add camera parameters to p:</span>
<a name="l00379"></a>00379       <a class="code" href="class_opencv_sf_m_1_1_euclidean_estimator.html#a4649831fdd99cb15be351928dc69213b" title="rotations matrix of cameras (don&#39;t use them, they are strongly related to cameras_ attribut!...">rotations_</a>[ idx_cam ] = rot_total.toRotationMatrix();
<a name="l00380"></a>00380 
<a name="l00381"></a>00381       <a class="code" href="class_opencv_sf_m_1_1_euclidean_estimator.html#a5b5ca96b398aad275d7ffd79c492d9ed" title="translation vectors of cameras (don&#39;t use them, they are strongly related to cameras_ attribut!...">translations_</a>[ idx_cam ](0) += p_local[3];
<a name="l00382"></a>00382       <a class="code" href="class_opencv_sf_m_1_1_euclidean_estimator.html#a5b5ca96b398aad275d7ffd79c492d9ed" title="translation vectors of cameras (don&#39;t use them, they are strongly related to cameras_ attribut!...">translations_</a>[ idx_cam ](1) += p_local[4];
<a name="l00383"></a>00383       <a class="code" href="class_opencv_sf_m_1_1_euclidean_estimator.html#a5b5ca96b398aad275d7ffd79c492d9ed" title="translation vectors of cameras (don&#39;t use them, they are strongly related to cameras_ attribut!...">translations_</a>[ idx_cam ](2) += p_local[5];
<a name="l00384"></a>00384 
<a name="l00385"></a>00385       <span class="comment">//update camera&#39;s structure:</span>
<a name="l00386"></a>00386       cv::Mat newRotation,newTranslation;
<a name="l00387"></a>00387       cv::eigen2cv( <a class="code" href="class_opencv_sf_m_1_1_euclidean_estimator.html#a4649831fdd99cb15be351928dc69213b" title="rotations matrix of cameras (don&#39;t use them, they are strongly related to cameras_ attribut!...">rotations_</a>[ idx_cam ], newRotation );
<a name="l00388"></a>00388       cv::eigen2cv( <a class="code" href="class_opencv_sf_m_1_1_euclidean_estimator.html#a5b5ca96b398aad275d7ffd79c492d9ed" title="translation vectors of cameras (don&#39;t use them, they are strongly related to cameras_ attribut!...">translations_</a>[ idx_cam ], newTranslation );
<a name="l00389"></a>00389       <a class="code" href="class_opencv_sf_m_1_1_euclidean_estimator.html#adcf96ff6971f5ccd6c77c83c8e76e7e9" title="List of cameras (intra and extern parameters...)">cameras_</a>[ idx_cam ].setRotationMatrix( newRotation );
<a name="l00390"></a>00390       <a class="code" href="class_opencv_sf_m_1_1_euclidean_estimator.html#adcf96ff6971f5ccd6c77c83c8e76e7e9" title="List of cameras (intra and extern parameters...)">cameras_</a>[ idx_cam ].setTranslationVector( newTranslation );
<a name="l00391"></a>00391 
<a name="l00392"></a>00392       p_local+=cnp;
<a name="l00393"></a>00393     }
<a name="l00394"></a>00394     <span class="keywordflow">for</span> ( j = 0; j &lt; <a class="code" href="class_opencv_sf_m_1_1_euclidean_estimator.html#a18a1079d31b360865cf58088f71e7a36" title="list of 3D points computed">point_computed_</a>.size(); ++j )
<a name="l00395"></a>00395     {<span class="comment">//for each 3D point:</span>
<a name="l00396"></a>00396       <span class="keywordflow">if</span>( pointOK[j])
<a name="l00397"></a>00397       {
<a name="l00398"></a>00398         cv::Vec3d cv3DPoint;
<a name="l00399"></a>00399         cv3DPoint[ 0 ] = *(p_local++);
<a name="l00400"></a>00400         cv3DPoint[ 1 ] = *(p_local++);
<a name="l00401"></a>00401         cv3DPoint[ 2 ] = *(p_local++);
<a name="l00402"></a>00402         <a class="code" href="class_opencv_sf_m_1_1_euclidean_estimator.html#a18a1079d31b360865cf58088f71e7a36" title="list of 3D points computed">point_computed_</a>[ j ].set3DPosition( cv3DPoint );
<a name="l00403"></a>00403       }
<a name="l00404"></a>00404     }
<a name="l00405"></a>00405 
<a name="l00407"></a>00407 <span class="preprocessor">#ifdef PRINT_DEBUG</span>
<a name="l00408"></a>00408 <span class="preprocessor"></span>    <span class="comment">//Debug compare projected point vs estimated point:</span>
<a name="l00409"></a>00409     idx_visible = 0;
<a name="l00410"></a>00410     <span class="comment">//2D projected points</span>
<a name="l00411"></a>00411     <span class="keywordtype">double</span> max_distance_1 = 0;
<a name="l00412"></a>00412     <span class="keywordtype">double</span> max_depth1 = 0;
<a name="l00413"></a>00413     j_real = 0;
<a name="l00414"></a>00414     <span class="keywordflow">for</span> ( j = 0; j &lt; n; ++j )
<a name="l00415"></a>00415     {<span class="comment">//for each 3D point:</span>
<a name="l00416"></a>00416       <span class="keywordflow">if</span>( pointOK[j] )
<a name="l00417"></a>00417       {
<a name="l00418"></a>00418       <span class="keywordflow">for</span> ( i=0; i &lt; m; ++i )
<a name="l00419"></a>00419       {<span class="comment">//for each camera:</span>
<a name="l00420"></a>00420         <span class="keywordflow">if</span>( vmask[ i+j_real*m ] )
<a name="l00421"></a>00421         {
<a name="l00422"></a>00422           cv::Vec3d&amp; cv3DPoint = <a class="code" href="class_opencv_sf_m_1_1_euclidean_estimator.html#a18a1079d31b360865cf58088f71e7a36" title="list of 3D points computed">point_computed_</a>[ j ];
<a name="l00423"></a>00423           <span class="comment">//cout&lt;&lt;&quot;Vec3d : &quot;&lt;&lt; cv3DPoint[ 0 ]&lt;&lt;&quot;, &quot;&lt;&lt; cv3DPoint[ 1 ]&lt;&lt;&quot;, &quot;&lt;&lt; cv3DPoint[ 2 ]&lt;&lt;endl;</span>
<a name="l00424"></a>00424           <span class="keywordtype">int</span> idx_cam = idx_cameras[i];
<a name="l00425"></a>00425           cv::KeyPoint pt = points_to_track[ idx_cam ]-&gt;getKeypoint(
<a name="l00426"></a>00426             <a class="code" href="class_opencv_sf_m_1_1_euclidean_estimator.html#a18a1079d31b360865cf58088f71e7a36" title="list of 3D points computed">point_computed_</a>[ j ].getPointIndex( idx_cam ) );
<a name="l00427"></a>00427           cv::Vec2d proj = <a class="code" href="class_opencv_sf_m_1_1_euclidean_estimator.html#adcf96ff6971f5ccd6c77c83c8e76e7e9" title="List of cameras (intra and extern parameters...)">cameras_</a>[ idx_cam ].project3DPointIntoImage(cv3DPoint);<span class="comment">/*</span>
<a name="l00428"></a>00428 <span class="comment">          cout&lt;&lt;pt.pt.x&lt;&lt;&quot;,&quot;&lt;&lt;pt.pt.y&lt;&lt;&quot; -&gt; &quot;;</span>
<a name="l00429"></a>00429 <span class="comment">          cout&lt;&lt;proj[0]&lt;&lt;&quot;,&quot;&lt;&lt;proj[1]&lt;&lt;endl;*/</span>
<a name="l00430"></a>00430           max_distance_1 += (pt.pt.x - proj[0])*(pt.pt.x - proj[0]) +
<a name="l00431"></a>00431             (pt.pt.y - proj[1])*(pt.pt.y - proj[1]);
<a name="l00432"></a>00432           libmv::Vec3 X(cv3DPoint[0],cv3DPoint[1],cv3DPoint[2]);
<a name="l00433"></a>00433           max_depth1 += abs( (<a class="code" href="class_opencv_sf_m_1_1_euclidean_estimator.html#a4649831fdd99cb15be351928dc69213b" title="rotations matrix of cameras (don&#39;t use them, they are strongly related to cameras_ attribut!...">rotations_</a>[ idx_cam ]*X)(2) + <a class="code" href="class_opencv_sf_m_1_1_euclidean_estimator.html#a5b5ca96b398aad275d7ffd79c492d9ed" title="translation vectors of cameras (don&#39;t use them, they are strongly related to cameras_ attribut!...">translations_</a>[ idx_cam ](2) );
<a name="l00434"></a>00434         }
<a name="l00435"></a>00435       }
<a name="l00436"></a>00436       j_real++;
<a name="l00437"></a>00437     }
<a name="l00438"></a>00438       <span class="comment">//system(&quot;pause&quot;);</span>
<a name="l00439"></a>00439     }
<a name="l00440"></a>00440     cout&lt;&lt; (max_distance)&lt;&lt;<span class="stringliteral">&quot;  ; &quot;</span>&lt;&lt;(max_distance_1)&lt;&lt;endl;
<a name="l00441"></a>00441     cout&lt;&lt; (max_depth)&lt;&lt;<span class="stringliteral">&quot;  ; &quot;</span>&lt;&lt;(max_depth1)&lt;&lt;endl;
<a name="l00442"></a>00442 <span class="preprocessor">#endif</span>
<a name="l00443"></a>00443 <span class="preprocessor"></span>
<a name="l00444"></a>00444 
<a name="l00445"></a>00445     }
<a name="l00446"></a>00446 
<a name="l00447"></a>00447     <span class="keyword">delete</span> [] vmask;<span class="comment">//visibility mask</span>
<a name="l00448"></a>00448     <span class="keyword">delete</span> [] p;<span class="comment">//initial parameter vector p0: (a1, ..., am, b1, ..., bn).</span>
<a name="l00449"></a>00449     <span class="keyword">delete</span> [] x;<span class="comment">// measurement vector</span>
<a name="l00450"></a>00450   }
<a name="l00451"></a>00451 
<a name="l00452"></a><a class="code" href="class_opencv_sf_m_1_1_euclidean_estimator.html#ae9b18130f68de37ae8385e01426ee82f">00452</a>   <span class="keywordtype">bool</span> <a class="code" href="class_opencv_sf_m_1_1_euclidean_estimator.html#ae9b18130f68de37ae8385e01426ee82f">EuclideanEstimator::cameraResection</a>( <span class="keywordtype">unsigned</span> <span class="keywordtype">int</span> image )
<a name="l00453"></a>00453   {
<a name="l00454"></a>00454     <span class="comment">//wrap the lourakis SBA:</span>
<a name="l00455"></a>00455     cout&lt;&lt;<span class="stringliteral">&quot;resection&quot;</span>&lt;&lt;endl;
<a name="l00456"></a>00456     <span class="keywordtype">unsigned</span> <span class="keywordtype">int</span> n = <a class="code" href="class_opencv_sf_m_1_1_euclidean_estimator.html#a18a1079d31b360865cf58088f71e7a36" title="list of 3D points computed">point_computed_</a>.size( ),   <span class="comment">// number of points</span>
<a name="l00457"></a>00457       m = 0,   <span class="comment">// number of images (or camera)</span>
<a name="l00458"></a>00458       mcon = 0,<span class="comment">// number of images (starting from the 1st) whose parameters should not be modified.</span>
<a name="l00459"></a>00459       cnp = 6,<span class="comment">// number of parameters for ONE camera; e.g. 6 for Euclidean cameras</span>
<a name="l00460"></a>00460       <span class="comment">//use only vector part of quaternion to enforce the unit length...</span>
<a name="l00461"></a>00461       mnp = 2;<span class="comment">// number of parameters for ONE projected point; e.g. 2 for Euclidean points</span>
<a name="l00462"></a>00462 
<a name="l00463"></a>00463     <span class="keywordtype">unsigned</span> <span class="keywordtype">int</span> i = 0, j = 0,
<a name="l00464"></a>00464       nb_cam = <a class="code" href="class_opencv_sf_m_1_1_euclidean_estimator.html#a678b38f149b13435499e5e3f8bc83ca3" title="List of camera computed.">camera_computed_</a>.size( );
<a name="l00465"></a>00465     vector&lt; Ptr&lt; PointsToTrack &gt; &gt; &amp;points_to_track = <a class="code" href="class_opencv_sf_m_1_1_euclidean_estimator.html#a6b9217240500126545a9b3696f9bdcbe" title="Object containing all 2D information of this sequence.">sequence_</a>.<a class="code" href="class_opencv_sf_m_1_1_sequence_analyzer.html#aefda449985e3013c6ac2adc5f61151d4">getPoints</a>( );
<a name="l00466"></a>00466     vector&lt; TrackOfPoints &gt; real_track;
<a name="l00467"></a>00467     <span class="comment">//keep only tracks having image:</span>
<a name="l00468"></a>00468 
<a name="l00469"></a>00469     <span class="keywordflow">for</span>(i=0; i&lt;n; i++)
<a name="l00470"></a>00470       <span class="keywordflow">if</span>( <a class="code" href="class_opencv_sf_m_1_1_euclidean_estimator.html#a18a1079d31b360865cf58088f71e7a36" title="list of 3D points computed">point_computed_</a>[i].containImage(image) )
<a name="l00471"></a>00471         real_track.push_back(<a class="code" href="class_opencv_sf_m_1_1_euclidean_estimator.html#a18a1079d31b360865cf58088f71e7a36" title="list of 3D points computed">point_computed_</a>[i]);
<a name="l00472"></a>00472     n = real_track.size();
<a name="l00473"></a>00473 
<a name="l00474"></a>00474     <span class="comment">//now for fun show the sequence on images:</span>
<a name="l00475"></a>00475     <span class="comment">//sequence_.showTracks( image, real_track );</span>
<a name="l00476"></a>00476 
<a name="l00477"></a>00477     <span class="comment">//because some points are sometimes not visible:</span>
<a name="l00478"></a>00478     vector&lt;int&gt; idx_cameras;
<a name="l00479"></a>00479     libmv::vector&lt; libmv::Mat3 &gt; intra_p;
<a name="l00480"></a>00480     <span class="keywordtype">int</span> nz_count = 0;
<a name="l00481"></a>00481     <span class="keywordflow">for</span> ( i = 0; i &lt; nb_cam; ++i )
<a name="l00482"></a>00482     {<span class="comment">//for each camera:</span>
<a name="l00483"></a>00483       <span class="keywordflow">if</span>( <a class="code" href="class_opencv_sf_m_1_1_euclidean_estimator.html#a678b38f149b13435499e5e3f8bc83ca3" title="List of camera computed.">camera_computed_</a>[ i ] &amp;&amp; i!=image )
<a name="l00484"></a>00484       {
<a name="l00485"></a>00485         idx_cameras.push_back(i);
<a name="l00486"></a>00486         m++;<span class="comment">//increment of camera count</span>
<a name="l00487"></a>00487 
<a name="l00488"></a>00488         <span class="keywordtype">int</span> nb_projection = 0;
<a name="l00489"></a>00489         <span class="keywordflow">for</span> ( j = 0; j &lt; n; ++j )
<a name="l00490"></a>00490         {<span class="comment">//for each 3D point:</span>
<a name="l00491"></a>00491           <span class="keywordflow">if</span>( real_track[ j ].containImage( i ) )
<a name="l00492"></a>00492             nb_projection++;
<a name="l00493"></a>00493         }
<a name="l00494"></a>00494         nz_count += nb_projection;
<a name="l00495"></a>00495       }
<a name="l00496"></a>00496     }
<a name="l00497"></a>00497 
<a name="l00498"></a>00498     mcon = m;<span class="comment">//other cameras are constant!</span>
<a name="l00499"></a>00499 
<a name="l00500"></a>00500     idx_cameras.push_back(image);
<a name="l00501"></a>00501     m++;<span class="comment">//increment of camera count</span>
<a name="l00502"></a>00502 
<a name="l00503"></a>00503     <span class="keywordtype">int</span> nb_projection = 0;
<a name="l00504"></a>00504     <span class="keywordflow">for</span> ( j = 0; j &lt; n; ++j )
<a name="l00505"></a>00505     {<span class="comment">//for each 3D point:</span>
<a name="l00506"></a>00506       <span class="keywordflow">if</span>( real_track[ j ].containImage( image ) )
<a name="l00507"></a>00507         nb_projection++;
<a name="l00508"></a>00508     }
<a name="l00509"></a>00509     nz_count += nb_projection;
<a name="l00510"></a>00510 
<a name="l00511"></a>00511     <span class="comment">//2D points:</span>
<a name="l00512"></a>00512     <span class="keywordtype">char</span> *vmask = <span class="keyword">new</span> <span class="keywordtype">char</span>[ n*m ];<span class="comment">//visibility mask: vmask[i, j]=1 if point i visible in image j, 0 otherwise.</span>
<a name="l00513"></a>00513     <span class="keywordtype">double</span> *p = <span class="keyword">new</span> <span class="keywordtype">double</span>[m*cnp + n*3];<span class="comment">//initial parameter vector p0: (a1, ..., am, b1, ..., bn).</span>
<a name="l00514"></a>00514     <span class="comment">// aj are the image j parameters, bi are the i-th point parameters</span>
<a name="l00515"></a>00515 
<a name="l00516"></a>00516     <span class="keywordtype">double</span> *x = <span class="keyword">new</span> <span class="keywordtype">double</span>[ 2*nz_count ];<span class="comment">// measurements vector: (x_11^T, .. x_1m^T, ..., x_n1^T, .. x_nm^T)^T where</span>
<a name="l00517"></a>00517     <span class="comment">// x_ij is the projection of the i-th point on the j-th image.</span>
<a name="l00518"></a>00518     <span class="comment">// NOTE: some of the x_ij might be missing, if point i is not visible in image j;</span>
<a name="l00519"></a>00519     <span class="comment">// see vmask[i, j], max. size n*m*mnp</span>
<a name="l00520"></a>00520 
<a name="l00521"></a>00521     libmv::vector&lt; Eigen::Quaterniond &gt; init_rotation;
<a name="l00522"></a>00522     libmv::vector&lt; libmv::Vec3 &gt; init_translat;
<a name="l00523"></a>00523     <span class="comment">//update each variable:</span>
<a name="l00524"></a>00524     <span class="keywordtype">double</span> *p_local = p;
<a name="l00525"></a>00525     <span class="keywordflow">for</span> ( i=0; i &lt; m; ++i )
<a name="l00526"></a>00526     {<span class="comment">//for each camera:</span>
<a name="l00527"></a>00527       <span class="keywordtype">int</span> idx_cam = idx_cameras[i];
<a name="l00528"></a>00528       intra_p.push_back( <a class="code" href="class_opencv_sf_m_1_1_euclidean_estimator.html#a434eb8f3da74122c2dd7fd323569a629" title="Intra parameters of cameras (don&#39;t use them, they are strongly related to cameras_ attribut!...">intra_params_</a>[idx_cam] );
<a name="l00529"></a>00529       <span class="comment">//extrinsic parameters only (intra are known in Euclidean reconstruction)</span>
<a name="l00530"></a>00530       init_rotation.push_back( (Eigen::Quaterniond)<a class="code" href="class_opencv_sf_m_1_1_euclidean_estimator.html#a4649831fdd99cb15be351928dc69213b" title="rotations matrix of cameras (don&#39;t use them, they are strongly related to cameras_ attribut!...">rotations_</a>[ idx_cam ] );
<a name="l00531"></a>00531       init_translat.push_back( <a class="code" href="class_opencv_sf_m_1_1_euclidean_estimator.html#a5b5ca96b398aad275d7ffd79c492d9ed" title="translation vectors of cameras (don&#39;t use them, they are strongly related to cameras_ attribut!...">translations_</a>[ idx_cam ] );
<a name="l00532"></a>00532       <span class="comment">//add camera parameters to p:</span>
<a name="l00533"></a>00533       <span class="comment">//as this is rotation, the quaternion&#39;s length is unity. Only 3 values are needed.</span>
<a name="l00534"></a>00534       <span class="comment">//4th value equal:</span>
<a name="l00535"></a>00535       <span class="comment">//sqrt(1.0 - quat[0]*quat[0] - quat[1]*quat[1] - quat[2]*quat[2]));</span>
<a name="l00536"></a>00536 
<a name="l00537"></a>00537       p_local[0] = 0; p_local[1] = 0; p_local[2] = 0;
<a name="l00538"></a>00538 
<a name="l00539"></a>00539 
<a name="l00540"></a>00540       p_local[3] = 0; p_local[4] = 0; p_local[5] = 0;
<a name="l00541"></a>00541 
<a name="l00542"></a>00542       p_local+=cnp;
<a name="l00543"></a>00543     }
<a name="l00544"></a>00544 
<a name="l00545"></a>00545     <span class="comment">//now add the projections and 3D points:</span>
<a name="l00546"></a>00546     <span class="keywordtype">unsigned</span> <span class="keywordtype">int</span> idx_visible = 0;
<a name="l00547"></a>00547     <span class="keywordflow">for</span> ( j = 0; j &lt; n; ++j )
<a name="l00548"></a>00548     {<span class="comment">//for each 3D point:</span>
<a name="l00549"></a>00549       <span class="keywordflow">for</span> ( i=0; i &lt; m; ++i )
<a name="l00550"></a>00550       {<span class="comment">//for each camera:</span>
<a name="l00551"></a>00551         <span class="keywordtype">int</span> idx_cam = idx_cameras[i];
<a name="l00552"></a>00552         vmask[ i+j*m ] = real_track[ j ].containImage( idx_cam );
<a name="l00553"></a>00553         <span class="keywordflow">if</span>( vmask[ i+j*m ] )
<a name="l00554"></a>00554         {
<a name="l00555"></a>00555           cv::KeyPoint pt = points_to_track[ idx_cam ]-&gt;getKeypoint(
<a name="l00556"></a>00556             real_track[ j ].getPointIndex( idx_cam ) );
<a name="l00557"></a>00557           x[ idx_visible++ ] = pt.pt.x;
<a name="l00558"></a>00558           x[ idx_visible++ ] = pt.pt.y;
<a name="l00559"></a>00559         }
<a name="l00560"></a>00560       }
<a name="l00561"></a>00561     }
<a name="l00562"></a>00562     <span class="keywordtype">double</span>* points3D_values = p_local;
<a name="l00563"></a>00563     <span class="keywordflow">for</span> ( j = 0; j &lt; n; ++j )
<a name="l00564"></a>00564     {<span class="comment">//for each 3D point:</span>
<a name="l00565"></a>00565       cv::Vec3d cv3DPoint = real_track[ j ];
<a name="l00566"></a>00566       *(p_local++) = cv3DPoint[ 0 ];
<a name="l00567"></a>00567       *(p_local++) = cv3DPoint[ 1 ];
<a name="l00568"></a>00568       *(p_local++) = cv3DPoint[ 2 ];
<a name="l00569"></a>00569     }
<a name="l00570"></a>00570 
<a name="l00571"></a>00571     <span class="comment">//TUNING PARAMETERS:</span>
<a name="l00572"></a>00572     <span class="keywordtype">int</span> itmax = 1000;        <span class="comment">//max iterations</span>
<a name="l00573"></a>00573     <span class="keywordtype">int</span> verbose = 0;         <span class="comment">//no debug</span>
<a name="l00574"></a>00574     <span class="keywordtype">double</span> opts[SBA_OPTSSZ] = {
<a name="l00575"></a>00575       0.1,              <span class="comment">//Tau</span>
<a name="l00576"></a>00576       1e-12,            <span class="comment">//E1</span>
<a name="l00577"></a>00577       1e-12,            <span class="comment">//E2</span>
<a name="l00578"></a>00578       0,                <span class="comment">//E3 average reprojection error</span>
<a name="l00579"></a>00579       0         <span class="comment">//E4 relative reduction in the RMS reprojection error</span>
<a name="l00580"></a>00580     };
<a name="l00581"></a>00581 
<a name="l00582"></a>00582     <span class="keywordtype">double</span> info[SBA_INFOSZ];
<a name="l00583"></a>00583     <a class="code" href="struct_opencv_sf_m_1_1bundle__datas.html">bundle_datas</a> data(intra_p,init_rotation, init_translat,
<a name="l00584"></a>00584       cnp, 3, mnp, 0, mcon);
<a name="l00585"></a>00585     data.<a class="code" href="struct_opencv_sf_m_1_1bundle__datas.html#a842e05defa44648188b6cc2a7bb64b95" title="List of 3d points.">points3D</a> = points3D_values;
<a name="l00586"></a>00586     <span class="comment">//use sba library</span>
<a name="l00587"></a>00587     <span class="keywordtype">int</span> iter = sba_mot_levmar_x(n, m, mcon, vmask, p, cnp, x, NULL, mnp,
<a name="l00588"></a>00588       img_projsRT_x, NULL, (<span class="keywordtype">void</span>*)&amp;data, itmax, 0, opts, info);
<a name="l00589"></a>00589 
<a name="l00590"></a>00590     <span class="keywordtype">bool</span> resection_ok = <span class="keyword">true</span>;
<a name="l00591"></a>00591     <span class="keywordflow">if</span>( ( iter&lt;=0 ) || (info[1]/nz_count)&gt;100 )
<a name="l00592"></a>00592     {
<a name="l00593"></a>00593       resection_ok = <span class="keyword">false</span>;
<a name="l00594"></a>00594       std::cout&lt;&lt;<span class="stringliteral">&quot;resection rejected (&quot;</span>&lt;&lt;nz_count&lt;&lt;<span class="stringliteral">&quot;) : &quot;</span>&lt;&lt;info[1]/nz_count&lt;&lt;std::endl;
<a name="l00595"></a>00595     }
<a name="l00596"></a>00596     <span class="keywordflow">else</span>
<a name="l00597"></a>00597     {
<a name="l00598"></a>00598       std::cout&lt;&lt;<span class="stringliteral">&quot;SBA returned in &quot;</span>&lt;&lt;iter&lt;&lt;<span class="stringliteral">&quot; iter, reason &quot;</span>&lt;&lt;info[6]
<a name="l00599"></a>00599       &lt;&lt;<span class="stringliteral">&quot;, error &quot;</span>&lt;&lt;info[1]/nz_count&lt;&lt;<span class="stringliteral">&quot; [initial &quot;</span>&lt;&lt; info[0]/nz_count&lt;&lt;<span class="stringliteral">&quot;]\n&quot;</span>;
<a name="l00600"></a>00600 
<a name="l00601"></a>00601 
<a name="l00602"></a>00602       <span class="comment">//set new values:</span>
<a name="l00603"></a>00603       m = idx_cameras.size();
<a name="l00604"></a>00604       n = real_track.size( );
<a name="l00605"></a>00605       idx_visible = 0;
<a name="l00606"></a>00606       p_local = p + cnp*(m-1);
<a name="l00607"></a>00607 
<a name="l00608"></a>00608       <span class="comment">//extrinsic parameters only (intra are known in Euclidean reconstruction)</span>
<a name="l00609"></a>00609       Eigen::Quaterniond rot_init = data.rotations[ m-1 ];
<a name="l00610"></a>00610       <span class="keywordtype">double</span> c1 = p_local[0];
<a name="l00611"></a>00611       <span class="keywordtype">double</span> c2 = p_local[1];
<a name="l00612"></a>00612       <span class="keywordtype">double</span> c3 = p_local[2];
<a name="l00613"></a>00613       <span class="keywordtype">double</span> coef=(1.0 - c1*c1 - c2*c2 - c3*c3 );
<a name="l00614"></a>00614       <span class="keywordflow">if</span>( coef&gt;0 )
<a name="l00615"></a>00615         coef = sqrt( coef );
<a name="l00616"></a>00616       <span class="keywordflow">else</span><span class="comment">//problem with this rotation...</span>
<a name="l00617"></a>00617       {
<a name="l00618"></a>00618         coef = 0;
<a name="l00619"></a>00619         Eigen::Quaterniond quat_delta( coef, c1, c2, c3 );
<a name="l00620"></a>00620         quat_delta.normalize();
<a name="l00621"></a>00621         c1=quat_delta.x(); c2=quat_delta.y(); c3=quat_delta.z();
<a name="l00622"></a>00622         coef = quat_delta.w();
<a name="l00623"></a>00623       }
<a name="l00624"></a>00624 
<a name="l00625"></a>00625       Eigen::Quaterniond quat_delta( coef, c1, c2, c3 );
<a name="l00626"></a>00626       Eigen::Quaterniond rot_total = quat_delta * rot_init;
<a name="l00627"></a>00627       <span class="comment">//add camera parameters to p:</span>
<a name="l00628"></a>00628       <a class="code" href="class_opencv_sf_m_1_1_euclidean_estimator.html#a4649831fdd99cb15be351928dc69213b" title="rotations matrix of cameras (don&#39;t use them, they are strongly related to cameras_ attribut!...">rotations_</a>[ image ] = rot_total.toRotationMatrix();
<a name="l00629"></a>00629 
<a name="l00630"></a>00630       <a class="code" href="class_opencv_sf_m_1_1_euclidean_estimator.html#a5b5ca96b398aad275d7ffd79c492d9ed" title="translation vectors of cameras (don&#39;t use them, they are strongly related to cameras_ attribut!...">translations_</a>[ image ](0) += p_local[3];
<a name="l00631"></a>00631       <a class="code" href="class_opencv_sf_m_1_1_euclidean_estimator.html#a5b5ca96b398aad275d7ffd79c492d9ed" title="translation vectors of cameras (don&#39;t use them, they are strongly related to cameras_ attribut!...">translations_</a>[ image ](1) += p_local[4];
<a name="l00632"></a>00632       <a class="code" href="class_opencv_sf_m_1_1_euclidean_estimator.html#a5b5ca96b398aad275d7ffd79c492d9ed" title="translation vectors of cameras (don&#39;t use them, they are strongly related to cameras_ attribut!...">translations_</a>[ image ](2) += p_local[5];
<a name="l00633"></a>00633 
<a name="l00634"></a>00634       <span class="comment">//update camera&#39;s structure:</span>
<a name="l00635"></a>00635       cv::Mat newRotation,newTranslation;
<a name="l00636"></a>00636       cv::eigen2cv( <a class="code" href="class_opencv_sf_m_1_1_euclidean_estimator.html#a4649831fdd99cb15be351928dc69213b" title="rotations matrix of cameras (don&#39;t use them, they are strongly related to cameras_ attribut!...">rotations_</a>[ image ], newRotation );
<a name="l00637"></a>00637       cv::eigen2cv( <a class="code" href="class_opencv_sf_m_1_1_euclidean_estimator.html#a5b5ca96b398aad275d7ffd79c492d9ed" title="translation vectors of cameras (don&#39;t use them, they are strongly related to cameras_ attribut!...">translations_</a>[ image ], newTranslation );
<a name="l00638"></a>00638       <a class="code" href="class_opencv_sf_m_1_1_euclidean_estimator.html#adcf96ff6971f5ccd6c77c83c8e76e7e9" title="List of cameras (intra and extern parameters...)">cameras_</a>[ image ].setRotationMatrix( newRotation );
<a name="l00639"></a>00639       <a class="code" href="class_opencv_sf_m_1_1_euclidean_estimator.html#adcf96ff6971f5ccd6c77c83c8e76e7e9" title="List of cameras (intra and extern parameters...)">cameras_</a>[ image ].setTranslationVector( newTranslation );
<a name="l00640"></a>00640 
<a name="l00641"></a>00641       <a class="code" href="class_opencv_sf_m_1_1_euclidean_estimator.html#a678b38f149b13435499e5e3f8bc83ca3" title="List of camera computed.">camera_computed_</a>[ image ] = <span class="keyword">true</span>;
<a name="l00642"></a>00642     }
<a name="l00643"></a>00643 
<a name="l00644"></a>00644     <span class="keyword">delete</span> [] vmask;<span class="comment">//visibility mask</span>
<a name="l00645"></a>00645     <span class="keyword">delete</span> [] p;<span class="comment">//initial parameter vector p0: (a1, ..., am, b1, ..., bn).</span>
<a name="l00646"></a>00646     <span class="keyword">delete</span> [] x;<span class="comment">// measurement vector</span>
<a name="l00647"></a>00647 
<a name="l00648"></a>00648 
<a name="l00649"></a>00649     <span class="keywordflow">return</span> resection_ok;
<a name="l00650"></a>00650   }
<a name="l00651"></a>00651 
<a name="l00652"></a><a class="code" href="class_opencv_sf_m_1_1_euclidean_estimator.html#a99b28486f11bc31a1d8b2b52ad3550a5">00652</a>   <span class="keywordtype">void</span> <a class="code" href="class_opencv_sf_m_1_1_euclidean_estimator.html#a99b28486f11bc31a1d8b2b52ad3550a5">EuclideanEstimator::initialReconstruction</a>( <span class="keywordtype">int</span> image1, <span class="keywordtype">int</span> image2 )
<a name="l00653"></a>00653   {
<a name="l00654"></a>00654     vector&lt;TrackOfPoints&gt;&amp; tracks = <a class="code" href="class_opencv_sf_m_1_1_euclidean_estimator.html#a6b9217240500126545a9b3696f9bdcbe" title="Object containing all 2D information of this sequence.">sequence_</a>.<a class="code" href="class_opencv_sf_m_1_1_sequence_analyzer.html#af7c11e97664c050782bfc2d1808b219e">getTracks</a>( );
<a name="l00655"></a>00655     <a class="code" href="class_opencv_sf_m_1_1_euclidean_estimator.html#a678b38f149b13435499e5e3f8bc83ca3" title="List of camera computed.">camera_computed_</a>[ image1 ] = <span class="keyword">true</span>;
<a name="l00656"></a>00656     <a class="code" href="class_opencv_sf_m_1_1_euclidean_estimator.html#a4f7ae927e99fdb0b6bb3125208aa1663" title="index of camera set as origin...">index_origin</a> = image1;
<a name="l00657"></a>00657 
<a name="l00658"></a>00658     vector&lt; Ptr&lt; PointsToTrack &gt; &gt; &amp;points_to_track = <a class="code" href="class_opencv_sf_m_1_1_euclidean_estimator.html#a6b9217240500126545a9b3696f9bdcbe" title="Object containing all 2D information of this sequence.">sequence_</a>.<a class="code" href="class_opencv_sf_m_1_1_sequence_analyzer.html#aefda449985e3013c6ac2adc5f61151d4">getPoints</a>( );
<a name="l00659"></a>00659     libmv::Mat3 E;
<a name="l00660"></a>00660     Ptr&lt;PointsToTrack&gt; point_img1 = points_to_track[ image1 ];
<a name="l00661"></a>00661     Ptr&lt;PointsToTrack&gt; point_img2 = points_to_track[ image2 ];
<a name="l00662"></a>00662     <span class="comment">//first extract points matches:</span>
<a name="l00663"></a>00663     libmv::Mat2X x1,x2;
<a name="l00664"></a>00664     <span class="comment">//for each points:</span>
<a name="l00665"></a>00665     <span class="keywordtype">unsigned</span> <span class="keywordtype">int</span> key_size = tracks.size( );
<a name="l00666"></a>00666     <span class="keywordtype">unsigned</span> <span class="keywordtype">int</span> i;
<a name="l00667"></a>00667     vector&lt;TrackOfPoints&gt; matches;
<a name="l00668"></a>00668 
<a name="l00669"></a>00669     <span class="keywordflow">for</span> ( i=0; i &lt; key_size; ++i )
<a name="l00670"></a>00670     {
<a name="l00671"></a>00671       <a class="code" href="class_opencv_sf_m_1_1_track_of_points.html" title="This class store the track of keypoints. A track is a connected set of matching keypoints across mult...">TrackOfPoints</a> &amp;track = tracks[ i ];
<a name="l00672"></a>00672       <span class="keywordflow">if</span>( track.<a class="code" href="class_opencv_sf_m_1_1_track_of_points.html#aea06a066b4be5fa2ff5ef09cea7fb745">containImage</a>( image1 ) &amp;&amp; track.<a class="code" href="class_opencv_sf_m_1_1_track_of_points.html#aea06a066b4be5fa2ff5ef09cea7fb745">containImage</a>( image2 ) )
<a name="l00673"></a>00673         matches.push_back( track );
<a name="l00674"></a>00674     }
<a name="l00675"></a>00675     x1.resize( 2,matches.size( ) );
<a name="l00676"></a>00676     x2.resize( 2,matches.size( ) );
<a name="l00677"></a>00677 
<a name="l00678"></a>00678     key_size = matches.size( );
<a name="l00679"></a>00679     vector&lt;cv::Vec2d&gt; pointImg1,pointImg2;
<a name="l00680"></a>00680     <span class="keywordflow">for</span> ( i=0; i &lt; key_size; ++i )
<a name="l00681"></a>00681     {
<a name="l00682"></a>00682       <a class="code" href="class_opencv_sf_m_1_1_track_of_points.html" title="This class store the track of keypoints. A track is a connected set of matching keypoints across mult...">TrackOfPoints</a> &amp;track = matches[ i ];
<a name="l00683"></a>00683       cv::DMatch match = track.<a class="code" href="class_opencv_sf_m_1_1_track_of_points.html#a8d15153ed498789f27c319dadd6c58fd">toDMatch</a>( image1, image2 );
<a name="l00684"></a>00684 
<a name="l00685"></a>00685       pointImg1.push_back( cv::Vec2d( point_img1-&gt;getKeypoint( match.trainIdx ).pt.x,
<a name="l00686"></a>00686         point_img1-&gt;getKeypoint( match.trainIdx ).pt.y ) );
<a name="l00687"></a>00687       pointImg2.push_back( cv::Vec2d( point_img2-&gt;getKeypoint( match.queryIdx ).pt.x,
<a name="l00688"></a>00688         point_img2-&gt;getKeypoint( match.queryIdx ).pt.y ) );
<a name="l00689"></a>00689     }
<a name="l00690"></a>00690     vector&lt;cv::Vec2d&gt; pointNorm1 = <a class="code" href="class_opencv_sf_m_1_1_euclidean_estimator.html#adcf96ff6971f5ccd6c77c83c8e76e7e9" title="List of cameras (intra and extern parameters...)">cameras_</a>[ image1 ].getIntraParameters( )-&gt;
<a name="l00691"></a>00691       pixelToNormImageCoordinates( pointImg1 );
<a name="l00692"></a>00692     vector&lt;cv::Vec2d&gt; pointNorm2 = <a class="code" href="class_opencv_sf_m_1_1_euclidean_estimator.html#adcf96ff6971f5ccd6c77c83c8e76e7e9" title="List of cameras (intra and extern parameters...)">cameras_</a>[ image2 ].getIntraParameters( )-&gt;
<a name="l00693"></a>00693       pixelToNormImageCoordinates( pointImg2 );
<a name="l00694"></a>00694     key_size = pointNorm1.size( );
<a name="l00695"></a>00695     <span class="keywordflow">for</span> ( i=0; i &lt; key_size; ++i )
<a name="l00696"></a>00696     {
<a name="l00697"></a>00697       x1( 0,i ) = -pointNorm1[ i ][ 0 ];
<a name="l00698"></a>00698       x1( 1,i ) = -pointNorm1[ i ][ 1 ];
<a name="l00699"></a>00699       x2( 0,i ) = -pointNorm2[ i ][ 0 ];
<a name="l00700"></a>00700       x2( 1,i ) = -pointNorm2[ i ][ 1 ];
<a name="l00701"></a>00701     }
<a name="l00702"></a>00702     
<a name="l00703"></a>00703     <span class="keywordtype">double</span> error = robust5Points( x1, x2,
<a name="l00704"></a>00704       <a class="code" href="class_opencv_sf_m_1_1_euclidean_estimator.html#a434eb8f3da74122c2dd7fd323569a629" title="Intra parameters of cameras (don&#39;t use them, they are strongly related to cameras_ attribut!...">intra_params_</a>[ image1 ], <a class="code" href="class_opencv_sf_m_1_1_euclidean_estimator.html#a434eb8f3da74122c2dd7fd323569a629" title="Intra parameters of cameras (don&#39;t use them, they are strongly related to cameras_ attribut!...">intra_params_</a>[ image2 ], E );
<a name="l00705"></a>00705 
<a name="l00706"></a>00706 
<a name="l00707"></a>00707     <span class="comment">//std::cout&lt;&lt;&quot;E: &quot;&lt;&lt;E&lt;&lt;std::endl;</span>
<a name="l00708"></a>00708     <span class="comment">//std::cout&lt;&lt;&quot;max_error: &quot;&lt;&lt;error&lt;&lt;std::endl;</span>
<a name="l00709"></a>00709 
<a name="l00710"></a>00710 
<a name="l00711"></a>00711     <span class="comment">//From this essential matrix extract relative motion:</span>
<a name="l00712"></a>00712     libmv::Mat3 R;
<a name="l00713"></a>00713     libmv::Vec3 t;
<a name="l00714"></a>00714     libmv::Vec2 x1Col, x2Col;
<a name="l00715"></a>00715     x1Col &lt;&lt; x1( 0,0 ), x1( 1,0 );
<a name="l00716"></a>00716     x2Col &lt;&lt; x2( 0,0 ), x2( 1,0 );
<a name="l00717"></a>00717     <span class="keywordtype">bool</span> ok = libmv::MotionFromEssentialAndCorrespondence( E,
<a name="l00718"></a>00718       <a class="code" href="class_opencv_sf_m_1_1_euclidean_estimator.html#a434eb8f3da74122c2dd7fd323569a629" title="Intra parameters of cameras (don&#39;t use them, they are strongly related to cameras_ attribut!...">intra_params_</a>[ image1 ], x1Col,
<a name="l00719"></a>00719       <a class="code" href="class_opencv_sf_m_1_1_euclidean_estimator.html#a434eb8f3da74122c2dd7fd323569a629" title="Intra parameters of cameras (don&#39;t use them, they are strongly related to cameras_ attribut!...">intra_params_</a>[ image2 ], x2Col,
<a name="l00720"></a>00720       &amp;R, &amp;t );
<a name="l00721"></a>00721 
<a name="l00722"></a>00722     <a class="code" href="class_opencv_sf_m_1_1_euclidean_estimator.html#a4649831fdd99cb15be351928dc69213b" title="rotations matrix of cameras (don&#39;t use them, they are strongly related to cameras_ attribut!...">rotations_</a>[ image2 ] = R * <a class="code" href="class_opencv_sf_m_1_1_euclidean_estimator.html#a4649831fdd99cb15be351928dc69213b" title="rotations matrix of cameras (don&#39;t use them, they are strongly related to cameras_ attribut!...">rotations_</a>[ image1 ];
<a name="l00723"></a>00723     <a class="code" href="class_opencv_sf_m_1_1_euclidean_estimator.html#a5b5ca96b398aad275d7ffd79c492d9ed" title="translation vectors of cameras (don&#39;t use them, they are strongly related to cameras_ attribut!...">translations_</a>[ image2 ] = t + R * <a class="code" href="class_opencv_sf_m_1_1_euclidean_estimator.html#a5b5ca96b398aad275d7ffd79c492d9ed" title="translation vectors of cameras (don&#39;t use them, they are strongly related to cameras_ attribut!...">translations_</a>[ image1 ];
<a name="l00724"></a>00724 
<a name="l00725"></a>00725     <span class="comment">//update camera&#39;s structure:</span>
<a name="l00726"></a>00726     cv::Mat newRotation,newTranslation;
<a name="l00727"></a>00727     cv::eigen2cv( rotations_[ image2 ], newRotation );
<a name="l00728"></a>00728     cv::eigen2cv( translations_[ image2 ], newTranslation );
<a name="l00729"></a>00729     <a class="code" href="class_opencv_sf_m_1_1_euclidean_estimator.html#adcf96ff6971f5ccd6c77c83c8e76e7e9" title="List of cameras (intra and extern parameters...)">cameras_</a>[ image2 ].setRotationMatrix( newRotation );
<a name="l00730"></a>00730     <a class="code" href="class_opencv_sf_m_1_1_euclidean_estimator.html#adcf96ff6971f5ccd6c77c83c8e76e7e9" title="List of cameras (intra and extern parameters...)">cameras_</a>[ image2 ].setTranslationVector( newTranslation );
<a name="l00731"></a>00731 
<a name="l00732"></a>00732     <span class="comment">//this camera is now computed:</span>
<a name="l00733"></a>00733     <a class="code" href="class_opencv_sf_m_1_1_euclidean_estimator.html#a678b38f149b13435499e5e3f8bc83ca3" title="List of camera computed.">camera_computed_</a>[ image2 ] = <span class="keyword">true</span>;
<a name="l00734"></a>00734 
<a name="l00735"></a>00735     <span class="comment">//Triangulate the points:</span>
<a name="l00736"></a>00736     <a class="code" href="class_opencv_sf_m_1_1_structure_estimator.html" title="This class tries to find the 3D structure using a sequence and cameras fully parameterized.">StructureEstimator</a> se( &amp;<a class="code" href="class_opencv_sf_m_1_1_euclidean_estimator.html#a6b9217240500126545a9b3696f9bdcbe" title="Object containing all 2D information of this sequence.">sequence_</a>, &amp;this-&gt;<a class="code" href="class_opencv_sf_m_1_1_euclidean_estimator.html#adcf96ff6971f5ccd6c77c83c8e76e7e9" title="List of cameras (intra and extern parameters...)">cameras_</a> );
<a name="l00737"></a>00737     vector&lt;int&gt; images_to_compute;
<a name="l00738"></a>00738     images_to_compute.push_back( image1 );
<a name="l00739"></a>00739     images_to_compute.push_back( image2 );
<a name="l00740"></a>00740     <a class="code" href="class_opencv_sf_m_1_1_euclidean_estimator.html#a18a1079d31b360865cf58088f71e7a36" title="list of 3D points computed">point_computed_</a> = se.<a class="code" href="class_opencv_sf_m_1_1_structure_estimator.html#aebc57769a62034b549d9b729a3e8c1e7">computeStructure</a>( images_to_compute );
<a name="l00741"></a>00741     <span class="comment">//bundleAdjustement();</span>
<a name="l00742"></a>00742   }
<a name="l00743"></a>00743 
<a name="l00744"></a><a class="code" href="class_opencv_sf_m_1_1_euclidean_estimator.html#a0c70610dd045f2526547682ef096cba4">00744</a>   <span class="keywordtype">void</span> <a class="code" href="class_opencv_sf_m_1_1_euclidean_estimator.html#a0c70610dd045f2526547682ef096cba4">EuclideanEstimator::computeReconstruction</a>( )
<a name="l00745"></a>00745   {
<a name="l00746"></a>00746     vector&lt;TrackOfPoints&gt;&amp; tracks = <a class="code" href="class_opencv_sf_m_1_1_euclidean_estimator.html#a6b9217240500126545a9b3696f9bdcbe" title="Object containing all 2D information of this sequence.">sequence_</a>.<a class="code" href="class_opencv_sf_m_1_1_sequence_analyzer.html#af7c11e97664c050782bfc2d1808b219e">getTracks</a>( );
<a name="l00747"></a>00747     vector&lt; Ptr&lt; PointsToTrack &gt; &gt; &amp;points_to_track = <a class="code" href="class_opencv_sf_m_1_1_euclidean_estimator.html#a6b9217240500126545a9b3696f9bdcbe" title="Object containing all 2D information of this sequence.">sequence_</a>.<a class="code" href="class_opencv_sf_m_1_1_sequence_analyzer.html#aefda449985e3013c6ac2adc5f61151d4">getPoints</a>( );
<a name="l00748"></a>00748     <a class="code" href="class_opencv_sf_m_1_1_images_graph_connection.html" title="This class modelizes the images graph connections.">ImagesGraphConnection</a> &amp;images_graph = <a class="code" href="class_opencv_sf_m_1_1_euclidean_estimator.html#a6b9217240500126545a9b3696f9bdcbe" title="Object containing all 2D information of this sequence.">sequence_</a>.<a class="code" href="class_opencv_sf_m_1_1_sequence_analyzer.html#a2354ea088e81bcec7e85ceceebda7e21">getImgGraph</a>( );
<a name="l00749"></a>00749     <span class="comment">//double ransac_threshold = 0.4 * sequence_.getImage( 0 ).rows / 100.0;</span>
<a name="l00750"></a>00750     <span class="keywordtype">double</span> ransac_threshold = 3.0;
<a name="l00751"></a>00751     <span class="comment">//now create the graph:</span>
<a name="l00752"></a>00752 
<a name="l00753"></a>00753     <span class="keywordtype">int</span> img1,img2;
<a name="l00754"></a>00754     <span class="keywordtype">int</span> nbMatches = images_graph.<a class="code" href="class_opencv_sf_m_1_1_images_graph_connection.html#a96ff89db7e87560208a9bcda7f66d6bd">getHighestLink</a>( img1,img2 );
<a name="l00755"></a>00755     vector&lt;ImageLink&gt; bestMatches;
<a name="l00756"></a>00756     images_graph.<a class="code" href="class_opencv_sf_m_1_1_images_graph_connection.html#a72329276aeacf384241795e3541db218">getOrderedLinks</a>( bestMatches, MIN(nbMatches/2,100), nbMatches );
<a name="l00757"></a>00757     <span class="keywordtype">double</span> min_inliners=1e7;
<a name="l00758"></a>00758     <span class="keywordtype">int</span> index_of_min=0;
<a name="l00759"></a>00759     cv::Mat minFundamental;
<a name="l00760"></a>00760     <span class="keywordflow">for</span>( <span class="keywordtype">unsigned</span> <span class="keywordtype">int</span> cpt=0;cpt&lt;bestMatches.size( );cpt++ )
<a name="l00761"></a>00761     {
<a name="l00762"></a>00762       <span class="comment">//construct the homography and choose the worse matches:</span>
<a name="l00763"></a>00763       <span class="comment">//( see Snavely &quot;Modeling the World from Internet Photo Collections&quot; )</span>
<a name="l00764"></a>00764       std::vector&lt;cv::Point2f&gt; pointsImg1, pointsImg2;
<a name="l00765"></a>00765       vector&lt;uchar&gt; status;
<a name="l00766"></a>00766       points_to_track[ bestMatches[ cpt ].imgSrc ]-&gt;getKeyMatches( tracks,
<a name="l00767"></a>00767         bestMatches[ cpt ].imgDest, pointsImg1 );
<a name="l00768"></a>00768       points_to_track[ bestMatches[ cpt ].imgDest ]-&gt;getKeyMatches( tracks,
<a name="l00769"></a>00769         bestMatches[ cpt ].imgSrc, pointsImg2 );
<a name="l00770"></a>00770 
<a name="l00771"></a>00771       <span class="comment">//compute the homography:</span>
<a name="l00772"></a>00772       cv::findHomography( pointsImg1,pointsImg2,status,CV_RANSAC,
<a name="l00773"></a>00773         ransac_threshold );
<a name="l00774"></a>00774       <span class="comment">//count the inliner points:</span>
<a name="l00775"></a>00775       <span class="keywordtype">double</span> inliners=0;
<a name="l00776"></a>00776       <span class="keywordflow">for</span>( <span class="keywordtype">unsigned</span> <span class="keywordtype">int</span> i=0;i&lt;status.size( );++i )
<a name="l00777"></a>00777       {
<a name="l00778"></a>00778         <span class="keywordflow">if</span>( status[ i ] != 0 )
<a name="l00779"></a>00779           inliners++;
<a name="l00780"></a>00780       }
<a name="l00781"></a>00781       <span class="keywordtype">double</span> percent_inliner = inliners/<span class="keyword">static_cast&lt;</span><span class="keywordtype">double</span><span class="keyword">&gt;</span>( pointsImg1.size( ) );
<a name="l00782"></a>00782       <span class="keywordflow">if</span>( percent_inliner &lt; min_inliners )
<a name="l00783"></a>00783       {
<a name="l00784"></a>00784         min_inliners = percent_inliner;
<a name="l00785"></a>00785         index_of_min = cpt;
<a name="l00786"></a>00786         minFundamental = cv::findFundamentalMat( pointsImg1, pointsImg2,
<a name="l00787"></a>00787           status, cv::FM_RANSAC );
<a name="l00788"></a>00788       }
<a name="l00789"></a>00789     }
<a name="l00790"></a>00790     <span class="comment">//we will start the reconstruction using bestMatches[ index_of_min ]</span>
<a name="l00791"></a>00791     <span class="comment">//to avoid degenerate cases such as coincident cameras</span>
<a name="l00792"></a>00792     img1 = bestMatches[ index_of_min ].imgSrc;
<a name="l00793"></a>00793     img2 = bestMatches[ index_of_min ].imgDest;
<a name="l00794"></a>00794     cout&lt;&lt;img1&lt;&lt;<span class="stringliteral">&quot;, &quot;</span>&lt;&lt;img2&lt;&lt;endl;
<a name="l00795"></a>00795     <span class="comment">//sequence_.showTracksBetween(img1, img2);</span>
<a name="l00796"></a>00796 
<a name="l00797"></a>00797     vector&lt;int&gt; images_computed;
<a name="l00798"></a>00798     images_computed.push_back( img1 );
<a name="l00799"></a>00799     images_computed.push_back( img2 );
<a name="l00800"></a>00800     <a class="code" href="class_opencv_sf_m_1_1_euclidean_estimator.html#a678b38f149b13435499e5e3f8bc83ca3" title="List of camera computed.">camera_computed_</a>[ img1 ] = <span class="keyword">true</span>;
<a name="l00801"></a>00801     <a class="code" href="class_opencv_sf_m_1_1_euclidean_estimator.html#a99b28486f11bc31a1d8b2b52ad3550a5">initialReconstruction</a>( img1, img2 );
<a name="l00802"></a>00802     <span class="comment">//bundleAdjustement();</span>
<a name="l00803"></a>00803 
<a name="l00804"></a>00804     <span class="comment">//now we have updated the position of the camera which take img2</span>
<a name="l00805"></a>00805     <span class="comment">//and 3D estimation from these 2 first cameras...</span>
<a name="l00806"></a>00806     <span class="comment">//Find for other cameras position:</span>
<a name="l00807"></a>00807     vector&lt;ImageLink&gt; images_close;
<a name="l00808"></a>00808     <span class="keywordtype">int</span> nbIter = 0 ;
<a name="l00809"></a>00809     <span class="comment">//while( nbMatches&gt;10 &amp;&amp; images_computed.size()&lt;cameras_.size()/2 &amp;&amp; nbIter&lt;4 )</span>
<a name="l00810"></a>00810     {
<a name="l00811"></a>00811       nbIter++;
<a name="l00812"></a>00812       images_close.clear( );
<a name="l00813"></a>00813       <span class="keywordflow">while</span> ( images_close.size( ) &lt; 2 )
<a name="l00814"></a>00814       {
<a name="l00815"></a>00815         <span class="keywordflow">for</span>(<span class="keywordtype">size_t</span> cpt = 0; cpt&lt;images_computed.size(); ++cpt )
<a name="l00816"></a>00816         {
<a name="l00817"></a>00817           images_graph.<a class="code" href="class_opencv_sf_m_1_1_images_graph_connection.html#aca15fc0c0206192d1c390ec4ded69e70">getImagesRelatedTo</a>( images_computed[ cpt ],
<a name="l00818"></a>00818             images_close, nbMatches * 0.8 - 10 );
<a name="l00819"></a>00819         }
<a name="l00820"></a>00820         nbMatches = nbMatches * 0.8 - 10;
<a name="l00821"></a>00821       }
<a name="l00822"></a>00822 
<a name="l00823"></a>00823       <span class="comment">//for each images, comptute the camera position:</span>
<a name="l00824"></a>00824       <span class="keywordflow">for</span>( <span class="keywordtype">size_t</span> cpt=0;cpt&lt;images_close.size( );cpt++ )
<a name="l00825"></a>00825       {
<a name="l00826"></a>00826         <span class="comment">//We don&#39;t want to compute twice the same camera position:</span>
<a name="l00827"></a>00827         <span class="keywordtype">int</span> new_id_image = -1,
<a name="l00828"></a>00828           old_id_image = -1;
<a name="l00829"></a>00829         <span class="keywordflow">if</span>( !<a class="code" href="class_opencv_sf_m_1_1_euclidean_estimator.html#a678b38f149b13435499e5e3f8bc83ca3" title="List of camera computed.">camera_computed_</a>[ images_close[ cpt ].imgSrc ] )
<a name="l00830"></a>00830         {
<a name="l00831"></a>00831           new_id_image = images_close[ cpt ].imgSrc;
<a name="l00832"></a>00832           old_id_image = images_close[ cpt ].imgDest;
<a name="l00833"></a>00833         }
<a name="l00834"></a>00834         <span class="keywordflow">else</span>
<a name="l00835"></a>00835         {
<a name="l00836"></a>00836           new_id_image = images_close[ cpt ].imgDest;
<a name="l00837"></a>00837           old_id_image = images_close[ cpt ].imgSrc;
<a name="l00838"></a>00838         }
<a name="l00839"></a>00839 
<a name="l00840"></a>00840         <span class="keywordflow">if</span>( new_id_image &gt;= 0 )
<a name="l00841"></a>00841         {
<a name="l00842"></a>00842           <span class="keywordflow">if</span>( <a class="code" href="class_opencv_sf_m_1_1_euclidean_estimator.html#ae9b18130f68de37ae8385e01426ee82f">cameraResection</a>( new_id_image ) )
<a name="l00843"></a>00843             images_computed.push_back( new_id_image );
<a name="l00844"></a>00844         }
<a name="l00845"></a>00845       }
<a name="l00846"></a>00846 
<a name="l00847"></a>00847       <span class="comment">//Triangulate the points:</span>
<a name="l00848"></a>00848       <a class="code" href="class_opencv_sf_m_1_1_structure_estimator.html" title="This class tries to find the 3D structure using a sequence and cameras fully parameterized.">StructureEstimator</a> se( &amp;<a class="code" href="class_opencv_sf_m_1_1_euclidean_estimator.html#a6b9217240500126545a9b3696f9bdcbe" title="Object containing all 2D information of this sequence.">sequence_</a>, &amp;this-&gt;<a class="code" href="class_opencv_sf_m_1_1_euclidean_estimator.html#adcf96ff6971f5ccd6c77c83c8e76e7e9" title="List of cameras (intra and extern parameters...)">cameras_</a> );
<a name="l00849"></a>00849 
<a name="l00850"></a>00850       vector&lt;int&gt; images_to_compute;
<a name="l00851"></a>00851       <span class="keywordtype">unsigned</span> <span class="keywordtype">int</span> key_size = <a class="code" href="class_opencv_sf_m_1_1_euclidean_estimator.html#a678b38f149b13435499e5e3f8bc83ca3" title="List of camera computed.">camera_computed_</a>.size( );
<a name="l00852"></a>00852       <span class="keywordflow">for</span> (<span class="keywordtype">unsigned</span> <span class="keywordtype">int</span> i=0; i &lt; key_size; ++i )
<a name="l00853"></a>00853       {
<a name="l00854"></a>00854         <span class="keywordflow">if</span>( <a class="code" href="class_opencv_sf_m_1_1_euclidean_estimator.html#a678b38f149b13435499e5e3f8bc83ca3" title="List of camera computed.">camera_computed_</a>[ i ] )
<a name="l00855"></a>00855           images_to_compute.push_back( i );
<a name="l00856"></a>00856       }
<a name="l00857"></a>00857 
<a name="l00858"></a>00858       <a class="code" href="class_opencv_sf_m_1_1_euclidean_estimator.html#a18a1079d31b360865cf58088f71e7a36" title="list of 3D points computed">point_computed_</a> = se.<a class="code" href="class_opencv_sf_m_1_1_structure_estimator.html#aebc57769a62034b549d9b729a3e8c1e7">computeStructure</a>( images_to_compute, 2 );
<a name="l00859"></a>00859       se.<a class="code" href="class_opencv_sf_m_1_1_structure_estimator.html#afe1217c1dfcb8d2336272f3faba163bc">removeOutliersTracks</a>( 5, &amp;<a class="code" href="class_opencv_sf_m_1_1_euclidean_estimator.html#a18a1079d31b360865cf58088f71e7a36" title="list of 3D points computed">point_computed_</a> );
<a name="l00860"></a>00860       <a class="code" href="class_opencv_sf_m_1_1_sequence_analyzer.html#a694fee23e02afc94b48fcfea7ecf2512">SequenceAnalyzer::keepOnlyCorrectMatches</a>(<a class="code" href="class_opencv_sf_m_1_1_euclidean_estimator.html#a6b9217240500126545a9b3696f9bdcbe" title="Object containing all 2D information of this sequence.">sequence_</a>,2,0);
<a name="l00861"></a>00861       <span class="comment">// Performs a bundle adjustment</span>
<a name="l00862"></a>00862       <span class="comment">//bundleAdjustement();</span>
<a name="l00863"></a>00863     }<span class="comment">//*/</span>
<a name="l00864"></a>00864     <a class="code" href="class_opencv_sf_m_1_1_euclidean_estimator.html#a9790538fdb197f1cebd4f9f26f85b975">bundleAdjustement</a>();
<a name="l00865"></a>00865     <a class="code" href="class_opencv_sf_m_1_1_euclidean_estimator.html#aebf9a3c922dd816828bcbb3a6bc6df29">viewEstimation</a>();
<a name="l00866"></a>00866   }
<a name="l00867"></a>00867 
<a name="l00868"></a><a class="code" href="class_opencv_sf_m_1_1_euclidean_estimator.html#aebf9a3c922dd816828bcbb3a6bc6df29">00868</a>   <span class="keywordtype">void</span> <a class="code" href="class_opencv_sf_m_1_1_euclidean_estimator.html#aebf9a3c922dd816828bcbb3a6bc6df29">EuclideanEstimator::viewEstimation</a>()
<a name="l00869"></a>00869   {
<a name="l00870"></a>00870     vector&lt;cv::Vec3d&gt; tracks3D;
<a name="l00871"></a>00871     vector&lt; unsigned int &gt; colors;
<a name="l00872"></a>00872     vector&lt;TrackOfPoints&gt;::iterator itTrack=<a class="code" href="class_opencv_sf_m_1_1_euclidean_estimator.html#a18a1079d31b360865cf58088f71e7a36" title="list of 3D points computed">point_computed_</a>.begin( );
<a name="l00873"></a>00873     <span class="keywordflow">while</span> ( itTrack != <a class="code" href="class_opencv_sf_m_1_1_euclidean_estimator.html#a18a1079d31b360865cf58088f71e7a36" title="list of 3D points computed">point_computed_</a>.end( ) )
<a name="l00874"></a>00874     {
<a name="l00875"></a>00875       tracks3D.push_back( ( cv::Vec3d )( *itTrack ) );
<a name="l00876"></a>00876       colors.push_back( itTrack-&gt;getColor() );
<a name="l00877"></a>00877       itTrack++;
<a name="l00878"></a>00878     }
<a name="l00879"></a>00879 
<a name="l00881"></a>00881     <span class="comment">// Open 3D viewer and add point cloud</span>
<a name="l00882"></a>00882     <a class="code" href="class_opencv_sf_m_1_1_visualizer.html" title="This class can be used to view the differents object involved in current structure from motion proces...">Visualizer</a> debugView ( <span class="stringliteral">&quot;Debug viewer&quot;</span> );
<a name="l00883"></a>00883     debugView.<a class="code" href="class_opencv_sf_m_1_1_visualizer.html#aae0430188f5f68dd792d2a2d8305d690">add3DPointsColored</a>( tracks3D,colors, <span class="stringliteral">&quot;Euclidean estimated&quot;</span> );
<a name="l00884"></a>00884     
<a name="l00885"></a>00885     <span class="keywordflow">for</span>( <span class="keywordtype">unsigned</span> <span class="keywordtype">int</span> i = 0; i&lt;<a class="code" href="class_opencv_sf_m_1_1_euclidean_estimator.html#adcf96ff6971f5ccd6c77c83c8e76e7e9" title="List of cameras (intra and extern parameters...)">cameras_</a>.size( ) ; ++i )
<a name="l00886"></a>00886       <span class="keywordflow">if</span>( <a class="code" href="class_opencv_sf_m_1_1_euclidean_estimator.html#a678b38f149b13435499e5e3f8bc83ca3" title="List of camera computed.">camera_computed_</a>[i] )
<a name="l00887"></a>00887       {
<a name="l00888"></a>00888         std::stringstream cam_name;
<a name="l00889"></a>00889         cam_name&lt;&lt;<span class="stringliteral">&quot;Cam&quot;</span>&lt;&lt; ( i+1 );
<a name="l00890"></a>00890         debugView.<a class="code" href="class_opencv_sf_m_1_1_visualizer.html#a304a33dc1adcbc061c65b8eec87c6fcc">addCamera</a>( <a class="code" href="class_opencv_sf_m_1_1_euclidean_estimator.html#adcf96ff6971f5ccd6c77c83c8e76e7e9" title="List of cameras (intra and extern parameters...)">cameras_</a>[ i ],
<a name="l00891"></a>00891           cam_name.str() );
<a name="l00892"></a>00892       }<span class="comment">//*/</span>
<a name="l00893"></a>00893       
<a name="l00894"></a>00894 
<a name="l00895"></a>00895 
<a name="l00896"></a>00896       debugView.<a class="code" href="class_opencv_sf_m_1_1_visualizer.html#af39fe6832962888e4c81c5a4ae6c6f55">runInteract</a>( );
<a name="l00897"></a>00897   }
<a name="l00898"></a>00898 }
</pre></div></div>
</div>
<!-- window showing the filter options -->
<div id="MSearchSelectWindow"
     onmouseover="return searchBox.OnSearchSelectShow()"
     onmouseout="return searchBox.OnSearchSelectHide()"
     onkeydown="return searchBox.OnSearchSelectKey(event)">
<a class="SelectItem" href="javascript:void(0)" onclick="searchBox.OnSelectItem(0)"><span class="SelectionMark">&#160;</span>All</a><a class="SelectItem" href="javascript:void(0)" onclick="searchBox.OnSelectItem(1)"><span class="SelectionMark">&#160;</span>Classes</a><a class="SelectItem" href="javascript:void(0)" onclick="searchBox.OnSelectItem(2)"><span class="SelectionMark">&#160;</span>Functions</a><a class="SelectItem" href="javascript:void(0)" onclick="searchBox.OnSelectItem(3)"><span class="SelectionMark">&#160;</span>Variables</a></div>

<!-- iframe showing the search results (closed by default) -->
<div id="MSearchResultsWindow">
<iframe src="javascript:void(0)" frameborder="0" 
        name="MSearchResults" id="MSearchResults">
</iframe>
</div>



<hr class="footer"/><address class="footer"><small>
Generated on Sun Aug 21 2011 16:45:52 for GSoC2011SfM by &#160;<a href="http://www.doxygen.org/index.html">
<img class="footer" src="doxygen.png" alt="doxygen"/>
</a> 1.7.5.1
</small></address>

</body>
</html>
