#include <btl/ViewGeometry/FivePointPose.hpp>
#include <btl/ViewGeometry/DisambiguateEssential.hpp>
#include <btl/ViewGeometry/EssentialMatrix.hpp>
#include <btl/ViewGeometry/PointTriangulation.hpp>
#include <btl/Maths/FloatComparison.hpp>
#include <btl/Maths/GaussJordanElimination.hpp>
#include <btl/RobustEstimation/PreemptiveRANSAC.hpp>

#include "TurntableController.hpp"
#include "DrawAxisMarker.hpp"
#include "DrawCameraMarker.hpp"
#include <btl/extra/Gui/ColourMap.hpp>
#include <btl/extra/Simulation/PointCloud.hpp>

#include <Eigen/Core>
#include <Eigen/Geometry>
#include <Eigen/SVD>
#include <Eigen/Eigenvalues>

// ### WARNING ###
// Storing fixed-size Eigen objects in a std::vector can break due to
// Eigen's alignment requirements (see Eigen's "Using STL containers"
// documentation: such vectors need Eigen::aligned_allocator). In
// practice the risk only materialises if vectorised operations run
// directly on the elements in place, rather than on local copies
// taken out of the container first.

#include <boost/random.hpp>

#include <GL/glfw.h>

#include <iostream>
#include <sstream>
#include <vector>
#include <string>
#include <algorithm>
#include <iterator>
#include <stdexcept>
#include <limits>
#include <cassert>
#include <cmath>

// Tolerances for approximate floating-point comparison, expressed in
// units of machine epsilon.
// NOTE(review): CLOSE_ULPS/CLOSE_TOLERANCE and DEGREES_PER_RADIAN are
// not referenced in this file — possibly leftovers from shared
// scaffolding; confirm before removing.
const int CLOSE_ULPS = 32;
const double CLOSE_TOLERANCE = std::numeric_limits<double>::epsilon() * double(CLOSE_ULPS);
// Degree/radian conversion factors.
const double RADIANS_PER_DEGREE = M_PI/180.0;
const double DEGREES_PER_RADIAN = 180.0/M_PI;

// Depth extent of the rendered camera frustum markers, in world units.
const double CAMERA_SIZE = 0.3;
// Horizontal field of view of the simulated camera (radians).
const double CAMERA_FOV = 80.0 * RADIANS_PER_DEGREE;

using namespace btl::extra::simulation;

// One-time fixed-function GL state setup for the demo window.
void initGL() {
   // depth-tested, untextured, unlit, double-sided rendering
   glEnable(GL_DEPTH_TEST);
   glDisable(GL_TEXTURE_2D);
   glDisable(GL_LIGHTING);
   glDisable(GL_CULL_FACE);

   // antialiased lines with standard alpha blending
   glEnable(GL_LINE_SMOOTH);
   glEnable(GL_BLEND);
   glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);

   // dark grey background
   glClearColor(0.15, 0.15, 0.15, 1.0);
}

// Maps a window-space mouse position (pixels, origin top-left) onto the
// view plane at unit distance, for a symmetric frustum with the given
// horizontal field of view. Y is flipped so +y points up.
Eigen::Vector2f viewPlaneMousePt(int x, int y, double fov) {
   int winW, winH;
   glfwGetWindowSize(&winW, &winH);

   // half-extents of the view plane at z = 1
   const double halfW = std::tan(fov/2.0);
   const double halfH = halfW * (double(winH) / double(winW));

   // normalise pixel coords to [-1, 1] and scale by the plane extents
   const double planeX = (double( x) * 2.0 / winW - 1.0) * halfW;
   const double planeY = (double(-y) * 2.0 / winH + 1.0) * halfH;

   return Eigen::Vector2f(planeX, planeY);
}

void initView(const TurntableController& mouseball, double fov) {
   int width, height;
   glfwGetWindowSize(&width, &height);

   glViewport(0,0,width,height);

   const double aspect = double(height) / double(width);
   const double extentx = std::tan(fov/2.0);
   const double extenty = extentx * aspect;

   // draw the axis marker
   // nb: must be done before setting the projection, because
   //     DrawAxisMarker messes with the projection matrix
   DrawAxisMarker(fov).render(
      width, height, mouseball.getViewRotation());

   // set up the projection matrix
   glMatrixMode(GL_PROJECTION);
   glLoadIdentity();
   glFrustum(-extentx, extentx, -extenty, extenty, 1.0, 1000.0);

   // OpenGL expects the view to be along negative Z
   // (whereas we have it along positive Z)
   glScaled(1.0, 1.0, -1.0);

   // set up the view according to the controller
   const Eigen::Projective3d view = mouseball.getWorldToView();
   glMultMatrixd(view.matrix().data());

   glMatrixMode(GL_MODELVIEW);
   glLoadIdentity();
}

// Renders every landmark in `world` as a yellow GL point.
void renderWorldPoints(const PointCloud& world) {
   // the landmarks already live in a contiguous 3xN double buffer, so
   // hand them straight to GL as a vertex array rather than looping
   glEnableClientState(GL_VERTEX_ARRAY);

   glColor4f(1.0f, 1.0f, 0.0f, 1.0f);
   glVertexPointer(3, GL_DOUBLE, 0, reinterpret_cast<const void*>(world.points.data()));
   glDrawArrays(GL_POINTS, 0, world.points.cols());

   glDisableClientState(GL_VERTEX_ARRAY);
}

// Draws a camera frustum marker at the given camera-to-world pose.
// `col` is the marker colour; when `zcol` has non-zero alpha, a line
// along the camera's local +Z (its view direction) is drawn in that
// colour as well.
void renderCamera(
      const Eigen::Isometry3d& cameraToWorld,
      const Eigen::Vector4f& col, const Eigen::Vector4f& zcol) {
   DrawCameraMarker frustumMarker;
   frustumMarker.z = CAMERA_SIZE;
   frustumMarker.colour = col;

   glPushMatrix();
   glMultMatrixd(cameraToWorld.data());
   frustumMarker.render();

   // a fully transparent zcol suppresses the view-direction line
   const bool drawViewAxis = zcol(3) > 0.0f;
   if (drawViewAxis) {
      glColor4f(zcol(0), zcol(1), zcol(2), zcol(3));
      glBegin(GL_LINES);
      glVertex3d(0.0, 0.0, 0.0);
      glVertex3d(0.0, 0.0, CAMERA_SIZE);
      glEnd();
   }

   glPopMatrix();
}

// Draws a camera frustum marker without the view-direction line.
//
// Delegates to the three-argument overload with a fully transparent
// z-axis colour: its `zcol(3) > 0.0f` guard then skips the axis line,
// which makes this exactly the marker-only rendering the previous
// duplicated body performed.
void renderCamera(const Eigen::Isometry3d& cameraToWorld, const Eigen::Vector4f& col) {
   renderCamera(cameraToWorld, col, Eigen::Vector4f::Zero());
}

// Computes the sorted intersection of two ascending index lists.
//
// `common` is cleared and receives every index present in both `a` and
// `b`, in ascending order. Both inputs must be sorted ascending.
//
// Fix: the previous hand-rolled merge re-dereferenced `idxA` after
// possibly incrementing it past `a.end()` in its else branch
// (`if (*idxA < *idxB) ++idxA; if (*idxB < *idxA) ...`), which is
// undefined behaviour whenever the last element of `a` is smaller than
// the current element of `b`. std::set_intersection implements the
// same merge correctly.
void findCommonIndexes(std::vector<int>& common, const std::vector<int>& a, const std::vector<int>& b) {
   common.clear();
   common.reserve(std::min(a.size(), b.size()));

   std::set_intersection(a.begin(), a.end(), b.begin(), b.end(),
                         std::back_inserter(common));
}

// Adapts a pair of frames (restricted to their shared measurement
// indices) to the ModelSearchSpace interface used by PreemptiveRANSAC:
// hypotheses are essential matrices produced by the five-point solver,
// scored per-observation by the algebraic epipolar error.
struct FivePointModelGenerator : public btl::ModelSearchSpace<Eigen::Matrix3d>
{
   public:
      // The frame and index references must outlive this object.
      // `obsMap` maps RANSAC observation indices onto measurement
      // column indices valid in both frames.
      FivePointModelGenerator(
            const Frame& f1,
            const Frame& f2,
            const std::vector<int>& obsMap):
         frame1(f1), frame2(f2), obsMap(obsMap)
      {
      }

      virtual int numObservations() const
      {
         return obsMap.size();
      }

      // Gathers the sampled correspondences, runs the five-point
      // solver on them, and appends every solution to `output`.
      virtual void buildModels(const int obsIdxes[], int numObs, std::vector<Eigen::Matrix3d>& output) const
      {
         Eigen::Matrix3Xd pts1(3, numObs);
         Eigen::Matrix3Xd pts2(3, numObs);

         for (int sample = 0; sample < numObs; ++sample) {
            const int col = obsMap[obsIdxes[sample]];
            pts1.col(sample) = frame1.measurements.col(col);
            pts2.col(sample) = frame2.measurements.col(col);
         }

         btl::FivePointStewenius fivePt;
         fivePt.compute(pts1, pts2);

         const int solutionCount = fivePt.numSolutions();
         for (int sol = 0; sol < solutionCount; ++sol)
            output.push_back(fivePt.solution(sol));
      }

      // Algebraic epipolar residual |x2' * E * x1| for one observation.
      virtual double errorMetric(const Eigen::Matrix3d& E, int obsIdx) const
      {
         const int col = obsMap[obsIdx];
         const double residual = frame2.measurements.col(col).transpose() * E * frame1.measurements.col(col);
         return std::abs(residual);
      }

   private:
      const Frame& frame1;
      const Frame& frame2;
      const std::vector<int>& obsMap;
};

// Runs preemptive RANSAC with the five-point algorithm over the
// measurements shared between frame0 and frame1, reconstructs the
// relative pose from the best essential matrix (scaled to the known
// baseline length), and renders the recovered camera in green.
//
// `world` is currently unused but retained for signature parity with
// the other render helpers; `rng` drives the RANSAC sampling.
//
// Fixes/cleanup: removed unused locals (N, Green, Yellow, jet, normE)
// and added a guard so we never index commonIndexes[0] (previously UB)
// or run RANSAC when the frames share fewer than five correspondences.
void renderFivePointFrame(
      const PointCloud& world,
      const Frame& frame0,
      const Frame& frame1,
      boost::mt19937& rng) {

   const Eigen::Isometry3d& worldToCamera0 = frame0.worldToCamera;
   const Eigen::Isometry3d cameraToWorld0 = worldToCamera0.inverse();
   const Eigen::Matrix3Xd& measurements0 = frame0.measurements;

   boost::random_number_generator<boost::mt19937> std_rng(rng);

   // should be able to test a few hundred samples at frame-rate,
   // otherwise RANSAC on 5-point-pose won't really work very well...
   const int SampleSize = 5;
   const int SampleCount = 500;
   const int BlockSize = 50;
   const double InlierThreshold = 1e100; // the simulated data doesn't have any outliers

   const Eigen::Isometry3d& worldToCamera1 = frame1.worldToCamera;
   const Eigen::Isometry3d cameraToWorld1 = worldToCamera1.inverse();

   // relPose takes you from camera 1 to camera 0; its translation gives
   // the true baseline length used to fix the reconstruction's scale
   const Eigen::Isometry3d relPose = worldToCamera0 * cameraToWorld1;
   const Eigen::Matrix3Xd& measurements1 = frame1.measurements;
   const double baselineLength = relPose.translation().norm();

   std::vector<int> commonIndexes;
   findCommonIndexes(commonIndexes, frame0.indexes, frame1.indexes);

   // not enough shared correspondences to draw a five-point sample;
   // this also guards the commonIndexes[0] access further down
   if (static_cast<int>(commonIndexes.size()) < SampleSize)
      return;

   btl::PreemptiveRANSAC<Eigen::Matrix3d, SampleSize> ransac(SampleCount, InlierThreshold, BlockSize);
   FivePointModelGenerator fivePtModel(frame0, frame1, commonIndexes);

   // find and render the best hypothesis
   boost::optional<Eigen::Matrix3d> bestE = ransac.findModel(fivePtModel, std_rng);

   if (bestE) {
      const Eigen::Vector4f colour = Eigen::Vector4f(0.0f, 1.0f, 0.0f, 1.0f);

      // decompose E to four relative poses
      // (ComputeNormalisedE is kept from the original code even though
      // the normalised matrix itself is no longer read here)
      const Eigen::Matrix3d& E = *bestE;
      btl::DecomposeEssentialHorn rtE(E,
         btl::DecomposeEssentialHorn::ComputeNormalisedT |
         btl::DecomposeEssentialHorn::ComputeNormalisedE);

      // select the correct pose based on one point
      btl::DisambiguateEssential calcP(
         rtE.rotationX(), rtE.rotationY(), rtE.translation(),
         measurements0.col(commonIndexes[0]), measurements1.col(commonIndexes[0]));
      const Eigen::Isometry3d& Rt = calcP.pose();

      // apply our known baseline
      Eigen::Isometry3d reconstructedRelPose
         = Eigen::Translation3d(baselineLength * Rt.translation())
         * Eigen::Isometry3d(Rt.rotation());

      // render the best camera we found
      renderCamera(cameraToWorld0 * reconstructedRelPose.inverse(), colour);
   }
}

// Interpolates the simulated camera between frame0's pose and
// `cameraToWorldN` by `alpha` (0 = at frame0, 1 = at the far pose),
// captures a noisy measurement frame there, and runs the five-point
// RANSAC reconstruction of that frame against frame0.
void renderFivePointRansacTest(
      const PointCloud& world,
      const Frame& frame0,
      const Eigen::Isometry3d& cameraToWorldN,
      const NoisyPointSensor& simCamera,
      double alpha,
      boost::mt19937& rng) {

   assert(alpha >= 0.0 && alpha <= 1.0);

   const Eigen::Isometry3d cameraToWorld0 = frame0.worldToCamera.inverse();

   // the reference (origin) camera is drawn in white
   renderCamera(cameraToWorld0, Eigen::Vector4f::Ones());

   // pose the moving camera along the path and capture a frame there
   const Eigen::Isometry3d movingCamera = lerpTransform(alpha, cameraToWorld0, cameraToWorldN);
   const Frame frameN = simCamera.capture(movingCamera.inverse(), world.points, rng);

   //renderCamera(movingCamera, Eigen::Vector4f::Ones());
   renderFivePointFrame(world, frame0, frameN, rng);
}

// Entry point: builds a random landmark cloud, simulates a camera
// sweeping between two poses, and interactively visualises five-point
// RANSAC pose recovery each frame until ESC or window close.
int main(int argc, char** argv) {
   try {
      // fixed seed keeps the generated world and first frame
      // reproducible; animationRng (default seed) drives the
      // per-frame capture noise during the animation
      boost::mt19937 rng(361385024u);
      boost::mt19937 animationRng;

      std::cout << "Generating randomised landmarks...\n";

      // generate a random world with N landmarks, uniformly
      // distributed in a box in front of the cameras
      const int N = 1000;
      const Eigen::Vector3d minCoord(-10.0, -10.0, 10.0);
      const Eigen::Vector3d maxCoord( 10.0,  10.0, 20.0);
      PointCloud world;
      world.setUniformBox(N, minCoord, maxCoord, rng);

      std::cout << "Simulating motion...\n";

      // generate a simple linear camera motion: camera0 and camera1
      // are the camera-to-world poses at the two endpoints of the
      // animated path (a translation along X plus a yaw about Y)
      const Eigen::Isometry3d
         camera0
            = Eigen::Translation3d(-5.0, 0.0, 0.0)
            * Eigen::Isometry3d(Eigen::AngleAxisd(
               10.0*RADIANS_PER_DEGREE,
               Eigen::Vector3d::UnitY())
            );
      const Eigen::Isometry3d
         camera1
            = Eigen::Translation3d(15.0, 0.0, 0.0)
            * Eigen::Isometry3d(Eigen::AngleAxisd(
               -35.0*RADIANS_PER_DEGREE,
               Eigen::Vector3d::UnitY())
            );

      // simulated pinhole sensor with pixel-level measurement noise
      NoisyPointSensor simCamera;
      simCamera.excludeBehindCamera = true;
      simCamera.excludeOutOfFrame = true;
      simCamera.resX = 752;
      simCamera.resY = 480;
      simCamera.fovX = CAMERA_FOV;
      // sigma = 0.25
      // this corresponds with 95.5% of measurements being less than 0.5 pixels away
      // from the true position. this is roughly the same as saying that 95.5% of
      // features will be measured correctly to the nearest pixel
      simCamera.measurementNoise = 0.25;

      std::cout << "Capturing first frame...\n";

      // the reference frame that every animated frame is matched against
      Frame frame0 = simCamera.capture(camera0.inverse(), world.points, rng);

      // 800x600 window; 8 bits per colour channel, 16-bit depth buffer
      glfwInit();
      glfwOpenWindow(800, 600, 8, 8, 8, 8, 16, 0, GLFW_WINDOW);
      glfwEnable(GLFW_STICKY_KEYS);

      initGL();

      TurntableController mouseball;
      const double VIEW_FOV = 60.0 * RADIANS_PER_DEGREE;

      bool running = true;
      // wheelPos tracks the absolute wheel value so per-frame deltas
      // can be computed below
      int wheelPos = glfwGetMouseWheel();

      const double motionPeriod = 5.0; // seconds for the motion
      const double loopPeriod = motionPeriod + 1.5; // seconds for the whole loop (pause at the end for 1.5 seconds)
      double startTime = glfwGetTime();
      while (running) {
         glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
         initView(mouseball, VIEW_FOV);
         renderWorldPoints(world);

         // alpha ramps 0 -> 1 over motionPeriod, then holds at 1 for
         // the remainder of loopPeriod before the animation restarts
         double thisTime = glfwGetTime();
         double deltaT = (thisTime - startTime);
         double alpha = std::min(deltaT / motionPeriod, 1.0);
         if (deltaT > loopPeriod) {
            // NOTE(review): deltaT is not read again after this
            // assignment, so the clamp looks redundant
            deltaT = loopPeriod;
            startTime = thisTime; // restart the animation loop
         }

         renderFivePointRansacTest(
            world, frame0, camera1, simCamera,
            alpha, animationRng);

         glfwSwapBuffers();

         // process input
         int x, y;
         glfwGetMousePos(&x, &y);
         const Eigen::Vector2f mousePt = viewPlaneMousePt(x, y, VIEW_FOV);

         const bool leftDown = bool(glfwGetMouseButton(GLFW_MOUSE_BUTTON_LEFT));
         const bool rightDown = bool(glfwGetMouseButton(GLFW_MOUSE_BUTTON_RIGHT));
         const bool ctrlDown = glfwGetKey(GLFW_KEY_LCTRL) || glfwGetKey(GLFW_KEY_RCTRL);

         const int wheelDelta = glfwGetMouseWheel() - wheelPos;
         wheelPos += wheelDelta;

         // turntable control: left drag rotates; right drag moves the
         // look-at point (with ctrl, along Z — per the controller's
         // method names); releasing both buttons ends the drag
         if (mouseball.isDragging()) {
            if (!leftDown && !rightDown)
               mouseball.endDrag(mousePt);
            else
               mouseball.updateDrag(mousePt);
         } else {
            if (leftDown)
               mouseball.beginDragRotate(mousePt);
            else if (rightDown) {
               if (!ctrlDown)
                  mouseball.beginDragLookat(mousePt);
               else
                  mouseball.beginDragLookatZ(mousePt);
            }
         }

         // wheel zooms; the factor of 120 scales clicks into the
         // controller's range units
         if (wheelDelta)
            mouseball.scrollRange(wheelDelta*120);

         // quit on ESC or when the window is closed
         running =
            !glfwGetKey(GLFW_KEY_ESC) && glfwGetWindowParam(GLFW_OPENED);
      }
      glfwTerminate();

      return 0;
   } catch (std::exception& e) {
      std::cerr << "Uncaught exception: " << e.what() << "\n";
      return 1;
   } catch (...) {
      std::cerr << "Uncaught exception: (unknown)\n";
      return 1;
   }
}
