/**
* Copyright (c) 2016-present, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE_render file in the root directory of this subproject. An additional grant
* of patent rights can be found in the PATENTS file in the same directory.
*/

#pragma once

#include <string>
#include <vector>

#include "OpticalFlowInterface.h"
#include "CvUtil.h"

namespace surround360 {
namespace optical_flow {

using namespace std;
using namespace cv;

// when rendering panoramas from slices of many novel views, there is lots of
// wasted computation. this is an idea for reducing that computation: build up
// a datastructure of just the pieces of the novel views we need, then do it
// all in 1 pass
struct LazyNovelViewBuffer {
  int width, height;
  // warpL[u][v] = (x, y, t). in the final panorama image at pixel coord u, v
  // we will take a piece of the novel view image at x, y, and time shift t.
  vector<vector<Point3f>> warpL;
  vector<vector<Point3f>> warpR;

  LazyNovelViewBuffer(int width, int height) {
    this->width = width;
    this->height = height;
    warpL = vector<vector<Point3f>>(width, vector<Point3f>(height));
    warpR = vector<vector<Point3f>>(width, vector<Point3f>(height));
  }
};

// stateless helper functions for warping and blending novel view images.
struct NovelViewUtil {
  // given an image, a flow vector field of the same size, generate a new image by
  // applying the flow to the input, scaled by t.
  // t = 0 applies none of the flow; larger t applies proportionally more
  // (presumably t is in [0, 1] — confirm with callers).
  static Mat generateNovelViewSimpleCvRemap(
    const Mat& srcImage,
    const Mat& flow,
    const double t);

  // for a left/right image pair, we compute flow from left to right, and right to left.
  // generate novel view by warping the left image to be like the right, and the right
  // image to be like the left, then blend the two warped results. in cases where ghosting
  // would occur (because the left and right warped images are not similar in color at
  // some pixels), a ghosting resolution heuristic is applied which prefers the pixel
  // color with the larger magnitude flow (which corresponds to the closer object in cases
  // of occlusion). to avoid seams, the logic for determining if/how much deghosting to
  // apply is not binary; instead it uses softmaxes to smoothly transition between normal
  // blending and flow-magnitude weighted blending.
  // blendL/blendR are the blending weights for the two warped images.
  static Mat combineNovelViews(
    const Mat& imageL,
    const float blendL,
    const Mat& imageR,
    const float blendR,
    const Mat& flowLtoR,
    const Mat& flowRtoL);

  // similar to combineNovelViews, but imageL and imageR are expected to be generated
  // by rendering a LazyNovelViewBuffer, which means their alpha channel encodes the
  // amount of blending/warping. uses similar flow based deghosting.
  // flowMagL/flowMagR are per-pixel flow magnitudes (cf. renderLazyNovelView's
  // second return value).
  static Mat combineLazyViews(
    const Mat& imageL,
    const Mat& imageR,
    const Mat& flowMagL,
    const Mat& flowMagR);
};

// this is an abstract base class for novel view generators
class NovelViewGenerator {
public:
  virtual ~NovelViewGenerator() = default;

  // a NovelViewGenerator may be asked to generate many novel views, but it
  // only needs to do some things once (like computing disparity). this will be
  // called before anything else.
  virtual void prepare(
    const Mat& colorImageL,
    const Mat& colorImageR,
    const Mat& prevFlowLtoR,
    const Mat& prevFlowRtoL,
    const Mat& prevColorImageL,
    const Mat& prevColorImageR) = 0;

  // simplified version of prepare which doesn't require previous frame data,
  // to be used when we don't care about temporal regularization. forwards
  // empty Mats for all of the previous-frame arguments.
  void prepare(const Mat& colorImageL, const Mat& colorImageR) {
    Mat prevFlowLtoR, prevFlowRtoL, prevColorImageL, prevColorImageR;
    prepare(
      colorImageL,
      colorImageR,
      prevFlowLtoR,
      prevFlowRtoL,
      prevColorImageL,
      prevColorImageR);
  }

  // generate a novel view that synthesizes a camera between colorImageL and colorImageR.
  // when shiftFromL = 0, this will be ~equal to the original colorImageL, and when
  // shiftFromL = 1, the novel view is at colorImageR. results are generated by warping
  // both the left and right input images and combining the two warped images- the main
  // result is in outNovelViewMerged. outNovelViewFromL/R are for debugging/visualization.
  virtual void generateNovelView(
    const double shiftFromL,
    Mat& outNovelViewMerged,
    Mat& outNovelViewFromL,
    Mat& outNovelViewFromR) = 0;

  // returns a pair where the first item is the novel view image
  // (with channel 3 encoding novel view shift value), and the second item is
  // a matrix of flow magnitudes (which are used later in deghosting).
  virtual pair<Mat, Mat> renderLazyNovelView(
    const int width,
    const int height,
    const vector<vector<Point3f>>& novelViewWarpBuffer,
    const Mat& srcImage,
    const Mat& opticalFlow,
    const bool invertT) = 0;

  // a LazyNovelViewBuffer contains all the data needed to render a chunk of a
  // stereo panorama. implementations should return a left/right eye image pair.
  virtual pair<Mat, Mat> combineLazyNovelViews(
    const LazyNovelViewBuffer& lazyBuffer) = 0;

  // for debugging. base implementation returns empty Mats; subclasses that
  // actually compute flow may expose it here.
  virtual Mat getFlowLtoR() { return Mat(); }
  virtual Mat getFlowRtoL() { return Mat(); }
};

// this is a base class for novel view generators that work by reduction to optical flow.
// it handles lazy generation fo the novel views, given flow.
class NovelViewGeneratorLazyFlow : public NovelViewGenerator {
public:
  Mat imageL, imageR;
  Mat flowLtoR, flowRtoL;

  ~NovelViewGeneratorLazyFlow() {}

  void generateNovelView(
    const double shiftFromL,
    Mat& outNovelViewMerged,
    Mat& outNovelViewFromL,
    Mat& outNovelViewFromR);

  pair<Mat, Mat> renderLazyNovelView(
    const int width,
    const int height,
    const vector<vector<Point3f>>& novelViewWarpBuffer,
    const Mat& srcImage,
    const Mat& opticalFlow,
    const bool invertT);

  pair<Mat, Mat> combineLazyNovelViews(const LazyNovelViewBuffer& lazyBuffer);

  Mat getFlowLtoR() { return flowLtoR; }
  Mat getFlowRtoL() { return flowRtoL; }
};

// the name "asymmetric" here refers to the idea that we compute an optical flow from
// left image to right image, and from right to left, and these flows may not necessarily
// be symmetric. however this is convenient because it is general reduction to optical
// flow. we might try other ideas, e.g. generate both flows simultaneously in a way that
// avoids redundant calculations. that would be a different subclass of
// NovelViewGeneratorLazyFlow.
class NovelViewGeneratorAsymmetricFlow : public NovelViewGeneratorLazyFlow {
public:
  string flowAlgName;

  NovelViewGeneratorAsymmetricFlow(const string flowAlgName) : flowAlgName(flowAlgName) {}

  ~NovelViewGeneratorAsymmetricFlow() {}

  void prepare(
    const Mat& colorImageL,
    const Mat& colorImageR,
    const Mat& prevFlowLtoR,
    const Mat& prevFlowRtoL,
    const Mat& prevColorImageL,
    const Mat& prevColorImageR);
};

} // namespace optical_flow
} // namespace surround360
