﻿using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Data;
using System.Drawing;
using System.Linq;
using System.Text;
using System.Windows.Forms;
using Emgu.CV;
using Emgu.CV.Structure;
using Emgu.Util;
using Emgu.CV.CvEnum;
using Emgu.CV.UI;

namespace panorama
{
    /// <summary>
    /// Simple panorama demo form: loads two images, matches SURF features
    /// between them, visualizes the matches side by side, and warps the
    /// images with a RANSAC-estimated homography.
    /// Uses the legacy Emgu CV 2.x C-style API (CvInvoke.cvXxx).
    /// </summary>
    public partial class Form1 : Form
    {
        // Source images picked by the user (left = "model", right = "observed").
        Image<Bgr, Byte> leftImage;
        Image<Bgr, Byte> rightImage;

        // Side-by-side match visualization produced by surf().
        Image<Bgr, Byte> surfImage;

        // Per-match displacement vectors (model.X - observed.X, observed.Y - model.Y),
        // filled by surf() and consumed by AlignmentImages().
        List<PointF> surfPoint = new List<PointF>();
        // Matched keypoint coordinates in the left (model) image.
        List<PointF> modelPoints = new List<PointF>();
        // Matched keypoint coordinates in the right (observed) image.
        List<PointF> observePoints = new List<PointF>();

        // NOTE(review): never assigned anywhere in this file, so it is always
        // null when passed to AlignmentImages (which ignores the parameter).
        Matrix<float> homographyMatrix = null;

        // NOTE(review): allocated but never used in this file; kept because the
        // class is partial and another part might reference them — verify.
        Image<Bgr, Byte> warpImage1 = new Image<Bgr, Byte>(800, 400);
        Image<Bgr, Byte> warpImage2 = new Image<Bgr, Byte>(800, 400);

        // Alignment offsets computed in AlignmentImages().
        // NOTE(review): written but never read in this file.
        int X, Y;

        public Form1()
        {
            InitializeComponent();
        }

        /// <summary>Loads the left (model) image via a file dialog.</summary>
        private void button1_Click(object sender, EventArgs e)
        {
            // OpenFileDialog is IDisposable — dispose it deterministically.
            using (OpenFileDialog dlg = new OpenFileDialog())
            {
                if (dlg.ShowDialog() != DialogResult.OK)
                    return;

                try
                {
                    leftImage = new Image<Bgr, Byte>(dlg.FileName);
                    input1.Image = leftImage;
                }
                catch (Exception ex)
                {
                    // Surface load/decode failures (bad path, unsupported format).
                    MessageBox.Show(ex.Message);
                }
            }
        }

        /// <summary>Loads the right (observed) image via a file dialog.</summary>
        private void button2_Click(object sender, EventArgs e)
        {
            using (OpenFileDialog dlg = new OpenFileDialog())
            {
                if (dlg.ShowDialog() != DialogResult.OK)
                    return;

                try
                {
                    rightImage = new Image<Bgr, Byte>(dlg.FileName);
                    input2.Image = rightImage;
                }
                catch (Exception ex)
                {
                    MessageBox.Show(ex.Message);
                }
            }
        }

        /// <summary>
        /// Extracts SURF features from both loaded images, matches them via
        /// KD feature trees (split by Laplacian sign), draws the matched
        /// keypoints on a side-by-side composite, and records the per-match
        /// displacement vectors in surfPoint.
        /// </summary>
        /// <returns>Composite image (left | right) with matches circled.</returns>
        private Image<Bgr, Byte> surf()
        {
            // BUG FIX: re-running surf() must not accumulate matches from a
            // previous run (only surfPoint was cleared before).
            modelPoints.Clear();
            observePoints.Clear();

            Image<Gray, Byte> modelImage = new Image<Gray, byte>(leftImage.Width, leftImage.Height);
            CvInvoke.cvCvtColor(leftImage.Ptr, modelImage.Ptr, COLOR_CONVERSION.CV_BGR2GRAY);
            // NOTE(review): the original called Canny()/SmoothBlur() here and
            // discarded the returned images, so they had no effect; the dead
            // calls were removed. Assign the results back if edge-based
            // preprocessing is actually wanted.

            #region Extract SURF features from the first (model) image.
            MCvSURFParams param1 = new MCvSURFParams(500, false);
            SURFFeature[] modelFeatures = modelImage.ExtractSURF(ref param1);
            // Features are partitioned by Laplacian sign so that only
            // compatible features are compared during matching.
            SURFFeature[] modelFeaturesPositiveLaplacian = Array.FindAll<SURFFeature>(modelFeatures, delegate(SURFFeature f) { return f.Point.laplacian >= 0; });
            SURFFeature[] modelFeaturesNegativeLaplacian = Array.FindAll<SURFFeature>(modelFeatures, delegate(SURFFeature f) { return f.Point.laplacian < 0; });

            // Build a KD feature tree per sign for nearest-neighbor search.
            FeatureTree featureTreePositiveLaplacian = new FeatureTree(
               Array.ConvertAll<SURFFeature, Matrix<float>>(
                  modelFeaturesPositiveLaplacian,
                  delegate(SURFFeature f) { return f.Descriptor; }));
            FeatureTree featureTreeNegativeLaplacian = new FeatureTree(
               Array.ConvertAll<SURFFeature, Matrix<float>>(
                  modelFeaturesNegativeLaplacian,
                  delegate(SURFFeature f) { return f.Descriptor; }));
            #endregion

            Image<Gray, Byte> observedImage = new Image<Gray, byte>(rightImage.Width, rightImage.Height);
            CvInvoke.cvCvtColor(rightImage.Ptr, observedImage.Ptr, COLOR_CONVERSION.CV_BGR2GRAY);

            #region Extract SURF features from the second (observed) image.
            MCvSURFParams param2 = new MCvSURFParams(500, false);
            SURFFeature[] imageFeatures = observedImage.ExtractSURF(ref param2);
            SURFFeature[] imageFeaturesPositiveLaplacian = Array.FindAll<SURFFeature>(imageFeatures, delegate(SURFFeature f) { return f.Point.laplacian >= 0; });
            SURFFeature[] imageFeaturesNegativeLaplacian = Array.FindAll<SURFFeature>(imageFeatures, delegate(SURFFeature f) { return f.Point.laplacian < 0; });
            #endregion

            // Ratio-test threshold for accepting a match (see matcher below).
            double matchDistanceRatio = 0.3;

            #region Match the features extracted from the two images.
            Matrix<float>[] imageFeatureDescriptorsPositiveLaplacian = Array.ConvertAll<SURFFeature, Matrix<float>>(
               imageFeaturesPositiveLaplacian,
               delegate(SURFFeature f) { return f.Descriptor; });
            Matrix<float>[] imageFeatureDescriptorsNegativeLaplacian = Array.ConvertAll<SURFFeature, Matrix<float>>(
               imageFeaturesNegativeLaplacian,
               delegate(SURFFeature f) { return f.Descriptor; });
            Matrix<Int32> result1;
            Matrix<double> dist1;

            // Two nearest neighbors per query (k = 2), 20 search steps.
            featureTreePositiveLaplacian.FindFeatures(imageFeatureDescriptorsPositiveLaplacian, out result1, out dist1, 2, 20);
            MatchSURFFeatureWithFeatureTree(
              modelFeaturesPositiveLaplacian,
              imageFeaturesPositiveLaplacian,
              matchDistanceRatio, result1.Data, dist1.Data, modelPoints, observePoints);

            featureTreeNegativeLaplacian.FindFeatures(imageFeatureDescriptorsNegativeLaplacian, out result1, out dist1, 2, 20);
            MatchSURFFeatureWithFeatureTree(
                 modelFeaturesNegativeLaplacian,
                 imageFeaturesNegativeLaplacian,
                 matchDistanceRatio, result1.Data, dist1.Data, modelPoints, observePoints);
            #endregion

            #region Compose the two inputs into one side-by-side image.
            Image<Bgr, Byte> res = new Image<Bgr, byte>(modelImage.Width + observedImage.Width, Math.Max(modelImage.Height, observedImage.Height));
            res.ROI = new System.Drawing.Rectangle(0, 0, modelImage.Width, modelImage.Height);
            leftImage.Copy(res, null);
            res.ROI = Rectangle.Empty;

            // BUG FIX: the right image must start where the left image ends
            // (modelImage.Width); the original used observedImage.Width, which
            // is wrong whenever the two inputs have different widths.
            res.ROI = new System.Drawing.Rectangle(modelImage.Width, 0, observedImage.Width, observedImage.Height);
            rightImage.Copy(res, null);
            res.ROI = Rectangle.Empty;
            #endregion

            #region Circle each matched keypoint pair on the composite.
            surfPoint.Clear();
            MCvScalar red = new MCvScalar(0, 0, 255); // BGR: red
            for (int i = 0; i < modelPoints.Count; i++)
            {
                Point modelPt = new Point((int)modelPoints[i].X, (int)modelPoints[i].Y);
                // Observed point is shifted right by the left image's width.
                Point observePt = new Point((int)observePoints[i].X + modelImage.Width, (int)observePoints[i].Y);

                CvInvoke.cvCircle(res.Ptr, modelPt, 5, red, 3, LINE_TYPE.EIGHT_CONNECTED, 0);
                CvInvoke.cvCircle(res.Ptr, observePt, 5, red, 3, LINE_TYPE.EIGHT_CONNECTED, 0);

                // Displacement between the matched keypoints, as computed by
                // the original code: (model.X - observed.X, observed.Y - model.Y).
                surfPoint.Add(new PointF(
                    modelPoints[i].X - observePoints[i].X,
                    observePoints[i].Y - modelPoints[i].Y));
            }
            #endregion

            return res;
        }

        /// <summary>
        /// Compares k=2 nearest-neighbor results against a distance-ratio test
        /// and appends the coordinates of accepted matches to the two lists.
        /// </summary>
        /// <param name="modelFeatures">Features of the model (left) image.</param>
        /// <param name="imageFeatures">Features of the observed (right) image.</param>
        /// <param name="matchDistanceRatio">Ratio-test threshold.</param>
        /// <param name="result1">Per-query indices of the 2 nearest model features (-1 = none).</param>
        /// <param name="dist1">Per-query distances to those neighbors (not guaranteed sorted).</param>
        /// <param name="modelPointList">Output: accepted model-image coordinates.</param>
        /// <param name="imagePointList">Output: accepted observed-image coordinates.</param>
        private static void MatchSURFFeatureWithFeatureTree(SURFFeature[] modelFeatures, SURFFeature[] imageFeatures, double matchDistanceRatio, int[,] result1, double[,] dist1, List<PointF> modelPointList, List<PointF> imagePointList)
        {
            for (int i = 0; i < result1.GetLength(0); i++)
            {
                int bestMatchedIndex;
                int secondBestMatchedIndex;

                // The two returned neighbors are not guaranteed to be sorted
                // by distance, so order them here.
                if (dist1[i, 0] < dist1[i, 1])
                {
                    bestMatchedIndex = result1[i, 0];
                    secondBestMatchedIndex = result1[i, 1];
                }
                else
                {
                    bestMatchedIndex = result1[i, 1];
                    secondBestMatchedIndex = result1[i, 0];
                }

                // BUG FIX: index 0 is a valid feature index; the original used
                // "> 0" for the second-best neighbor, wrongly treating feature
                // 0 as "no match". -1 is the sentinel for a missing neighbor.
                SURFFeature bestMatchedModelPoint = bestMatchedIndex >= 0 ? modelFeatures[bestMatchedIndex] : null;
                SURFFeature secondBestMatchedModelPoint = secondBestMatchedIndex >= 0 ? modelFeatures[secondBestMatchedIndex] : null;

                if (bestMatchedModelPoint != null)
                {
                    // Accept when the best match is much closer than the second
                    // best. Because dist columns may be unsorted, the ratio may
                    // be either <= t or >= 1/t — both branches are checked.
                    double distanceRatio = dist1[i, 0] / dist1[i, 1];
                    if (secondBestMatchedModelPoint == null || distanceRatio <= matchDistanceRatio || distanceRatio >= (1.0 / matchDistanceRatio))
                    {
                        // Record the coordinates of the accepted match pair.
                        modelPointList.Add(bestMatchedModelPoint.Point.pt);
                        imagePointList.Add(imageFeatures[i].Point.pt);
                    }
                }
            }
        }

        /// <summary>
        /// Computes alignment offsets from the match displacements, warps both
        /// input images, shows them in the warp1/warp2 boxes, and saves them
        /// to "1.bmp" / "2.bmp".
        /// </summary>
        /// <param name="surfPoint">Displacement vectors produced by surf().</param>
        /// <param name="homo">Unused; always null in this file.</param>
        private void AlignmentImages(List<PointF> surfPoint, Matrix<float> homo)
        {
            PointF avg = CalcMatchPointAvg(surfPoint);

            // Component-wise minimum over the observed-image match points.
            // BUG FIX: the original used "else if" for the Y comparison, so the
            // minimum Y was only updated on iterations where the X test failed.
            PointF observeImageMinPoint = observePoints[0];
            for (int i = 1; i < observePoints.Count; i++)
            {
                if (observeImageMinPoint.X > observePoints[i].X)
                    observeImageMinPoint.X = observePoints[i].X;

                if (observeImageMinPoint.Y > observePoints[i].Y)
                    observeImageMinPoint.Y = observePoints[i].Y;
            }

            X = (int)Math.Abs((decimal)avg.X) + (int)observeImageMinPoint.X;
            // NOTE(review): the constant 20 looks like an empirically tuned
            // vertical fudge factor — confirm against the original author.
            Y = (int)Math.Abs(20 - (decimal)avg.Y);

            try
            {
                warp1.Image = WarpImage(leftImage, modelPoints, observePoints, true);
                CvInvoke.cvSaveImage("1.bmp", warp1.Image.Ptr);

                warp2.Image = WarpImage(rightImage, observePoints, modelPoints, false);
                CvInvoke.cvSaveImage("2.bmp", warp2.Image.Ptr);
            }
            catch (Exception ex)
            {
                MessageBox.Show(ex.ToString());
            }
        }

        /// <summary>
        /// Converts both images to HSV and returns the HSV copy of destImage.
        /// NOTE(review): modelImageTemp is converted but never used, and
        /// avgPoint is ignored — this method looks unfinished; no caller is
        /// visible in this file.
        /// </summary>
        private Image<Hsv, Byte> BlendImage(Image<Bgr, Byte> modelImage, Image<Bgr, Byte> destImage, PointF avgPoint)
        {
            Image<Hsv, Byte> modelImageTemp = new Image<Hsv, byte>(modelImage.Width, modelImage.Height);
            Image<Hsv, Byte> destImageTemp = new Image<Hsv, byte>(destImage.Width, destImage.Height);

            CvInvoke.cvCvtColor(modelImage.Ptr, modelImageTemp.Ptr, COLOR_CONVERSION.CV_BGR2HSV);
            CvInvoke.cvCvtColor(destImage.Ptr, destImageTemp.Ptr, COLOR_CONVERSION.CV_BGR2HSV);

            return destImageTemp;
        }

        /// <summary>
        /// Estimates a RANSAC homography from modelPoints to observePoints,
        /// zeroes its horizontal translation, and applies it to target.
        /// </summary>
        /// <param name="target">Image to warp.</param>
        /// <param name="modelPoints">Source coordinates for the homography.</param>
        /// <param name="observePoints">Destination coordinates for the homography.</param>
        /// <param name="left">Unused; kept for caller compatibility.</param>
        /// <returns>The warped image, same size as target.</returns>
        private Image<Bgr, Byte> WarpImage(Image<Bgr, Byte> target, List<PointF> modelPoints, List<PointF> observePoints, bool left)
        {
            Image<Bgr, Byte> warpImage = new Image<Bgr, byte>(target.Width, target.Height);

            Matrix<float> homography = CameraCalibration.FindHomography(
                modelPoints.ToArray(),   // coordinates in image 1
                observePoints.ToArray(), // coordinates in image 2
                HOMOGRAPHY_METHOD.RANSAC,
                3).Convert<float>();

            // Drop the horizontal translation component.
            // NOTE(review): the original aliased the matrix ("warpedMatrix =
            // homographyMatrix") instead of copying it and then discarded the
            // result of Mul(); its net effect was exactly h[0,2] = 0, which is
            // done explicitly here.
            homography[0, 2] = 0;

            CvInvoke.cvWarpPerspective(target.Ptr, warpImage.Ptr, homography,
                (int)INTER.CV_INTER_LINEAR | (int)WARP.CV_WRAP_DEFAULT, new MCvScalar(0));

            return warpImage;
        }

        /// <summary>
        /// Returns the component-wise average of the given points.
        /// Returns (NaN, NaN) for an empty list (float division by zero).
        /// </summary>
        private PointF CalcMatchPointAvg(List<PointF> pointList)
        {
            PointF avgPoint = new PointF();

            for (int i = 0; i < pointList.Count; i++)
            {
                avgPoint.X += pointList[i].X;
                avgPoint.Y += pointList[i].Y;
            }

            avgPoint.X /= pointList.Count;
            avgPoint.Y /= pointList.Count;

            return avgPoint;
        }

        /// <summary>Runs feature matching and shows the match visualization.</summary>
        private void button3_Click(object sender, EventArgs e)
        {
            surfImage = surf();
            surfResult.Image = surfImage;
        }

        /// <summary>Aligns/warps the images and saves the match image to demo.bmp.</summary>
        private void button4_Click(object sender, EventArgs e)
        {
            AlignmentImages(surfPoint, homographyMatrix);

            CvInvoke.cvSaveImage("demo.bmp", surfImage.Ptr);
        }
    }
}
