﻿using BaseTool;
using OpenCvSharp;
using System;
using System.Collections.Generic;
using System.Diagnostics;
using System.Drawing;
using System.IO;
using System.Linq;
using System.Text;
using System.Threading;
using System.Threading.Tasks;

namespace Airthmetic
{
    /// <summary>
    /// 标定工具
    /// </summary>
    public class CalibrationTool
    {
        /// <summary> Calibration parameters (loaded from / saved to XML via Read/Save). </summary>
        public CalibrationParam Param = new CalibrationParam();
        /// <summary> Inner-corner data sets (world / camera / projector corners). </summary>
        public CornerData CornerData = new CornerData();
        /// <summary> Calibration result (intrinsics, distortion, stereo R/T/E/F, errors). </summary>
        public CalibrationResult Result = new CalibrationResult();

        /// <summary> Timestamp of the current run; refreshed each time Calibration() is triggered. </summary>
        private DateTime CalibrationTime = DateTime.Now;

        /// <summary> Sentinel value meaning "undetermined". </summary>
        public const ushort Bit_Uncertain = 0xffff;

        #region 参数读取&保存
        /// <summary>
        /// Loads the calibration parameters from XML.
        /// When the file does not exist, a default parameter set is created and
        /// saved to that same path first, then loaded.
        /// </summary>
        /// <param name="CalibrationXml">Source path; null/blank uses the path configured in Alltype.</param>
        /// <returns>true when the parameters were loaded; false on any error.</returns>
        public bool Read(string CalibrationXml = null)
        {
            try
            {
                string path = string.IsNullOrWhiteSpace(CalibrationXml) ? PathTool.CalibrationParamXml : CalibrationXml;
                if (!File.Exists(path))
                {
                    Param = new CalibrationParam();
                    // Bootstrap the missing file at the path we are about to read.
                    // (Previously Save() was called with no argument, which always wrote
                    // the default path and then failed to read a missing custom path.)
                    Save(path);
                }

                Param = (CalibrationParam)FileTool.ReadXML(path, typeof(CalibrationParam));
                Log.SaveLog($"读取标定工具参数成功", LogType.Run, Color.Black);
                return true;
            }
            catch (Exception ex)
            {
                Log.SaveError(ex, $"读取标定工具参数异常，{ex.Message}");
                return false;
            }
        }

        /// <summary>
        /// Persists the calibration parameters to XML.
        /// </summary>
        /// <param name="CalibrationXml">Target path; null/blank uses the path configured in Alltype.</param>
        /// <returns>true when the file was written successfully; false otherwise.</returns>
        public bool Save(string CalibrationXml = null)
        {
            try
            {
                string path = PathTool.CalibrationParamXml;
                if (!string.IsNullOrWhiteSpace(CalibrationXml))
                    path = CalibrationXml;

                bool isok = FileTool.WriteXML(path, Param);
                if (isok)
                    Log.SaveLog($"保存标定工具参数成功", LogType.Run, Color.Black);
                else
                    Log.SaveLog($"保存标定工具参数失败", LogType.Run, Color.Red);
                return isok;
            }
            catch (Exception ex)
            {
                Log.SaveError(ex, $"保存标定工具参数异常，{ex.Message}");
                return false;
            }
        }
        #endregion

        /// <summary>
        /// Entry point: full structured-light calibration pipeline.
        /// Steps: (1-2) validate input and copy images into a timestamped run folder,
        /// (3) extract chessboard corners, (4) compute projector corners (pattern decoding),
        /// (5) calibrate the camera, (6) calibrate the projector,
        /// (7) stereo-calibrate the camera/projector pair, (8) persist results.
        /// Progress is reported via Machine.StatusBarAction; on failure the method logs and returns early.
        /// </summary>
        /// <param name="input">Calibration input (image paths, counts, pattern settings).</param>
        public void Calibration(CalibrationInput input)
        {
            string msg = "";
            try
            {
                // Total runtime; spLastms holds the elapsed value at the end of the previous stage
                // so each stage can log its own delta.
                Stopwatch spTotal = Stopwatch.StartNew();
                long spLastms = 0;

                #region 初始化
                // Refresh the calibration timestamp (one per triggered run).
                CalibrationTime = DateTime.Now;

                // Run output folder:  Debug\Data\Calibration\yyyyMMdd_HHmmss\
                string SavePath = $"{PathTool.DataCalibrationPath}{CalibrationTime.ToString("yyyyMMdd_HHmmss")}\\";

                // Reset the progress bar (-1 presumably means "reset" — TODO confirm against Machine).
                Machine.StatusBarAction?.Invoke(-1, "", Color.Black);
                // Reset inner-corner data.
                CornerData.Init();
                // Reset the calibration result.
                Result.Init();
                #endregion

                #region 数据加载 + 预处理
                //// 1.加载图像 + 预处理
                //Machine.StatusBarAction?.Invoke(0, "数据预处理中...", Color.Black);
                //Log.SaveLog($"标定：预处理图像耗时：{spTotal.ElapsedMilliseconds - spLastms}ms", LogType.Run,Color.Black);
                //spLastms = spTotal.ElapsedMilliseconds;

                // Abort when fewer than 3 image groups are available.
                if (input.capImageCount < 3)
                {
                    msg = $"标定失败：预处理图像数组不足3，只有{input.capImageCount}组";
                    Log.SaveLog(msg, LogType.Run, Color.Red);
                    Machine.StatusBarAction?.Invoke(1, msg, Color.Red);
                    return;
                }

                // 2. Create the run folder and copy the preprocessed images into it.
                Machine.StatusBarAction?.Invoke(0.05, $"数据预处理中...", Color.Black);
                CopyImage(input, SavePath);
                Log.SaveLog($"标定：复制图像耗时：{spTotal.ElapsedMilliseconds - spLastms}ms", LogType.Run, Color.Black);
                spLastms = spTotal.ElapsedMilliseconds;

                #endregion

                #region 角点提取
                // 3. Extract chessboard corners.
                Machine.StatusBarAction?.Invoke(0.1, $"角点提取中...", Color.Black);
                ExtractChessboardCorners(input, Param, CornerData);
                // Abort when fewer than 3 groups yielded corners.
                if (input.finImageCount < 3)
                {
                    msg = $"标定失败：提取棋盘格角点失败，成功{input.finImageCount}组，失败{input.capImageCount - input.finImageCount}组";
                    Log.SaveLog(msg, LogType.Run, Color.Red);
                    Machine.StatusBarAction?.Invoke(1, msg, Color.Red);
                    return;
                }

                Log.SaveLog($"标定：提取棋盘格角点耗时：{spTotal.ElapsedMilliseconds - spLastms}ms", LogType.Run, Color.Black);
                spLastms = spTotal.ElapsedMilliseconds;

                // 4. Compute projector corners (decodes the projected patterns).
                Machine.StatusBarAction?.Invoke(0.3, $"计算投影仪角点中...", Color.Black);
                MeasureProjector(input, Param, CornerData, SavePath);
                // Abort when fewer than 3 groups survived decoding.
                if (input.finImageCount < 3)
                {
                    msg = $"标定失败：计算投影仪角点失败，成功{input.finImageCount}组，失败{input.capImageCount - input.finImageCount}组";
                    Log.SaveLog(msg, LogType.Run, Color.Red);
                    Machine.StatusBarAction?.Invoke(1, msg, Color.Red);
                    return;
                }

                Log.SaveLog($"标定：计算投影仪角点耗时：{spTotal.ElapsedMilliseconds - spLastms}ms", LogType.Run, Color.Black);
                spLastms = spTotal.ElapsedMilliseconds;
                #endregion

                /* [CalibrationFlags]
                 * Options controlling calibration; OpenCV's predefined flags can be
                 * combined with the bitwise OR operator (|).
                 *
                 * CALIB_USE_INTRINSIC_GUESS  - use the supplied cameraMatrix as the initial intrinsic
                 *                              guess (speeds up calibration when a rough estimate exists).
                 * CALIB_FIX_ASPECT_RATIO     - keep the aspect ratio fixed during optimization.
                 * CALIB_FIX_PRINCIPAL_POINT  - keep the principal point (image center) fixed.
                 * CALIB_ZERO_TANGENT_DIST    - force tangential distortion coefficients to zero
                 *                              (no tangential-distortion calibration).
                 * CALIB_FIX_K1..CALIB_FIX_K6 - fix the radial distortion coefficients k1..k6.
                 * CALIB_RATIONAL_MODEL       - rational distortion model (typically wide-angle lenses).
                 * CALIB_THIN_PRISM_MODEL     - thin-prism distortion model (also wide-angle lenses).
                 */

                /* [TermCriteria]
                 * Iteration stop condition, a tuple (type, maxCount, epsilon).
                 *
                 * [type]     TERM_CRITERIA_EPS stops when the accuracy reaches epsilon;
                 *            TERM_CRITERIA_MAX_ITER stops after maxCount iterations.
                 * [maxCount] maximum number of iterations.
                 * [epsilon]  accuracy threshold.
                 */

                #region 5 标定相机
                Machine.StatusBarAction?.Invoke(0.7, $"标定相机中...", Color.Black);
                // Camera intrinsic matrix (3x3).
                double[,] _cam_K = new double[3, 3];
                // Camera distortion coefficients: k1, k2, p1, p2, k3.
                double[] _cam_kc = new double[5];
                // Per-image 3x1 rotation vectors (objectPoints -> imagePoints pose), one per view.
                Vec3d[] _cam_rvecs;
                // Per-image 3x1 translation vectors (objectPoints -> imagePoints pose), one per view.
                Vec3d[] _cam_tvecs;
                // Calibration options: k3 is held fixed.
                CalibrationFlags _cam_calflags = CalibrationFlags.FixK3;
                // Stop criteria: at most 50 iterations (double.Epsilon makes the EPS test effectively unreachable).
                TermCriteria _cam_criteria1 = new TermCriteria(CriteriaTypes.Eps | CriteriaTypes.Count, 50, double.Epsilon);

                double cam_error = Cv2.CalibrateCamera(
                    CornerData.CornerWorld,
                    CornerData.CornerCamera,
                    Param.CameraSize,
                    _cam_K,
                    _cam_kc,
                    out _cam_rvecs,
                    out _cam_tvecs,
                    _cam_calflags,
                    _cam_criteria1
                    );

                Result.cam_k = ImageTool.GetDouble2D(_cam_K);
                Result.cam_kc = _cam_kc;
                Result.cam_error = cam_error;

                Log.SaveLog($"标定：标定相机耗时：{spTotal.ElapsedMilliseconds - spLastms}ms", LogType.Run, Color.Black);
                spLastms = spTotal.ElapsedMilliseconds;
                #endregion

                #region 6 标定投影仪
                Machine.StatusBarAction?.Invoke(0.8, $"标定投影仪中...", Color.Black);
                // Projector intrinsic matrix (3x3).
                double[,] _proj_K = new double[3, 3];
                // Projector distortion coefficients: k1, k2, p1, p2, k3.
                double[] _proj_kc = new double[5];
                // Per-image 3x1 rotation vectors (objectPoints -> imagePoints pose), one per view.
                Vec3d[] _proj_rvecs;
                // Per-image 3x1 translation vectors (objectPoints -> imagePoints pose), one per view.
                Vec3d[] _proj_tvecs;
                // Calibration options: k3 is held fixed.
                CalibrationFlags _proj_calflags = CalibrationFlags.FixK3;
                // Stop criteria: at most 50 iterations (double.Epsilon makes the EPS test effectively unreachable).
                TermCriteria _proj_criteria1 = new TermCriteria(CriteriaTypes.Eps | CriteriaTypes.Count, 50, double.Epsilon);

                double proj_error = Cv2.CalibrateCamera(
                    CornerData.CornerWorld,
                    CornerData.CornerProjector,
                    Param.ProjectorSize,
                    _proj_K,
                    _proj_kc,
                    out _proj_rvecs,
                    out _proj_tvecs,
                    _proj_calflags,
                    _proj_criteria1
                    );

                Result.proj_k = ImageTool.GetDouble2D(_proj_K);
                Result.proj_kc = _proj_kc;
                Result.proj_error = proj_error;

                Log.SaveLog($"标定：标定投影仪耗时：{spTotal.ElapsedMilliseconds - spLastms}ms", LogType.Run, Color.Black);
                spLastms = spTotal.ElapsedMilliseconds;
                #endregion

                #region 7 系统校准（立体标定）
                Machine.StatusBarAction?.Invoke(0.9, $"系统校准中...", Color.Black);
                TermCriteria _stereo_criteria = new TermCriteria(CriteriaTypes.Eps | CriteriaTypes.Count, 150, double.Epsilon);
                // FixIntrinsic: both intrinsic sets stay as computed above; only the extrinsics are estimated.
                CalibrationFlags _stereo_calflags = CalibrationFlags.FixK3 | CalibrationFlags.FixIntrinsic;

                // R/T: rotation/translation between camera and projector; E/F: essential/fundamental matrices.
                Mat R = new Mat();
                Mat T = new Mat();
                Mat E = new Mat();
                Mat F = new Mat();
                double stereo_error = Cv2.StereoCalibrate(
                    CornerData.CornerWorld, CornerData.CornerCamera, CornerData.CornerProjector,
                    _cam_K, _cam_kc, _proj_K, _proj_kc,
                    Param.CameraSize, R, T, E, F,
                    _stereo_calflags, _stereo_criteria);

                Result.R = ImageTool.GetDouble2D(R);
                Result.T = ImageTool.GetDouble(T);
                Result.E = ImageTool.GetDouble2D(E);
                Result.F = ImageTool.GetDouble2D(F);
                Result.stereo_error = stereo_error;
                Log.SaveLog($"标定：系统校准耗时：{spTotal.ElapsedMilliseconds - spLastms}ms", LogType.Run, Color.Black);
                spLastms = spTotal.ElapsedMilliseconds;
                #endregion

                /* Shapes of the data involved (3-group example):
                *[Corner_World]    Count:3  Type:Point3f[] 
                *[Corner_Cam]      Count:3    Type:Point2f[] 
                *[Corner_Proj]     Count:3   Type:Point2f[] 
                *[cam_K]   (Rows, Cols):(3, 3)   Type:CV_64FC1 
                *[cam_kc]  (Rows, Cols):(1, 5) Type:CV_64FC1 
                *[proj_K]  (Rows, Cols):(3, 3) Type:CV_64FC1 
                *[proj_kc] (Rows, Cols):(1, 5) Type:CV_64FC1 
                *[R]       (Rows, Cols):(3, 3) Type:CV_64FC1 
                *[T]       (Rows, Cols):(3, 1) Type:CV_64FC1 
                *[E]       (Rows, Cols):(3, 3) Type:CV_64FC1 
                *[F]       (Rows, Cols):(3, 3) Type:CV_64FC1 
                */

                #region 8 保存xml和ini数据
                Machine.StatusBarAction?.Invoke(0.95, $"保存数据中...", Color.Black);
                string XmlPath = $"{SavePath}Xml\\";
                if (!Directory.Exists(XmlPath))
                    Directory.CreateDirectory(XmlPath);

                // 8.1 Save the calibration result (carries a clone of the parameters used).
                Result.isOK = true;
                Result.Param = (CalibrationParam)FileTool.CloneClass(Param);
                FileTool.WriteXML($"{XmlPath}CalibrationResult.xml", Result);

                // 8.2 Save calibration parameters (disabled: already stored inside the result).
                //Save($"{XmlPath}CalibrationParam.xml");
                //FileTool.WriteXML($"{XmlPath}CalibrationParam.xml", Write);

                // 8.3 Save the calibration-pattern data (disabled).
                //FileTool.WriteXML($"{XmlPath}PatternParam.xml", MyGlobal.patternData);

                // 8.4 Dump the per-view pose vectors and all corner triples to a text file.
                //FileTool.WriteXML($"{XmlPath}CornerData.xml", CornerData);
                {
                    StringBuilder cornerMsg = new StringBuilder();
                    for (int i = 0; i < _cam_rvecs.Count(); i++)
                    {
                        cornerMsg.AppendLine($"[{i + 1}]" +
                             (_cam_rvecs.Count() > i && _cam_rvecs[i] != null ? $" cr({_cam_rvecs[i].Item0}, {_cam_rvecs[i].Item1}, {_cam_rvecs[i].Item2})" : " cr empty") +
                             (_cam_tvecs.Count() > i && _cam_tvecs[i] != null ? $" ct({_cam_tvecs[i].Item0}, {_cam_tvecs[i].Item1}, {_cam_tvecs[i].Item2})" : " ct empty") +
                             (_proj_rvecs.Count() > i && _proj_rvecs[i] != null ? $" pr({_proj_rvecs[i].Item0}, {_proj_rvecs[i].Item1}, {_proj_rvecs[i].Item2})" : " pr empty") +
                             (_proj_tvecs.Count() > i && _proj_tvecs[i] != null ? $" pt({_proj_tvecs[i].Item0}, {_proj_tvecs[i].Item1}, {_proj_tvecs[i].Item2})" : " pt empty")
                             );
                    }
                    cornerMsg.AppendLine();
                    int sum = 0;
                    for (int i = 0; i < CornerData.CornerCamera.Count; i++)
                    {
                        for (int j = 0; j < CornerData.CornerCamera[i].Count(); j++)
                        {
                            cornerMsg.AppendLine($"[{sum + 1}]" +
                                (CornerData.CornerCamera[i][j] != null ? $" C ({CornerData.CornerCamera[i][j].X}, {CornerData.CornerCamera[i][j].Y})" : " C empty") +
                                (CornerData.CornerProjector[i][j] != null ? $" P ({CornerData.CornerProjector[i][j].X}, {CornerData.CornerProjector[i][j].Y})" : " P empty") +
                                (CornerData.CornerWorld[i][j] != null ? $" W ({CornerData.CornerWorld[i][j].X}, {CornerData.CornerWorld[i][j].Y}, {CornerData.CornerWorld[i][j].Z})" : " W empty")
                                );
                            sum++;
                        }
                    }
                    FileTool.WriteTxt($"{XmlPath}CornerData.txt", cornerMsg.ToString());
                }

                // 8.5 Save run metadata to an INI file (disabled).
                //string IniPath = XmlPath + "Index.ini";
                //FileTool.WriteIni(IniPath, "Calibration", "CaptureCount", input.capImageCount);
                //FileTool.WriteIni(IniPath, "Calibration", "PretreatmentCount", input.preImageCount);
                //FileTool.WriteIni(IniPath, "Calibration", "FinishCount", input.finImageCount);
                //string CapturePath = "";
                //foreach (var file in input.capImagePath) { CapturePath += $"\n{file}"; }
                //FileTool.WriteIni(IniPath, "Calibration", "CapturePath[]", CapturePath);
                //FileTool.WriteIni(IniPath, "Calibration", "CalibrationTime", PathTool.GetDateTimeFolderName(CalibrationTime));
                //FileTool.WriteIni(IniPath, "Calibration", "CalibrationTicks", CalibrationTime.Ticks.ToString());
                //FileTool.WriteIni(IniPath, "Calibration", "CalibrationPath", SavePath);

                Log.SaveLog($"标定：保存数据耗时：{spTotal.ElapsedMilliseconds - spLastms}ms", LogType.Run, Color.Black);
                spLastms = spTotal.ElapsedMilliseconds;
                #endregion

                spTotal.Stop();
                msg = $"标定完成，总耗时：{spTotal.ElapsedMilliseconds}ms。\n相机误差：{cam_error}\n投影仪误差：{proj_error}\n系统误差：{stereo_error}";
                Log.SaveLog(msg, LogType.Run, Color.Black);
                Machine.StatusBarAction?.Invoke(1, $"标定完成，总耗时：{spTotal.ElapsedMilliseconds}ms  系统误差：{stereo_error}", Color.Black);
            }
            catch (Exception ex)
            {
                Log.SaveError(ex, $"标定异常：{ex.Message}");
                Machine.StatusBarAction?.Invoke(1, "标定异常", Color.Red);
            }
        }

        #region 辅助方法
        /// <summary>
        /// 2. Creates the calibration image folder and copies the preprocessed images into it.
        /// Layout: SavePath\Image\NN.ext (one representative image per group, the file whose
        /// name contains "01") plus SavePath\Image\NN\ holding the full image set of the group.
        /// Each group folder is appended to input.preImage2Path.
        /// </summary>
        /// <param name="input">Calibration input (source folders in capImagePath, group count).</param>
        /// <param name="SavePath">Root folder of this calibration run.</param>
        private static void CopyImage(CalibrationInput input, string SavePath)
        {
            string ImageSavePath = SavePath + "Image\\";

            if (!Directory.Exists(ImageSavePath))
                Directory.CreateDirectory(ImageSavePath);

            int Count = input.capImageCount;
            for (int i = 0; i < Count; i++)
            {
                string indexName = (i + 1).ToString().PadLeft(2, '0');

                // Copy one representative image (name contains "01") next to the group folder.
                DirectoryInfo OriDir = new DirectoryInfo(input.capImagePath[i]);
                foreach (FileInfo OriFile in OriDir.GetFiles())
                {
                    if (!IsImageFile(OriFile))
                        continue;
                    if (!OriFile.Name.Contains("01"))
                        continue;

                    // overwrite: true — without it CopyTo throws IOException when the
                    // destination already exists (e.g. a retried run).
                    OriFile.CopyTo(ImageSavePath + indexName + OriFile.Extension, true);
                    break;
                }

                // Copy the full preprocessed image set into the group folder.
                string ticksPath = ImageSavePath + indexName + "\\";
                Directory.CreateDirectory(ticksPath);
                foreach (string pf in Directory.GetFiles(input.capImagePath[i]))
                {
                    FileInfo PreFile = new FileInfo(pf);
                    if (!IsImageFile(PreFile))
                        continue;
                    PreFile.CopyTo(ticksPath + PreFile.Name, true);
                }
                input.preImage2Path.Add(ticksPath);

                Log.SaveLog($"复制图像[{i + 1}/{Count}]完成", LogType.Run, Color.Black);
            }
        }

        /// <summary>
        /// True when the file has a supported image extension (.bmp/.jpg/.png).
        /// Case-insensitive: the original comparison silently skipped ".BMP"/".JPG" files.
        /// </summary>
        private static bool IsImageFile(FileInfo file)
        {
            return file.Extension.Equals(".bmp", StringComparison.OrdinalIgnoreCase)
                || file.Extension.Equals(".jpg", StringComparison.OrdinalIgnoreCase)
                || file.Extension.Equals(".png", StringComparison.OrdinalIgnoreCase);
        }

        /// <summary>
        /// 3. Extracts chessboard corners from each preprocessed image group.
        /// Tries every image in the group folder until FindChessboardCornersSB succeeds;
        /// records the outcome in input.finImageFlag and, on success, appends the detected
        /// corners to data.CornerCamera.
        /// </summary>
        /// <param name="input">Calibration input (group folders in preImage2Path, group count).</param>
        /// <param name="param">Tunable parameters (expected inner-corner grid in CornerCount).</param>
        /// <param name="data">Corner container receiving the camera-side corner sets.</param>
        public static void ExtractChessboardCorners(CalibrationInput input, CalibrationParam param, CornerData data)
        {
            int Count = input.capImageCount;
            for (int i = 0; i < Count; i++)
            {
                DirectoryInfo dir = new DirectoryInfo(input.preImage2Path[i]);
                FileInfo[] fis = dir.GetFiles();

                bool isok = false;
                Point2f[] point = new Point2f[0];

                foreach (FileInfo fi in fis)
                {
                    try
                    {
                        // using: Mat wraps native memory — the original never disposed it,
                        // leaking one decoded image per file scanned.
                        using (Mat mat = Cv2.ImRead(fi.FullName, ImreadModes.Grayscale))
                        {
                            // Exhaustive: exhaustive search to improve the detection rate.
                            // Accuracy: resamples the image for better sub-pixel accuracy
                            // (recommended when a precise camera calibration is required).
                            if (Cv2.FindChessboardCornersSB(
                                mat,
                                param.CornerCount,
                                out point,
                                ChessboardFlags.Exhaustive | ChessboardFlags.Accuracy))
                            {
                                isok = true;
                                break;
                            }
                        }
                    }
                    catch (Exception ex)
                    {
                        Log.SaveError(ex, $"提取棋盘格角点[{i + 1}/{Count}]异常，地址：{fi.FullName}，异常：{ex.Message}", LogType.Run);
                    }
                }

                if (isok)
                {
                    input.finImageFlag.Add(true);
                    data.CornerCamera.Add(point);
                    Log.SaveLog($"提取棋盘格角点[{i + 1}/{Count}]完成", LogType.Run, Color.Black);
                }
                else
                {
                    input.finImageFlag.Add(false);
                    Log.SaveLog($"提取棋盘格角点[{i + 1}/{Count}]失败", LogType.Run, Color.Red);
                }
            }
        }

        /// <summary>
        /// 4. Computes projector corners for every successfully-detected group
        /// (includes gray-code pattern decoding); one group per parallel task.
        /// For each camera corner, a local homography (camera -> projector) is fit
        /// from the decoded pattern values inside a window around the corner and
        /// used to map the corner into projector coordinates.
        /// Groups that throw during decoding are flagged false in input.finImageFlag.
        /// </summary>
        /// <param name="input">Calibration input (group folders, decode settings, flags).</param>
        /// <param name="param">Tunable parameters (homography window, threshold, sizes).</param>
        /// <param name="data">Corner container receiving projector corners and decode images.</param>
        /// <param name="SavePath">Root folder of this calibration run (currently unused here).</param>
        public static void MeasureProjector(CalibrationInput input, CalibrationParam param, CornerData data, string SavePath)
        {
            int WINDOW_SIZE = param.HomographyWindow / 2;
            int Threshold = param.Threshold;
            int Count = input.capImageCount;

            // Per-group outputs, indexed by group so parallel tasks never collide.
            Mat[] _pattern_list = new Mat[Count];
            Mat[] _min_max_list = new Mat[Count];
            Point2f[][] _proj_corners = new Point2f[Count][];

            int completedThreads = 0;
            Parallel.For(0, Count, (int i) =>
            {
                try
                {
                    // Skip groups whose chessboard detection already failed.
                    if (!input.finImageFlag[i])
                    {
                        _pattern_list[i] = null;
                        _min_max_list[i] = null;
                        _proj_corners[i] = new Point2f[0];
                        return;
                    }

                    Mat pattern_image;
                    Mat min_max_image;

                    Point2f[] P_cam_corners = data.CornerCamera[i];
                    List<Point2f> P_proj_corners = new List<Point2f>();

                    // 4.1 Decode the projected gray-code set for this group.
                    DecodeGraySet(input.preImage2Path[i],
                        input.PatternCount, input.HorizontalDepth, input.VerticalDepth,
                        param.Threshold, param.BlackLightPowerRatio, param.MinDirectLightComponent,
                        param.CameraSize, param.ProjectorSize,
                        out pattern_image, out min_max_image);

                    // 4.2 For every camera corner, fit a homography inside its window.
                    for (int ci = 0; ci < P_cam_corners.Length; ci++)
                    {
                        Point2f p = P_cam_corners[ci];

                        // Skip corners whose window would fall outside the decoded image.
                        if (p.X <= WINDOW_SIZE ||
                            p.Y <= WINDOW_SIZE ||
                            p.X + WINDOW_SIZE >= pattern_image.Cols ||
                            p.Y + WINDOW_SIZE >= pattern_image.Rows)
                            continue;

                        List<Point2f> temp_img_points = new List<Point2f>();
                        List<Point2f> temp_proj_points = new List<Point2f>();

                        for (int h = (int)p.Y - WINDOW_SIZE; h < p.Y + WINDOW_SIZE; h++)
                        {
                            for (int w = (int)p.X - WINDOW_SIZE; w < p.X + WINDOW_SIZE; w++)
                            {
                                Vec2f pattern = pattern_image.At<Vec2f>(h, w);
                                Vec2b min_max = min_max_image.At<Vec2b>(h, w);

                                // Ignore undecoded pixels and low-contrast pixels.
                                if (ImageTool.INVALID(pattern))
                                    continue;
                                if ((min_max[1] - min_max[0]) < Threshold)
                                    continue;

                                temp_img_points.Add(new Point2f(w, h));
                                temp_proj_points.Add(new Point2f(pattern.Item0, pattern.Item1));
                            }
                        }

                        // Map the corner through the local homography (homogeneous coordinates).
                        // using: dispose the per-corner temporary Mats — the original leaked
                        // H/_temp/_temp2 for every corner of every group.
                        using (Mat H = Cv2.FindHomography(InputArray.Create<Point2f>(temp_img_points), InputArray.Create<Point2f>(temp_proj_points), HomographyMethods.Ransac))
                        using (Mat _temp = new Mat(3, 1, MatType.CV_64FC1))
                        {
                            _temp.Set<double>(0, 0, p.X);
                            _temp.Set<double>(1, 0, p.Y);
                            _temp.Set<double>(2, 0, 1.0);

                            using (Mat _temp2 = H * _temp)
                            {
                                double _X = _temp2.At<double>(0, 0);
                                double _Y = _temp2.At<double>(1, 0);
                                double _Z = _temp2.At<double>(2, 0);
                                P_proj_corners.Add(new Point2f((float)(_X / _Z), (float)(_Y / _Z)));
                            }
                        }
                    }

                    _pattern_list[i] = pattern_image;
                    _min_max_list[i] = min_max_image;
                    _proj_corners[i] = P_proj_corners.ToArray();
                    Log.SaveLog($"第{i + 1}组数据解码完成", LogType.Run, Color.Black);
                }
                catch (Exception ex)
                {
                    Log.SaveError(ex, $"第{i + 1}组数据解码异常，{ex.Message}", LogType.Run);

                    input.finImageFlag[i] = false;

                    _pattern_list[i] = null;
                    _min_max_list[i] = null;
                    _proj_corners[i] = new Point2f[0];
                }
                finally
                {
                    // Progress: 0.30 .. 0.70 as groups finish. The original passed
                    // (int)(fraction * 40 + 30), i.e. 30..70, while every other
                    // StatusBarAction call in this pipeline uses a 0..1 fraction.
                    // Using Interlocked's return value also avoids a racy re-read.
                    int done = Interlocked.Increment(ref completedThreads);
                    Machine.StatusBarAction?.Invoke((double)done / Count * 0.4 + 0.3, $"计算投影仪角点中... {done}/{Count}", Color.Black);
                }
            });

            // Collect the successful groups only (index order preserved).
            for (int i = 0; i < Count; i++)
            {
                if (!input.finImageFlag[i])
                    continue;
                data.CornerProjector.Add(_proj_corners[i]);
                data.patternImage.Add(_pattern_list[i]);
                data.minMaxImage.Add(_min_max_list[i]);
            }
            data.GenerateObjectPoints(Count, param.CornerCount, param.PatternSize);
        }

        /// <summary>
        /// 4.1 投影图解码 
        /// </summary>
        /// <param name="matPath">传入mat数据地址</param>
        /// <param name="PatternCount">条纹光栅图片数量</param>
        /// <param name="HorizontalDepth">条纹光栅垂直向深度</param>
        /// <param name="VerticalDepth">条纹光栅水平向深度</param>
        /// <param name="Threshold">二值化阈值</param>
        /// <param name="BlackLightPowerRatio">黑光功率比</param>
        /// <param name="MinDirectLightComponent">最小直射光分量</param>
        /// <param name="CameraSize"></param>
        /// <param name="ProjectorSize"></param>
        /// <param name="pattern_image">解码图</param>
        /// <param name="min_max_image">最大最小图</param>
        /// <summary>
        /// 4.1 Decode a captured Gray-code pattern image set into projector-coordinate images.
        /// </summary>
        /// <param name="matPath">Directory containing the captured pattern images.</param>
        /// <param name="PatternCount">Expected number of fringe/grating images.</param>
        /// <param name="HorizontalDepth">Bit depth of the vertical-direction grating set.</param>
        /// <param name="VerticalDepth">Bit depth of the horizontal-direction grating set.</param>
        /// <param name="Threshold">Binarization threshold (minimum max-min contrast per pixel).</param>
        /// <param name="BlackLightPowerRatio">Black-light power ratio used for direct-light estimation.</param>
        /// <param name="MinDirectLightComponent">Minimum direct-light component for a bit to be considered reliable.</param>
        /// <param name="CameraSize">Camera image size.</param>
        /// <param name="ProjectorSize">Projector resolution.</param>
        /// <param name="pattern_image">Decoded pattern image (CV_32FC2: channel 0 = column code, channel 1 = row code).</param>
        /// <param name="min_max_image">Per-pixel min/max intensity image (CV_8UC2).</param>
        /// <exception cref="Exception">Thrown when the image count is wrong or decoding fails.</exception>
        public static void DecodeGraySet(string matPath, int PatternCount, int HorizontalDepth, int VerticalDepth,
            int Threshold, double BlackLightPowerRatio, byte MinDirectLightComponent,
            OpenCvSharp.Size CameraSize, OpenCvSharp.Size ProjectorSize,
            out Mat pattern_image, out Mat min_max_image)
        {
            pattern_image = null;   // assigned by decode_pattern (CV_32FC2)
            min_max_image = null;   // assigned by decode_pattern (CV_8UC2)

            Stopwatch sw = Stopwatch.StartNew();

            // Collect capture files; when extra files are present keep only the
            // "Pre"-named bmp/jpg/png captures.
            List<string> pathsList = Directory.GetFiles(matPath).ToList();
            if (pathsList.Count > PatternCount)
            {
                for (int i = pathsList.Count - 1; i >= 0; i--)
                {
                    FileInfo PreFile = new FileInfo(pathsList[i]);
                    if ((!PreFile.Name.Contains("Pre")) ||
                        !(PreFile.Extension == ".bmp" ||
                        PreFile.Extension == ".jpg" ||
                        PreFile.Extension == ".png"))
                    {
                        pathsList.RemoveAt(i);
                    }
                }
            }
            if (pathsList.Count != PatternCount)
                // BUGFIX: the original message had found/expected reversed.
                throw new Exception($"DecodeGraySet: wrong number of images; found {pathsList.Count}, expected {PatternCount}");

            // Pick 8 high-frequency images to estimate the direct light component.
            // Expected layout: [0..1] white/black pair, then 2*HorizontalDepth vertical
            // images, then 2*VerticalDepth horizontal images.
            List<string> pathsListEstimateDirectCcomponent = new List<string>();
            for (int i = 0; i < 4; i++)
            {
                int index1 = 2 + HorizontalDepth * 2 - 6 + i;   // last high-frequency pairs of the vertical set
                int index2 = index1 + VerticalDepth * 2;        // matching images in the horizontal set
                pathsListEstimateDirectCcomponent.Add(pathsList[index1]);
                pathsListEstimateDirectCcomponent.Add(pathsList[index2]);
            }

            //4.1.1 Estimate direct light (CV_8UC2)
            Mat direct_light = estimate_direct_light(pathsListEstimateDirectCcomponent, BlackLightPowerRatio, CameraSize);

            //4.1.2 Decode the pattern pairs.
            // BUGFIX: the return value was previously ignored; a failed decode would
            // continue with empty outputs and crash later in the save step.
            bool rv = decode_pattern(
                pathsList,
                direct_light,
                HorizontalDepth,
                VerticalDepth,
                out pattern_image,
                out min_max_image,
                ProjectorSize,
                MinDirectLightComponent);
            if (!rv)
                throw new Exception($"DecodeGraySet: decode_pattern failed to load pattern images in {matPath}");

            //4.1.3 Split the decoded pattern into column / row images.
            Mat col_image, row_image;
            GetNewPattern(pattern_image, min_max_image, out col_image, out row_image, Threshold);

            #region Save images
            /*
            * direct_light  (Rows, Cols): camera size, Type: CV_8UC2
            * pattern_image (Rows, Cols): camera size, Type: CV_32FC2
            * min_max_image (Rows, Cols): camera size, Type: CV_8UC2
            * col_image / row_image: camera size, Type: CV_32FC1
            */
            DirectoryInfo mdir = new DirectoryInfo(matPath);
            string _SavePath = mdir.Parent.Parent.FullName + "\\DecodeImage\\" + mdir.Name + "\\";

            ImageTool.SaveMat(_SavePath, "Tiff_1_DirectImage", direct_light, true);
            ImageTool.SaveMat(_SavePath, "Tiff_2_PatternImage", pattern_image, true);
            ImageTool.SaveMat(_SavePath, "Tiff_3_MinMaxImage", min_max_image, true);
            ImageTool.SaveMat(_SavePath, "Tiff_4_ColImage", col_image, true);
            ImageTool.SaveMat(_SavePath, "Tiff_5_RowImage", row_image, true);

            #endregion

            Log.SaveLog($"解码成功，{mdir.FullName}，耗时：{sw.ElapsedMilliseconds}ms", LogType.Run, Color.Black);
        }

        #region 4.1 投影图解码
        /// <summary>
        /// 4.1.1 Estimate the per-pixel direct light component (Nayar-style separation).
        /// </summary>
        /// <param name="pathsList">Paths of the 8 high-frequency images used for the estimate.</param>
        /// <param name="b">Black-light power ratio (assumed 0 &lt;= b &lt; 1).</param>
        /// <param name="cameraSize">Camera image size.</param>
        /// <returns>CV_8UC2 image: channel 0 = direct component Ld, channel 1 = global component Lg.</returns>
        private static Mat estimate_direct_light(List<string> pathsList, double b, OpenCvSharp.Size cameraSize)
        {
            // Load the estimation images as grayscale.
            List<Mat> images = new List<Mat>();
            foreach (string path in pathsList)
            {
                images.Add(Cv2.ImRead(path, ImreadModes.Grayscale));
            }

            // Initialize the direct-light image.
            Mat direct_light = new Mat(cameraSize, MatType.CV_8UC2);

            // Separation coefficients derived from the power ratio.
            double b1 = 1.0 / (1.0 - b);
            double b2 = 2.0 / (1.0 - b * b);

            int count = images.Count;
            for (int h = 0; h < cameraSize.Height; h++)
            {
                for (int w = 0; w < cameraSize.Width; w++)
                {
                    // BUGFIX: the seed value previously read row 0 (`At<byte>(0, w)`)
                    // instead of row h, corrupting min/max for every row but the first.
                    byte temp = images[0].At<byte>(h, w);
                    byte Lmax = temp;
                    byte Lmin = temp;
                    for (int i = 1; i < count; i++)
                    {
                        temp = images[i].At<byte>(h, w);
                        if (Lmax < temp) Lmax = temp;
                        if (Lmin > temp) Lmin = temp;
                    }

                    // Rounded direct (Ld) and global (Lg) components; fall back to
                    // (Lmax, 0) when the global estimate is non-positive.
                    int Ld = (int)(b1 * ((double)Lmax - (double)Lmin) + 0.5);
                    int Lg = (int)(b2 * ((double)Lmin - b * (double)Lmax) + 0.5);
                    Vec2b vec2B = Lg > 0 ? new Vec2b((byte)Ld, (byte)Lg) : new Vec2b(Lmax, (byte)0);
                    direct_light.Set<Vec2b>(h, w, vec2B);
                }
            }
            return direct_light;
        }

        /// <summary>
        /// 4.1.2 Decode the Gray-code image pairs into per-pixel projector coordinates.
        /// </summary>
        /// <param name="images_names">Ordered capture paths: 1 white/black pair, then vertical pairs, then horizontal pairs.</param>
        /// <param name="direct_light">CV_8UC2 direct/global light estimate from 4.1.1.</param>
        /// <param name="HorizontalDepth">Bit count of the vertical-stripe set (encodes columns).</param>
        /// <param name="VerticalDepth">Bit count of the horizontal-stripe set (encodes rows).</param>
        /// <param name="pattern_image">Output CV_32FC2: channel 0 = column code, channel 1 = row code; NaN marks uncertain pixels.</param>
        /// <param name="min_max_image">Output CV_8UC2 per-pixel min/max intensity.</param>
        /// <param name="ProjectorSize">Projector resolution, used for the Gray-to-binary conversion offsets.</param>
        /// <param name="m">Minimum direct-light component for a robust bit decision.</param>
        /// <returns>false when an image fails to load; true otherwise.</returns>
        private static bool decode_pattern(
            List<string> images_names, Mat direct_light, int HorizontalDepth, int VerticalDepth,
            out Mat pattern_image, out Mat min_max_image, OpenCvSharp.Size ProjectorSize, byte m)
        {
            pattern_image = new Mat();
            min_max_image = new Mat();
            bool init = true;

            int total_images = images_names.Count;

            // Pattern bits per set: [skip, vertical, horizontal].
            int[] bit_count = { 0, HorizontalDepth, VerticalDepth };
            // Image pairs per set: one white/black pair, then one pair per bit.
            int[] set_size = { 1, HorizontalDepth, VerticalDepth };
            // Total image count (two images per pair).
            int COUNT = 2 * (set_size[0] + set_size[1] + set_size[2]);
            // Offsets centering the projector area inside the 2^bits code range.
            int[] pattern_offset = {
                ((1 << bit_count[1]) - ProjectorSize.Width) / 2,  // (2^bits - W) / 2
                ((1 << bit_count[2]) - ProjectorSize.Height) / 2  // (2^bits - H) / 2
            };

            // Walk the image pairs, accumulating per-pixel min/max and bit codes.
            for (int t = 0, current = 0, set = 0;
                t < COUNT;
                t += 2, current++)
            {
                // set 0: the all-white/all-black pair (skipped)
                // set 1: vertical-stripe pairs
                // set 2: horizontal-stripe pairs
                if (current == set_size[set])
                {
                    set++;
                    current = 0;
                }
                if (set == 0)
                {   //skip
                    continue;
                }
                int bit = bit_count[set] - current - 1; //current bit, MSB first: from (bit_count[set]-1) down to 0
                int channel = set - 1;

                // Load the image pair.
                // BUGFIX: load as grayscale — ImRead defaults to a 3-channel color image,
                // which breaks the single-channel At<byte>(h, w) indexing below.
                // This also matches how estimate_direct_light loads its images.
                Mat gray_image1 = Cv2.ImRead(images_names[t + 0], ImreadModes.Grayscale);
                if (gray_image1.Rows < 1)
                    return false;
                Mat gray_image2 = Cv2.ImRead(images_names[t + 1], ImreadModes.Grayscale);
                if (gray_image2.Rows < 1)
                    return false;

                // Lazily size the outputs from the first successfully loaded pair.
                if (init)
                {
                    pattern_image = new Mat(gray_image1.Size(), MatType.CV_32FC2);
                    min_max_image = new Mat(gray_image1.Size(), MatType.CV_8UC2);
                }

                // Safety check: skip pairs whose size does not match the output.
                OpenCvSharp.Size gray_image1_size = gray_image1.Size();
                OpenCvSharp.Size gray_image2_size = gray_image2.Size();
                OpenCvSharp.Size pattern_image_size = pattern_image.Size();
                if (gray_image1_size.Height != pattern_image_size.Height ||
                    gray_image1_size.Width != pattern_image_size.Width)
                    continue;
                if (gray_image2_size.Height != pattern_image_size.Height ||
                    gray_image2_size.Width != pattern_image_size.Width)
                    continue;

                // [robust] per-pixel pattern bit assignment
                for (int h = 0; h < pattern_image.Rows; h++)
                {
                    for (int w = 0; w < pattern_image.Cols; w++)
                    {
                        Vec2f pattern = pattern_image.At<Vec2f>(h, w);
                        Vec2b min_max = min_max_image.At<Vec2b>(h, w);
                        byte value1 = gray_image1.At<byte>(h, w);
                        byte value2 = gray_image2.At<byte>(h, w);

                        if (init)
                        {
                            pattern[0] = (float)0.0; //vertical
                            pattern[1] = (float)0.0; //horizontal
                            pattern_image.Set<Vec2f>(h, w, pattern);
                        }
                        // Update the running min/max.
                        bool is_change_min_max = false;
                        if (init || value1 < min_max[0] || value2 < min_max[0])
                        {
                            min_max[0] = value1 < value2 ? value1 : value2;
                            is_change_min_max = true;
                        }
                        if (init || value1 > min_max[1] || value2 > min_max[1])
                        {
                            min_max[1] = value1 > value2 ? value1 : value2;
                            is_change_min_max = true;
                        }

                        if (is_change_min_max)
                            min_max_image.Set<Vec2b>(h, w, min_max);

                        // [robust] bit assignment; skip pixels already marked uncertain.
                        // BUGFIX: the original test `pattern[channel] != float.NaN` is
                        // always true (NaN compares unequal to everything, including
                        // itself); use float.IsNaN to actually skip uncertain pixels.
                        if (init || !float.IsNaN(pattern[channel]))
                        {
                            Vec2b L = direct_light.At<Vec2b>(h, w);
                            ushort p = get_robust_bit(value1, value2, L[0], L[1], m);
                            if (p == Bit_Uncertain)
                            {
                                pattern[channel] = float.NaN;
                                pattern_image.Set<Vec2f>(h, w, pattern);
                            }
                            else
                            {
                                pattern[channel] += (p << bit);
                                pattern_image.Set<Vec2f>(h, w, pattern);
                            }
                        }
                    }
                }

                init = false;
            }

            // The accumulated codes are Gray codes; convert them to binary.
            convert_pattern(pattern_image, ProjectorSize, pattern_offset, false);

            return true;
        }

        /// <summary>
        /// 4.1.3 Split the decoded pattern into column/row images, invalidating low-contrast pixels.
        /// </summary>
        /// <param name="pattern_image">CV_32FC2 decoded pattern (channel 0 = column, channel 1 = row).</param>
        /// <param name="min_max_image">CV_8UC2 per-pixel min/max intensities.</param>
        /// <param name="col_image">Output CV_32FC1 column (vertical) code image.</param>
        /// <param name="row_image">Output CV_32FC1 row (horizontal) code image.</param>
        /// <param name="Threshold">Minimum (max - min) contrast for a pixel to be kept.</param>
        private static void GetNewPattern(Mat pattern_image, Mat min_max_image, out Mat col_image, out Mat row_image, int Threshold)
        {
            Vec2f Vec2fNaN = new Vec2f(float.NaN, float.NaN);
            Mat pattern_image_new = new Mat(pattern_image.Size(), pattern_image.Type());
            for (int h = 0; h < pattern_image.Rows; h++)
            {
                for (int w = 0; w < pattern_image.Cols; w++)
                {
                    Vec2f pattern = pattern_image.At<Vec2f>(h, w);
                    Vec2b min_max = min_max_image.At<Vec2b>(h, w);
                    // Drop pixels that are already invalid or whose contrast is below threshold.
                    if (ImageTool.INVALID(pattern) || (min_max[1] - min_max[0]) < Threshold)
                        pattern_image_new.Set<Vec2f>(h, w, Vec2fNaN);
                    else
                        pattern_image_new.Set<Vec2f>(h, w, pattern);
                }
            }
            // Assign the out parameters once here; the previous throwaway
            // `new Mat()` initializations were dead stores (always overwritten).
            col_image = pattern_image_new.ExtractChannel(0);
            row_image = pattern_image_new.ExtractChannel(1);
        }

        /// <summary>
        /// 4.1.2.1 Classify one pattern bit from an image pair using the direct/global
        /// light decomposition; returns 0, 1, or Bit_Uncertain when no reliable
        /// decision can be made.
        /// </summary>
        private static ushort get_robust_bit(byte value1, byte value2, byte Ld, byte Lg, byte m)
        {
            // Direct component too weak for any reliable classification.
            if (Ld < m)
                return Bit_Uncertain;

            // Direct light dominates global light: a plain comparison is trustworthy.
            if (Ld > Lg)
            {
                return (ushort)(value1 > value2 ? 1 : 0);
            }

            // Otherwise only accept clearly separated cases.
            bool looksLikeZero = value1 <= Ld && value2 >= Lg;
            if (looksLikeZero)
                return 0;

            bool looksLikeOne = value1 >= Lg && value2 <= Ld;
            if (looksLikeOne)
                return 1;

            return Bit_Uncertain;
        }
        /// <summary>
        /// 4.1.2.2 Convert the decoded pattern codes between binary and Gray encodings, in place.
        /// </summary>
        /// <param name="pattern_image">CV_32FC2 pattern; channel 0 = column code, channel 1 = row code.</param>
        /// <param name="projector_size">Projector resolution used to clamp decoded codes.</param>
        /// <param name="offset">Per-axis code offsets: [0] for columns, [1] for rows.</param>
        /// <param name="binary">true: binary → Gray; false: Gray → binary (clamped to the projector area).</param>
        private static void convert_pattern(Mat pattern_image, OpenCvSharp.Size projector_size, int[] offset, bool binary)
        {
            if (pattern_image.Rows == 0) return;
            if (pattern_image.Type() != MatType.CV_32FC2) return;

            for (int h = 0; h < pattern_image.Rows; h++)
            {
                for (int w = 0; w < pattern_image.Cols; w++)
                {
                    Vec2f pattern = pattern_image.At<Vec2f>(h, w);

                    if (binary)
                    {
                        // Binary → Gray; preserve the fractional (sub-code) part.
                        if (!ImageTool.INVALID(pattern[0]))
                            pattern[0] = ImageTool.BinaryToGray((int)pattern[0], offset[0]) + (pattern[0] - (int)pattern[0]);
                        if (!ImageTool.INVALID(pattern[1]))
                            pattern[1] = ImageTool.BinaryToGray((int)pattern[1], offset[1]) + (pattern[1] - (int)pattern[1]);
                    }
                    else
                    {
                        // Gray → binary; clamp into the projector area, preserve the fractional part.
                        if (!ImageTool.INVALID(pattern[0]))
                        {
                            int code = ImageTool.GrayToBinary((int)pattern[0], offset[0]);
                            if (code < 0)
                                code = 0;
                            else if (code >= projector_size.Width)
                                code = projector_size.Width - 1;

                            pattern[0] = code + (pattern[0] - (int)pattern[0]);
                        }
                        if (!ImageTool.INVALID(pattern[1]))
                        {
                            int code = ImageTool.GrayToBinary((int)pattern[1], offset[1]);
                            if (code < 0)
                                code = 0;
                            else if (code >= projector_size.Height)
                                code = projector_size.Height - 1;

                            pattern[1] = code + (pattern[1] - (int)pattern[1]);
                        }
                    }
                    // CONSISTENCY FIX: write back via Set<> like the rest of the file;
                    // `At<T>(h, w) = pattern` relies on At<> returning by reference,
                    // which is version-dependent in OpenCvSharp and would otherwise
                    // silently drop the write.
                    pattern_image.Set<Vec2f>(h, w, pattern);
                }
            }
        }
        #endregion
        #endregion
    }
}
