#include <opencv2/stitching/detail/blenders.hpp>
#include <opencv2/stitching/detail/camera.hpp>
#include <opencv2/core.hpp>
// #include <opencv2/stitching.hpp>
#include "my_stitching.hpp"

#include <iostream>

#define LOGLN(message) std::cout << message << std::endl

// Plain holder for per-camera calibration data.
// NOTE(review): appears unused in this translation unit — the stitching code
// below uses cv::detail::CameraParams (the global cameras_1) instead; confirm
// against other translation units before removing.
class Camera {
public:
    cv::Mat K;    // 3x3 intrinsic matrix
    double focal; // Focal length (pixels)
    cv::Mat R;    // 3x3 rotation matrix
};


namespace cv
{
    // Global, fixed-size (two-view) list of camera parameters.
    // Populated with hard-coded values by Stitcher::estimateCameraParams() and
    // consumed by composePanorama(); it replaces the cameras_ member that the
    // stock OpenCV Stitcher would fill via estimation/bundle adjustment.
    std::vector<detail::CameraParams> cameras_1(2);

    /// Factory: build a Stitcher configured for the requested mode.
    /// Shared defaults are applied first, then mode-specific components.
    /// Throws (via CV_Error) on an unrecognized mode value.
    Ptr<Stitcher> Stitcher::create(Mode mode)
    {
        Ptr<Stitcher> s = makePtr<Stitcher>();

        // Configuration common to both modes.
        s->setRegistrationResol(0.6);
        s->setSeamEstimationResol(0.1);
        s->setCompositingResol(ORIG_RESOL);
        s->setPanoConfidenceThresh(1);
        s->setSeamFinder(makePtr<detail::GraphCutSeamFinder>(detail::GraphCutSeamFinderBase::COST_COLOR));
        s->setBlender(makePtr<detail::MultiBandBlender>(true)); // try_use_gpu = true
        s->setFeaturesFinder(ORB::create());
        s->setInterpolationFlags(INTER_LINEAR);

        // Neutral scale state until estimateTransform() computes real values.
        s->work_scale_ = 1;
        s->seam_scale_ = 1;
        s->seam_work_aspect_ = 1;
        s->warped_image_scale_ = 1;

        if (mode == PANORAMA) // PANORAMA is the default
        {
            s->setEstimator(makePtr<detail::HomographyBasedEstimator>());
            s->setWaveCorrection(true);
            s->setWaveCorrectKind(detail::WAVE_CORRECT_HORIZ);
            s->setFeaturesMatcher(makePtr<detail::BestOf2NearestMatcher>(true)); // try_use_gpu = true
            s->setBundleAdjuster(makePtr<detail::BundleAdjusterRay>());
            s->setWarper(makePtr<SphericalWarper>());
            s->setExposureCompensator(makePtr<detail::BlocksGainCompensator>());
        }
        else if (mode == SCANS)
        {
            s->setEstimator(makePtr<detail::AffineBasedEstimator>());
            s->setWaveCorrection(false);
            s->setFeaturesMatcher(makePtr<detail::AffineBestOf2NearestMatcher>(false, true)); // try_use_gpu = true
            s->setBundleAdjuster(makePtr<detail::BundleAdjusterAffinePartial>());
            s->setWarper(makePtr<AffineWarper>());
            s->setExposureCompensator(makePtr<detail::NoExposureCompensator>());
        }
        else
        {
            CV_Error(Error::StsBadArg, "Invalid stitching mode. Must be one of Stitcher::Mode");
        }

        return s;
    }

    /// Registration stage: cache the inputs, then run feature matching
    /// followed by camera-parameter setup. Returns OK or the first failure.
    Stitcher::Status Stitcher::estimateTransform(InputArrayOfArrays images, InputArrayOfArrays masks)
    {
        images.getUMatVector(imgs_);
        masks.getUMatVector(masks_);

        Status status = matchImages();
        if (status != OK)
            return status;

        status = estimateCameraParams();
        if (status != OK)
            return status;

        return OK;
    }

    /// Convenience overload: compose using the images cached during
    /// registration (no replacement images supplied).
    Stitcher::Status Stitcher::composePanorama(OutputArray pano)
    {
        std::vector<UMat> no_images;
        return composePanorama(no_images, pano);
    }

    /// Compositing stage: warp, exposure-compensate, seam-cut and blend the
    /// registered images into the output panorama.
    ///
    /// @param images optional replacement images; if non-empty they must match
    ///               the registered set one-to-one (same count/order).
    /// @param pano   output panorama, CV_8U.
    /// @return OK on success.
    ///
    /// NOTE: unlike stock OpenCV, the warp geometry comes from the file-level
    /// global cameras_1 (filled by estimateCameraParams), not from cameras_.
    Stitcher::Status Stitcher::composePanorama(InputArrayOfArrays images, OutputArray pano)
    {
        std::vector<UMat> imgs;
        images.getUMatVector(imgs);
        if (!imgs.empty())
        {
            CV_Assert(imgs.size() == imgs_.size());

            UMat img;
            seam_est_imgs_.resize(imgs.size());

            for (size_t i = 0; i < imgs.size(); ++i)
            {
                imgs_[i] = imgs[i];
                resize(imgs[i], img, Size(), seam_scale_, seam_scale_, INTER_LINEAR_EXACT);
                seam_est_imgs_[i] = img.clone();
            }

            // Keep only the images that survived component selection.
            std::vector<UMat> seam_est_imgs_subset;
            std::vector<UMat> imgs_subset;

            for (size_t i = 0; i < indices_.size(); ++i)
            {
                imgs_subset.push_back(imgs_[indices_[i]]);
                seam_est_imgs_subset.push_back(seam_est_imgs_[indices_[i]]);
            }

            seam_est_imgs_ = seam_est_imgs_subset;
            imgs_ = imgs_subset;
        }

        // The hard-coded camera list only covers cameras_1.size() views;
        // indexing past it below would be undefined behavior.
        CV_Assert(imgs_.size() <= cameras_1.size());

        std::vector<Point> corners(imgs_.size());
        std::vector<UMat> masks_warped(imgs_.size());
        std::vector<UMat> images_warped(imgs_.size());
        std::vector<Size> sizes(imgs_.size());
        std::vector<UMat> masks(imgs_.size());

        // Prepare full-coverage masks at seam-estimation resolution.
        for (size_t i = 0; i < imgs_.size(); ++i)
        {
            masks[i].create(seam_est_imgs_[i].size(), CV_8U);
            masks[i].setTo(Scalar::all(255));
        }

        // Warp images and their masks at seam-estimation scale.
        Ptr<detail::RotationWarper> w = warper_->create(float(warped_image_scale_ * seam_work_aspect_));
        for (size_t i = 0; i < imgs_.size(); ++i)
        {
            Mat_<float> K;
            cameras_1[i].K().convertTo(K, CV_32F);
            Mat_<float> R;
            cameras_1[i].R.convertTo(R, CV_32F);
            // Rescale intrinsics from work scale to seam-estimation scale.
            K(0,0) *= (float)seam_work_aspect_;
            K(0,2) *= (float)seam_work_aspect_;
            K(1,1) *= (float)seam_work_aspect_;
            K(1,2) *= (float)seam_work_aspect_;

            corners[i] = w->warp(seam_est_imgs_[i], K, R, interp_flags_, BORDER_REFLECT, images_warped[i]);
            sizes[i] = images_warped[i].size();

            w->warp(masks[i], K, R, INTER_NEAREST, BORDER_CONSTANT, masks_warped[i]);
        }

        // Compensate exposure before finding seams.
        exposure_comp_->feed(corners, images_warped, masks_warped);
        for (size_t i = 0; i < imgs_.size(); ++i)
            exposure_comp_->apply(int(i), corners[i], images_warped[i], masks_warped[i]);

        // Find seams on float images.
        std::vector<UMat> images_warped_f(imgs_.size());
        for (size_t i = 0; i < imgs_.size(); ++i)
            images_warped[i].convertTo(images_warped_f[i], CV_32F);
        seam_finder_->find(images_warped_f, corners, masks_warped);

        // Release memory no longer needed.
        images_warped.clear();
        images_warped_f.clear();
        masks.clear();

        UMat img_warped, img_warped_s;
        UMat dilated_mask, seam_mask, mask, mask_warped;

        double compose_work_aspect = 1;
        bool is_blender_prepared = false;

        double compose_scale = 1;
        bool is_compose_scale_set = false;

        // Copy whose intrinsics get rescaled to compositing resolution.
        std::vector<detail::CameraParams> cameras_scaled(cameras_1);

        UMat full_img, img;
        for (size_t img_idx = 0; img_idx < imgs_.size(); ++img_idx)
        {
            // Read image and resize it if necessary.
            full_img = imgs_[img_idx];
            if (!is_compose_scale_set)
            {
                if (compose_resol_ > 0)
                    compose_scale = std::min(1.0, std::sqrt(compose_resol_ * 1e6 / full_img.size().area()));
                is_compose_scale_set = true;

                // Scale relative to the registration (work) resolution.
                compose_work_aspect = compose_scale / work_scale_;

                // Rebuild the warper for the compositing scale.
                float warp_scale = static_cast<float>(warped_image_scale_ * compose_work_aspect);
                w = warper_->create(warp_scale);

                // Update corners and sizes for every view (done once).
                for (size_t i = 0; i < imgs_.size(); ++i)
                {
                    // Rescale intrinsics; rotations are left untouched.
                    cameras_scaled[i].ppx *= compose_work_aspect;
                    cameras_scaled[i].ppy *= compose_work_aspect;
                    cameras_scaled[i].focal *= compose_work_aspect;

                    // Compute the output ROI of this view after warping.
                    Size sz = full_img_sizes_[i];
                    if (std::abs(compose_scale - 1) > 1e-1)
                    {
                        sz.width = cvRound(full_img_sizes_[i].width * compose_scale);
                        sz.height = cvRound(full_img_sizes_[i].height * compose_scale);
                    }

                    Mat K;
                    cameras_scaled[i].K().convertTo(K, CV_32F);
                    Mat_<float> R;
                    cameras_scaled[i].R.convertTo(R, CV_32F);
                    Rect roi = w->warpRoi(sz, K, R);
                    corners[i] = roi.tl();
                    sizes[i] = roi.size();
                }
            }
            if (std::abs(compose_scale - 1) > 1e-1)
            {
                resize(full_img, img, Size(), compose_scale, compose_scale, INTER_LINEAR_EXACT);
            }
            else
                img = full_img;
            full_img.release();
            Size img_size = img.size();

            Mat K;
            cameras_scaled[img_idx].K().convertTo(K, CV_32F);

            // Warp the current image. R comes from cameras_1; this equals
            // cameras_scaled[img_idx].R because only focal/ppx/ppy were
            // rescaled above.
            Mat_<float> R;
            cameras_1[img_idx].R.convertTo(R, CV_32F);
            w->warp(img, K, R, interp_flags_, BORDER_REFLECT, img_warped);

            // Warp the current image mask with the same K and R.
            mask.create(img_size, CV_8U);
            mask.setTo(Scalar::all(255));
            w->warp(mask, K, R, INTER_NEAREST, BORDER_CONSTANT, mask_warped);

            // Compensate exposure.
            exposure_comp_->apply((int)img_idx, corners[img_idx], img_warped, mask_warped);

            img_warped.convertTo(img_warped_s, CV_16S);
            img_warped.release();
            img.release();
            mask.release();

            // Make sure the seam mask has the proper size, then intersect it
            // with the warped mask.
            dilate(masks_warped[img_idx], dilated_mask, Mat());
            resize(dilated_mask, seam_mask, mask_warped.size(), 0, 0, INTER_LINEAR_EXACT);

            bitwise_and(seam_mask, mask_warped, mask_warped);

            if (!is_blender_prepared)
            {
                blender_->prepare(corners, sizes);
                is_blender_prepared = true;
            }

            // Blend the current image into the panorama.
            blender_->feed(img_warped_s, mask_warped, corners[img_idx]);
        }

        UMat result;
        blender_->blend(result, result_mask_);

        // Preliminary result is in CV_16SC3 format, but all values are in the
        // [0,255] range, so convert it to CV_8U to avoid confusing the caller.
        result.convertTo(pano, CV_8U);

        return OK;
    }


/// Convenience overload: stitch without per-image masks.
Stitcher::Status Stitcher::stitch(InputArrayOfArrays images, OutputArray pano)
{
    // NOTE(review): debug trace left in place to preserve observable output.
    std::cout << "********************skip111*********************" << std::endl;
    return stitch(images, noArray(), pano);
}


/// Full pipeline: registration first, compositing second.
/// Returns the registration error immediately if it fails.
Stitcher::Status Stitcher::stitch(InputArrayOfArrays images, InputArrayOfArrays masks, OutputArray pano)
{
    const Status status = estimateTransform(images, masks);
    return (status == OK) ? composePanorama(pano) : status;
}


/// Feature detection and pairwise matching over the cached input images.
/// Computes work/seam scales, extracts features, matches all pairs, and keeps
/// only the biggest connected component of confidently-matched images.
/// Returns ERR_NEED_MORE_IMGS when fewer than two images are available.
Stitcher::Status Stitcher::matchImages()
{    
    std::cout<<"********************22222222*********************"<<std::endl;
    if ((int)imgs_.size() < 2)
    {
        return ERR_NEED_MORE_IMGS;
    }
    // Run the full detection/matching pipeline only when no seam-estimation
    // images are cached yet; on repeat calls the previous registration result
    // is reused wholesale.
    if (seam_est_imgs_.empty())
    {
        std::cout<<"********************skip333empty*********************"<<std::endl;

        // Reset scale state before recomputing it below.
        work_scale_ = 1;
        seam_work_aspect_ = 1;
        seam_scale_ = 1;
        bool is_work_scale_set = false;
        bool is_seam_scale_set = false;
        features_.resize(imgs_.size());
        seam_est_imgs_.resize(imgs_.size());
        full_img_sizes_.resize(imgs_.size());

#if ENABLE_LOG
        int64 t = getTickCount();
#endif

        std::vector<UMat> feature_find_imgs(imgs_.size());
        std::vector<UMat> feature_find_masks(masks_.size());

        for (size_t i = 0; i < imgs_.size(); ++i)
        {
            full_img_sizes_[i] = imgs_[i].size();
            // registr_resol_ < 0 means "match at the original resolution".
            if (registr_resol_ < 0)
            {
                feature_find_imgs[i] = imgs_[i];
                work_scale_ = 1;
                is_work_scale_set = true;
            }
            else
            {
                // Work scale is computed once, from the first image's area.
                if (!is_work_scale_set)
                {
                    work_scale_ = std::min(1.0, std::sqrt(registr_resol_ * 1e6 / full_img_sizes_[i].area()));
                    is_work_scale_set = true;
                }
                resize(imgs_[i], feature_find_imgs[i], Size(), work_scale_, work_scale_, INTER_LINEAR_EXACT);
            }
            // Seam scale is likewise computed once from the first image.
            if (!is_seam_scale_set)
            {
                seam_scale_ = std::min(1.0, std::sqrt(seam_est_resol_ * 1e6 / full_img_sizes_[i].area()));
                seam_work_aspect_ = seam_scale_ / work_scale_;
                is_seam_scale_set = true;
            }

            if (!masks_.empty())
            {
                resize(masks_[i], feature_find_masks[i], Size(), work_scale_, work_scale_, INTER_NEAREST);
            }
            features_[i].img_idx = (int)i;

            resize(imgs_[i], seam_est_imgs_[i], Size(), seam_scale_, seam_scale_, INTER_LINEAR_EXACT);
        }

        // Find features (possibly in parallel).
        detail::computeImageFeatures(features_finder_, feature_find_imgs, features_, feature_find_masks);

        // Free the downscaled copies to save memory.
        feature_find_imgs.clear();
        feature_find_masks.clear();

#if ENABLE_LOG
        t = getTickCount();
#endif
        // Pairwise matching across all feature sets.
        (*features_matcher_)(features_, pairwise_matches_, matching_mask_);

        features_matcher_->collectGarbage();

        // Leave only the images we are confident belong to the same panorama,
        // then re-index every cached per-image container accordingly.
        indices_ = detail::leaveBiggestComponent(features_, pairwise_matches_, (float)conf_thresh_);
        std::vector<UMat> seam_est_imgs_subset;
        std::vector<UMat> imgs_subset;
        std::vector<Size> full_img_sizes_subset;
        for (size_t i = 0; i < indices_.size(); ++i)
        {
            imgs_subset.push_back(imgs_[indices_[i]]);
            seam_est_imgs_subset.push_back(seam_est_imgs_[indices_[i]]);
            full_img_sizes_subset.push_back(full_img_sizes_[indices_[i]]);
        }
        seam_est_imgs_ = seam_est_imgs_subset;
        imgs_ = imgs_subset;
        full_img_sizes_ = full_img_sizes_subset;
    }
    else{
        std::cout<<"********************skip*********************"<<std::endl;
    }

    return OK;
}



//估计相机参数
/// Install fixed, precomputed camera parameters instead of estimating them.
/// The stock OpenCV pipeline (homography estimation, bundle adjustment, wave
/// correction) has been replaced by hard-coded extrinsics/intrinsics for a
/// fixed two-camera rig, written into the global cameras_1.
/// Always returns OK.
Stitcher::Status Stitcher::estimateCameraParams()
{
    // Rotation matrices for the two cameras.
    cv::Mat R1 = (cv::Mat_<double>(3, 3) << 0.82741368, 0.027497945, -0.56091928,
                                            1.1543859e-09, 0.99880046, 0.048964202,
                                            0.56159288, -0.040513646, 0.8264212);
    cv::Mat R2 = (cv::Mat_<double>(3, 3) << 0.82786888, -0.0016199998, 0.56091923,
                                            1.5589343e-08, 0.99999577, 0.0028880751,
                                            -0.56092167, -0.0023909314, 0.82786536);

    // Both cameras sit at the origin (pure-rotation model).
    cv::Mat t1 = (cv::Mat_<double>(3, 1) << 0, 0, 0);
    cv::Mat t2 = (cv::Mat_<double>(3, 1) << 0, 0, 0);

    // Shared pinhole intrinsics. No explicit K matrix is stored:
    // detail::CameraParams::K() rebuilds it from focal/aspect/ppx/ppy
    // (the K1/K2 matrices previously built here were never read).
    const double focal = 448.4400098227603;
    const double ppx = 516.5; // principal point x
    const double ppy = 290.5; // principal point y

    cameras_1[0].R = R1;
    cameras_1[0].t = t1;
    cameras_1[0].focal = focal;
    cameras_1[0].aspect = 1.0;
    cameras_1[0].ppx = ppx;
    cameras_1[0].ppy = ppy;

    cameras_1[1].R = R2;
    cameras_1[1].t = t2;
    cameras_1[1].focal = focal;
    cameras_1[1].aspect = 1.0;
    cameras_1[1].ppx = ppx;
    cameras_1[1].ppy = ppy;

    // Precomputed warp scale for the rig (would normally be the median focal
    // length found after bundle adjustment).
    warped_image_scale_ = 466.222;

    return OK;
}

// Stitcher::Status Stitcher::setTransform(InputArrayOfArrays images, const std::vector<detail::CameraParams> &cameras)
// {
//     std::vector<int> component;
//     for (int i = 0; i < (int)images.total(); i++)
//         component.push_back(i);

//     return setTransform(images, cameras, component);
// }


// Stitcher::Status Stitcher::setTransform(
//         InputArrayOfArrays images, const std::vector<detail::CameraParams> &cameras, const std::vector<int> &component)
// {
// //    CV_Assert(images.size() == cameras.size());

//     images.getUMatVector(imgs_);
//     masks_.clear();

//     if ((int)imgs_.size() < 2)
//     {
//         return ERR_NEED_MORE_IMGS;
//     }

//     work_scale_ = 1;
//     seam_work_aspect_ = 1;
//     seam_scale_ = 1;
//     bool is_work_scale_set = false;
//     bool is_seam_scale_set = false;
//     seam_est_imgs_.resize(imgs_.size());
//     full_img_sizes_.resize(imgs_.size());


//     for (size_t i = 0; i < imgs_.size(); ++i)
//     {
//         full_img_sizes_[i] = imgs_[i].size();
//         if (registr_resol_ < 0)
//         {
//             work_scale_ = 1;
//             is_work_scale_set = true;
//         }
//         else
//         {
//             if (!is_work_scale_set)
//             {
//                 work_scale_ = std::min(1.0, std::sqrt(registr_resol_ * 1e6 / full_img_sizes_[i].area()));
//                 is_work_scale_set = true;
//             }
//         }
//         if (!is_seam_scale_set)
//         {
//             seam_scale_ = std::min(1.0, std::sqrt(seam_est_resol_ * 1e6 / full_img_sizes_[i].area()));
//             seam_work_aspect_ = seam_scale_ / work_scale_;
//             is_seam_scale_set = true;
//         }

//         resize(imgs_[i], seam_est_imgs_[i], Size(), seam_scale_, seam_scale_, INTER_LINEAR_EXACT);
//     }

//     features_.clear();
//     pairwise_matches_.clear();

//     indices_ = component;
//     std::vector<UMat> seam_est_imgs_subset;
//     std::vector<UMat> imgs_subset;
//     std::vector<Size> full_img_sizes_subset;
//     for (size_t i = 0; i < indices_.size(); ++i)
//     {
//         imgs_subset.push_back(imgs_[indices_[i]]);
//         seam_est_imgs_subset.push_back(seam_est_imgs_[indices_[i]]);
//         full_img_sizes_subset.push_back(full_img_sizes_[indices_[i]]);
//     }
//     seam_est_imgs_ = seam_est_imgs_subset;
//     imgs_ = imgs_subset;
//     full_img_sizes_ = full_img_sizes_subset;

//     if ((int)imgs_.size() < 2)
//     {
//         return ERR_NEED_MORE_IMGS;
//     }

//     cameras_1 = cameras;

//     std::vector<double> focals;
//     for (size_t i = 0; i < cameras.size(); ++i)
//         focals.push_back(cameras_1[i].focal);

//     std::sort(focals.begin(), focals.end());
//     if (focals.size() % 2 == 1)
//         warped_image_scale_ = static_cast<float>(focals[focals.size() / 2]);
//     else
//         warped_image_scale_ = static_cast<float>(focals[focals.size() / 2 - 1] + focals[focals.size() / 2]) * 0.5f;

//     return Status::OK;
// }


/// Deprecated shim kept for API compatibility; the boolean flag is unused.
CV_DEPRECATED Ptr<Stitcher> createStitcher(bool /*ignored*/)
{
    return Stitcher::create(Stitcher::PANORAMA);
}

/// Deprecated shim kept for API compatibility; the boolean flag is unused.
CV_DEPRECATED Ptr<Stitcher> createStitcherScans(bool /*ignored*/)
{
    return Stitcher::create(Stitcher::SCANS);
}
} // namespace cv
