#include "YoloV1.h"
#include <cfloat>
#include <cstdio>
#include <fstream>
#include <iostream>
#include <stdexcept>
#include "opencv.hpp"
#include "tinyxml2.h"

using namespace std;
using namespace cv;
using torch::nn::MaxPool2dOptions;
using torch::nn::DropoutOptions;
using torch::nn::BatchNorm2dOptions;
using torch::nn::Conv2dOptions;
using torch::nn::LeakyReLUOptions;

#define DIV_1_7 (1.0f / 7)
#define MUL_7 (7)

//---------------------------------------------------------------------------------------
// Returns the single scale factor that fits `size` inside `bound` while keeping the
// aspect ratio (the smaller of the two per-axis ratios).
//---------------------------------------------------------------------------------------
float calcScaleRatio(const Size2f& size, const Size2f& bound)
{
    const float ratioX = bound.width / size.width;
    const float ratioY = bound.height / size.height;
    return std::min(ratioX, ratioY);
}

//---------------------------------------------------------------------------------------
// Scales `input` into a `bound`-sized black canvas without distorting the aspect
// ratio; the rescaled image is pasted at the top-left corner.
//---------------------------------------------------------------------------------------
Mat scaleKeepRatio(const Mat& input, const Size& bound)
{
    const float ratio = calcScaleRatio(input.size(), bound);
    const Size scaled((int)(input.cols * ratio), (int)(input.rows * ratio));
    Mat resized;
    resize(input, resized, scaled);
    Mat canvas = Mat::zeros(bound, CV_8UC3);
    resized.copyTo(canvas(Rect(0, 0, scaled.width, scaled.height)));
    return canvas;
}

//---------------------------------------------------------------------------------------
// Builds a white canvas with a framed plot area and a labelled Y axis.
// Default data range: x in [0, 1000], y in [0, 100].
//---------------------------------------------------------------------------------------
Curve::Curve(int rows, int cols)
{
    minx = 0;
    maxx = 1000;
    miny = 0;
    maxy = 100;
    curve = Mat(rows, cols, CV_8UC3, Scalar(255, 255, 255));
    // Margins around the plot area: 35px left for labels, 10px top/bottom/right.
    contentRegion = Rect(35, 10, cols - 45, rows - 20);
    rectangle(curve, contentRegion, Scalar(223, 127, 34));
    paintYAxis();
}

//---------------------------------------------------------------------------------------
// Draws six evenly spaced tick marks and value labels along the Y axis, running from
// miny at the bottom of the content region up to maxy at the top.
//---------------------------------------------------------------------------------------
void Curve::paintYAxis()
{
    for (int i = 0; i <= 5; i++)
    {
        int y = int(contentRegion.y + (1.0f - 1.0f * i / 5) * contentRegion.height);
        float value = miny + (1.0f * i / 5) * (maxy - miny);
        char strValue[32] = { 0 };
        // snprintf instead of the MSVC-only sprintf_s, so the code is portable.
        snprintf(strValue, sizeof(strValue), "%4.1f", value);
        putText(curve, strValue, Point(1, y + 4), 0, 0.35, Scalar(223, 127, 34));
        line(curve, Point(contentRegion.x, y), Point(contentRegion.x + 5, y), Scalar(223, 127, 34));
    }
}

//---------------------------------------------------------------------------------------
// Displays the current canvas in the loss-chart window and pumps the OpenCV event
// loop for 1 ms so the window actually repaints.
//---------------------------------------------------------------------------------------
void Curve::show()
{
    imshow("LOSS折线图", curve);
    waitKey(1);
}

//---------------------------------------------------------------------------------------
// Plots one data point: maps (pt.x, pt.y) from data space into pixel space inside the
// content region (y axis flipped), paints one black pixel and refreshes the window.
//---------------------------------------------------------------------------------------
void Curve::append(const Point2f& pt)
{
    const float fracX = (pt.x - minx) / (maxx - minx);
    const float fracY = (pt.y - miny) / (maxy - miny);
    const int px = int(contentRegion.x + fracX * contentRegion.width);
    const int py = int(contentRegion.y + (1 - fracY) * contentRegion.height);
    const bool inside = px >= 0 && px < curve.cols && py >= 0 && py < curve.rows;
    if (inside)
    {
        curve.at<Vec3b>(py, px) = Vec3b(0, 0, 0);
    }
    show();
}

// Convenience overload: forwards separate coordinates as a Point2f.
void Curve::append(float x, float y)
{
    append(Point2f{ x, y });
}

/////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////

//---------------------------------------------------------------------------------------
// Conv -> BatchNorm -> LeakyReLU(0.1) building block used throughout the backbone.
//---------------------------------------------------------------------------------------
MConv2dImpl::MConv2dImpl(int input1, int input2, int kernel, int stride, int padding) : 
    conv1(nullptr), normal2(nullptr), relu3(nullptr)
{
    conv1 = register_module("conv1",
        Conv2d(Conv2dOptions(input1, input2, kernel).stride(stride).padding(padding)));
    normal2 = register_module("normal2", BatchNorm2d(BatchNorm2dOptions(input2)));
    relu3 = register_module("relu3", LeakyReLU(LeakyReLUOptions().negative_slope(0.1)));
}

// Applies convolution, batch normalization and leaky ReLU in sequence.
Tensor MConv2dImpl::forward(Tensor x)
{
    return relu3->forward(normal2->forward(conv1->forward(x)));
}

//---------------------------------------------------------------------------------------
// 1x1 bottleneck (input2 -> input1 channels) followed by a 3x3 expansion back to
// input2 channels — the paired-layer motif of the YOLOv1 backbone.
//---------------------------------------------------------------------------------------
MDoubleConv2dImpl::MDoubleConv2dImpl(int input1, int input2) : 
    conv1(nullptr), conv2(nullptr)
{
    conv1 = register_module("sub-conv1", MConv2d(input2, input1, 1, 1, 0));
    conv2 = register_module("sub-conv2", MConv2d(input1, input2, 3, 1, 1));
}

// Runs the bottleneck pair back to back.
Tensor MDoubleConv2dImpl::forward(Tensor x)
{
    return conv2->forward(conv1->forward(x));
}

//---------------------------------------------------------------------------------------
// Builds the YOLOv1 architecture: a convolutional backbone (stride-2 convs and 2x2
// max-pools downsample the 448x448 input to a 7x7x1024 feature map) followed by a
// two-layer fully-connected head predicting 30 values per grid cell.
// NOTE(review): the `classes` parameter is unused — the head is hard-coded to
// 30 = 20 classes + 2 boxes * (1 conf + 4 coords); confirm before reusing elsewhere.
//---------------------------------------------------------------------------------------
YoloImpl::YoloImpl(int classes)
{
    // Backbone.
    feature->push_back(MConv2d(3, 64, 7, 2, 3));
    feature->push_back(MaxPool2d(MaxPool2dOptions(2).stride(2)));
    feature->push_back(MConv2d(64, 192, 3, 1, 1));
    feature->push_back(MaxPool2d(MaxPool2dOptions(2).stride(2)));
    feature->push_back(MConv2d(192, 128, 1, 1));
    feature->push_back(MConv2d(128, 256, 3, 1, 1));
    feature->push_back(MConv2d(256, 256, 1, 1));
    feature->push_back(MConv2d(256, 512, 3, 1, 1));
    feature->push_back(MaxPool2d(MaxPool2dOptions(2).stride(2)));
    feature->push_back(MDoubleConv2d(256, 512));
    feature->push_back(MDoubleConv2d(256, 512));
    feature->push_back(MDoubleConv2d(256, 512));
    feature->push_back(MDoubleConv2d(256, 512));
    feature->push_back(MConv2d(512, 512, 1, 1));
    feature->push_back(MConv2d(512, 1024, 3, 1, 1));
    feature->push_back(MaxPool2d(MaxPool2dOptions(2).stride(2)));
    feature->push_back(MDoubleConv2d(512, 1024));
    feature->push_back(MDoubleConv2d(512, 1024));
    feature->push_back(MConv2d(1024, 1024, 3, 1, 1));
    feature->push_back(MConv2d(1024, 1024, 3, 2, 1));
    feature->push_back(MConv2d(1024, 1024, 3, 1, 1));
    feature->push_back(MConv2d(1024, 1024, 3, 1, 1));
    // Head: 7*7*1024 features -> 4096 -> 7*7*30 raw predictions.
    classifier->push_back(Linear(1024 * 7 * 7, 4096));
    classifier->push_back(LeakyReLU(LeakyReLUOptions().negative_slope(0.1)));
    classifier->push_back(Dropout());
    classifier->push_back(Linear(4096, 30 * 7 * 7));
    classifier->push_back(Sigmoid()); /* not in the original paper */

    feature = register_module("feature", feature);
    classifier = register_module("classifier", classifier);
}

//---------------------------------------------------------------------------------------
// Runs backbone + head and reshapes the output to [batch, 49 cells, 30 values/cell].
//---------------------------------------------------------------------------------------
Tensor YoloImpl::forward(Tensor x)
{
    Tensor features = feature->forward(x);
    Tensor scores = classifier->forward(torch::flatten(features, 1));
    int batch = scores.size(0);
    return reshape(scores, { batch, MUL_7 * MUL_7, 30 });
}

//---------------------------------------------------------------------------------------
// Runs inference on one input and fills `output` with NMS-filtered detections.
// Returns true when at least one object survived the filtering.
//---------------------------------------------------------------------------------------
bool YoloImpl::predict(Tensor x, vector<ObjectFound>& output)
{
    vector<ObjectFound> candidates;
    outputToVector(forward(x), candidates);
    nms(candidates, 0.3f, output);
    return !output.empty();
}

//---------------------------------------------------------------------------------------
// Decodes the raw 49x30 network output into a flat list of candidate detections.
// Each grid cell contributes two boxes (confidence + xywh fields starting at offsets
// 20 and 25); coordinates are converted from cell-relative units to 448x448 pixels.
// NOTE(review): the class-index tensor is created on CUDA, so `input` must be on CUDA.
//---------------------------------------------------------------------------------------
void YoloImpl::outputToVector(Tensor input, vector<ObjectFound>& objects)
{
    input.squeeze_(0);
    // Hoisted out of the loop: the 20-class index vector never changes.
    Tensor index = torch::arange(0, 20, c10::kCUDA);
    int rows = input.size(0);
    for (int i = 0; i < rows; i++)
    {
        Tensor maxv = input[i].index_select(0, index).argmax(0);
        int category = maxv.item<int>();
        // Two predicted boxes per cell; their fields start at 20 and 25 respectively.
        for (int base = 20; base <= 25; base += 5)
        {
            ObjectFound temp;
            temp.category = category;
            temp.confidence = input[i][base].item<float>();
            float cx = (i % MUL_7 + input[i][base + 1].item<float>()) * DIV_1_7 * 448;
            float cy = (i / MUL_7 + input[i][base + 2].item<float>()) * DIV_1_7 * 448;
            float rw = input[i][base + 3].item<float>() * 448;
            float rh = input[i][base + 4].item<float>() * 448;
            temp.bound.x = cx - 0.5f * rw;
            temp.bound.y = cy - 0.5f * rh;
            temp.bound.width = rw;
            temp.bound.height = rh;
            objects.push_back(temp);
        }
    }
}

//---------------------------------------------------------------------------------------
// Non-maximum suppression. Some implementations suppress per class (grouping results
// by category first, which lets two nearby objects of different classes both survive);
// here all candidates are suppressed together regardless of class.
// objects: all candidate detections (sorted in place by descending confidence)
// thres: confidence threshold
// wells: the surviving detections
//---------------------------------------------------------------------------------------
void YoloImpl::nms(vector<ObjectFound>& objects, float thres, vector<ObjectFound>& wells)
{
    std::sort(objects.begin(), objects.end(), 
        [](const ObjectFound& a, const ObjectFound& b) { return a.confidence > b.confidence; });
    if (objects.empty() || objects[0].confidence < thres)
    {
        return; /* nothing passes the threshold */
    }
    vector<ObjectFound> filtered(1, objects.front());
    filtered.reserve(objects.size());
    for (auto& item : objects)
    {
        // Keep the candidate only if it barely overlaps every already-kept box.
        // (The top candidate overlaps itself with IoU 1, so it is not duplicated.)
        // `item` is captured by reference to avoid copying ObjectFound per comparison.
        bool good = std::all_of(filtered.begin(), filtered.end(), 
            [this, &item](const ObjectFound& x) { return calcIntersect(x.bound, item.bound) <= 0.1f; });
        if (good && item.confidence >= thres)
        {
            filtered.push_back(item);
        }
    }
    wells = std::move(filtered);
}

//---------------------------------------------------------------------------------------
// IoU (intersection over union) of two rectangles; returns 0 when they do not overlap.
// The epsilon in the denominator matches MDiyLossImpl::calcIntersect and guards the
// division against degenerate (near-zero-area) rectangles.
//---------------------------------------------------------------------------------------
float YoloImpl::calcIntersect(Rect2f a, Rect2f b)
{
    float arx = a.x + a.width;
    float ary = a.y + a.height;
    float brx = b.x + b.width;
    float bry = b.y + b.height;
    float crossx = std::max(a.x, b.x);
    float crossy = std::max(a.y, b.y);
    float crossw = std::min(arx, brx) - crossx;
    float crossh = std::min(ary, bry) - crossy;
    if (crossw > 0 && crossh > 0)
    {
        float scross = crossw * crossh;
        float sa = a.width * a.height;
        float sb = b.width * b.height;
        return scross / (sa + sb - scross + FLT_EPSILON);
    }
    return 0;
}

/////////////////////////////////////////////////////////////////////////////////////////

// The loss module keeps no construction-time state; the default is sufficient.
MDiyLossImpl::MDiyLossImpl() = default;

//---------------------------------------------------------------------------------------
// Converts one (x, y, w, h) prediction tensor into an absolute rectangle in [0,1]
// image coordinates: x/y are offsets inside grid cell (gridx, gridy); negative
// predicted sizes are clamped to zero.
//---------------------------------------------------------------------------------------
Rect2f MDiyLossImpl::toRect(Tensor x, int gridx, int gridy)
{
    const float w = std::max(x[2].item<float>(), 0.0f);
    const float h = std::max(x[3].item<float>(), 0.0f);
    Rect2f rect;
    rect.width = w;
    rect.height = h;
    rect.x = (x[0].item<float>() + gridx) * DIV_1_7 - 0.5f * w;
    rect.y = (x[1].item<float>() + gridy) * DIV_1_7 - 0.5f * h;
    return rect;
}

//---------------------------------------------------------------------------------------
// IoU of two rectangles; 0 when disjoint. FLT_EPSILON keeps the division safe for
// degenerate rectangles.
//---------------------------------------------------------------------------------------
float MDiyLossImpl::calcIntersect(Rect2f a, Rect2f b)
{
    const float crossx = std::max(a.x, b.x);
    const float crossy = std::max(a.y, b.y);
    const float crossw = std::min(a.x + a.width, b.x + b.width) - crossx;
    const float crossh = std::min(a.y + a.height, b.y + b.height) - crossy;
    if (crossw <= 0 || crossh <= 0)
    {
        return 0;
    }
    const float scross = crossw * crossh;
    const float sunion = a.width * a.height + b.width * b.height - scross;
    return scross / (sunion + FLT_EPSILON);
}

//---------------------------------------------------------------------------------------
// YOLOv1 loss. Each output row is laid out as:
// 20[class] + 1[conf] + 4[xywh] + 1[conf] + 4[xywh]
// For each cell containing an object, the predictor with the higher IoU against the
// ground-truth box is "responsible": its coordinates are regressed (weight 5) and its
// confidence target is its IoU; the other predictor's confidence is down-weighted
// (0.5). Empty cells only push both confidences toward zero (weight 0.5).
// NOTE(review): all index tensors are created on CUDA, so predict/target must be on
// CUDA as well.
//---------------------------------------------------------------------------------------
Tensor MDiyLossImpl::forward(Tensor predict, Tensor target)
{
    // Index vectors into the 30-wide row: box1/box2 xywh, their cxy/wh halves,
    // the two confidences, and the 20 class scores.
    Tensor p1Idx = torch::tensor({ 21, 22, 23, 24 }, c10::kCUDA);
    Tensor p2Idx = torch::tensor({ 26, 27, 28, 29 }, c10::kCUDA);
    Tensor p1ccIdx = torch::tensor({ 21, 22 }, c10::kCUDA);
    Tensor p1xyIdx = torch::tensor({ 23, 24 }, c10::kCUDA);
    Tensor p2ccIdx = torch::tensor({ 26, 27 }, c10::kCUDA);
    Tensor p2xyIdx = torch::tensor({ 28, 29 }, c10::kCUDA);
    Tensor confIdx = torch::tensor({ 20, 25 }, c10::kCUDA);
    Tensor classIdx = torch::arange(0, 20, c10::kCUDA);
    Tensor zeros = torch::tensor({ 0, 0 }, c10::kCUDA);
    Tensor error = torch::tensor(0.0f, c10::kCUDA);
    int batchSize = target.size(0);
    for (int i = 0; i < batchSize; i++)
    {
        for (int j = 0; j < MUL_7 * MUL_7; j++)
        {
            // target[...][20] != 0 marks a cell that contains an object.
            bool exist = (target[i][j][20].item<float>() != 0);
            if (exist)
            {
                int row = j / MUL_7;
                int col = j % MUL_7;
                // Decode both predicted boxes and the ground truth (target stores
                // only one box, in the box-1 slots) into [0,1] rectangles.
                Rect2f rect1 = toRect(predict[i][j].index_select(0, p1Idx), col, row);
                Rect2f rect2 = toRect(predict[i][j].index_select(0, p2Idx), col, row);
                Rect2f real = toRect(target[i][j].index_select(0, p1Idx), col, row);
                float iou1 = calcIntersect(rect1, real);
                float iou2 = calcIntersect(rect2, real);
                if (iou1 >= iou2)
                {
                    // Box 1 is responsible: coordinate loss (weight 5, sqrt on wh)
                    // plus confidence regressed toward the measured IoUs.
                    error += 5 * torch::sum((predict[i][j].index_select(0, p1ccIdx) - target[i][j].index_select(0, p1ccIdx)).pow(2)) +
                        5 * torch::sum((predict[i][j].index_select(0, p1xyIdx).sqrt() - target[i][j].index_select(0, p1xyIdx).sqrt()).pow(2));
                    error += torch::pow(predict[i][j][20] - iou1, 2);
                    error += 0.5f * torch::pow(predict[i][j][25] - iou2, 2);
                }
                else
                {
                    // Box 2 is responsible: same terms with the roles swapped.
                    error += 5 * torch::sum((predict[i][j].index_select(0, p2ccIdx) - target[i][j].index_select(0, p1ccIdx)).pow(2)) +
                        5 * torch::sum((predict[i][j].index_select(0, p2xyIdx).sqrt() - target[i][j].index_select(0, p1xyIdx).sqrt()).pow(2));
                    error += torch::pow(predict[i][j][25] - iou2, 2);
                    error += 0.5f * torch::pow(predict[i][j][20] - iou1, 2);
                }
                // Periodic debug print (every 250 object cells) of the predicted
                // confidences vs their IoU targets.
                static int pie = 0;
                if (pie % 250 == 0)
                {
                    cout << "预测的：" << predict[i][j][20].item<float>() << "," << predict[i][j][25].item<float>() <<
                        ";        IOU:" << iou1 << "," << iou2 << endl;
                }
                pie++;
                // Classification loss (sum of squared errors over the 20 classes).
                error += torch::sum((predict[i][j].index_select(0, classIdx) - target[i][j].index_select(0, classIdx)).pow(2));
            }
            else
            {
                // No object: push both confidences toward zero with weight 0.5.
                error += 0.5f * torch::sum(predict[i][j].index_select(0, confIdx).pow(2));
            }
        }
    }
    return error;
}

/////////////////////////////////////////////////////////////////////////////////////////

//---------------------------------------------------------------------------------------
// Loads the VOC2012 annotation list named by `txtName` (e.g. "train"/"val").
// For every listed image it records the JPEG path and all labelled objects, with the
// object geometry pre-scaled to the 448x448 input resolution (aspect ratio kept).
// Unreadable annotation files are skipped with a message; an unknown class name
// throws std::runtime_error.
//---------------------------------------------------------------------------------------
MDataset::MDataset(const string& vocDir, const string& txtName, const vector<string>& className) :
    names(className), rand(true)
{
    string xmlDir = vocDir + "/VOCdevkit/VOC2012/Annotations/";
    string jpgDir = vocDir + "/VOCdevkit/VOC2012/JPEGImages/";
    string trainFile = vocDir + "/VOCdevkit/VOC2012/ImageSets/Main/" + txtName + ".txt";
    ifstream fs(trainFile, ios::in);
    if (!fs.is_open())
    {
        cout << "文件打开失败，检查文件路径是否错误" << endl;
        return;
    }
    string line;
    // getline-driven loop instead of `while (!fs.eof())`: stops cleanly at EOF and
    // never processes a failed read.
    while (std::getline(fs, line))
    {
        if (line.empty())
        {
            continue; /* e.g. a trailing newline at the end of the list */
        }
        string xmlPath = xmlDir + line + ".xml";
        tinyxml2::XMLDocument doc;
        if (doc.LoadFile(xmlPath.data()) != tinyxml2::XML_SUCCESS)
        {
            // Previously unchecked: a bad/missing XML would null-deref below.
            cout << "XML load failed: " << xmlPath << endl;
            continue;
        }
        tinyxml2::XMLElement* root = doc.RootElement();
        images.push_back(jpgDir + root->FirstChildElement("filename")->GetText());
        Size2f real;
        real.width = root->FirstChildElement("size")->FirstChildElement("width")->FloatText();
        real.height = root->FirstChildElement("size")->FirstChildElement("height")->FloatText();
        float ratio = calcScaleRatio(real, Size(448, 448));
        vector<Label> myLabels;
        for (tinyxml2::XMLElement* item = root->FirstChildElement("object"); item; 
            item = item->NextSiblingElement("object"))
        {
            Label temp;
            string className = item->FirstChildElement("name")->GetText();
            auto found = std::find(names.begin(), names.end(), className);
            if (found == names.end())
            {
                // std::runtime_error is portable; std::exception(const char*) is MSVC-only.
                throw std::runtime_error("输入的分类不存在");
            }
            temp.category = int(found - names.begin());
            float xmin = item->FirstChildElement("bndbox")->FirstChildElement("xmin")->FloatText();
            float xmax = item->FirstChildElement("bndbox")->FirstChildElement("xmax")->FloatText();
            float ymin = item->FirstChildElement("bndbox")->FirstChildElement("ymin")->FloatText();
            float ymax = item->FirstChildElement("bndbox")->FirstChildElement("ymax")->FloatText();
            // Convert to the object's rectangle after the image is rescaled to 448x448.
            temp.center.x = 0.5f * (xmax + xmin) * ratio;
            temp.center.y = 0.5f * (ymax + ymin) * ratio;
            temp.size.width = (xmax - xmin) * ratio;
            temp.size.height = (ymax - ymin) * ratio;
            myLabels.push_back(temp);
        }
        cates.push_back(std::move(myLabels));
    }
}

//---------------------------------------------------------------------------------------
// Builds the training target for one image. The two predictor slots of an output cell
// describe the same location, so the target only needs one 25-wide row per cell:
// 20[class one-hot] + 1[objectness] + 2[cell-relative center] + 2[relative size].
//---------------------------------------------------------------------------------------
Tensor MDataset::makeTarget(const vector<Label>& labels)
{
    Tensor target = torch::zeros({ MUL_7 * MUL_7, 25 });
    for (auto& item : labels)
    {
        const int64 ccol = int64(item.center.x / 448 * MUL_7);
        const int64 crow = int64(item.center.y / 448 * MUL_7);
        const int64 cell = crow * MUL_7 + ccol;
        target[cell] = 0; // clear the row: a later object overwrites an earlier one
        target[cell][item.category] = 1.0f;
        target[cell][20] = 1.0f;
        target[cell][21] = MUL_7 * fmodf(item.center.x / 448, DIV_1_7);
        target[cell][22] = MUL_7 * fmodf(item.center.y / 448, DIV_1_7);
        target[cell][23] = item.size.width / 448;
        target[cell][24] = item.size.height / 448;
    }
    return target;
}

//---------------------------------------------------------------------------------------
// Debug visualization: decodes a 49x25 target tensor back into pixel-space boxes and
// draws them (with class names and the file title) over the 448x448 image, showing
// the result for 5 seconds. Used to eyeball augmented labels.
//---------------------------------------------------------------------------------------
void MDataset::testShowTarget(const Mat& image, const string& file, Tensor target)
{
    size_t found = file.find_last_of('/');
    string title = file.substr(found + 1);
    Mat color = image.clone();
    for (int i = 0; i < MUL_7 * MUL_7; i++)
    {
        if (target[i][20].item<float>() == 0)
        {
            continue; /* this cell holds no object */
        }
        // Invert the makeTarget() encoding back into pixel coordinates.
        int icx = DIV_1_7 * ((i % MUL_7) + target[i][21].item<float>()) * 448;
        int icy = DIV_1_7 * ((i / MUL_7) + target[i][22].item<float>()) * 448;
        int icw = target[i][23].item<float>() * 448;
        int ich = target[i][24].item<float>() * 448;
        rectangle(color, Rect(icx - icw / 2, icy - ich / 2, icw, ich), Scalar(0, 0, 255));
        torch::Tensor select = torch::arange(0, 20);
        torch::Tensor maxval = target[i].index_select(0, select).argmax(0);
        int nameIndex = maxval.item<int>();
        putText(color, names[nameIndex], Point2i(icx - icw / 2, icy - ich / 2 + 14), 0, 0.6, Scalar(0, 255, 0));
        // Title is (re)drawn inside the loop, so it only appears when at least one
        // object exists in the target.
        putText(color, title, Point2i(0, 14), 0, 0.6, Scalar(255, 0, 112));
    }
    imshow("样本图片", color);
    if (color.cols < 480)
    {
        resizeWindow("样本图片", 480, color.rows);
    }
    waitKey(5000);
}

// Enables/disables the random augmentations applied in get(); validation turns it off.
void MDataset::setRandomize(bool irand)
{
    rand = irand;
}

//---------------------------------------------------------------------------------------
// Returns one training example: the image scaled into a 448x448 canvas (randomly
// augmented when enabled) as a normalized float CHW tensor, plus its 49x25 target.
//---------------------------------------------------------------------------------------
torch::data::Example<> MDataset::get(size_t index)
{
    Mat loads = imread(images[index], IMREAD_COLOR);
    Mat image = scaleKeepRatio(loads, Size(448, 448));
    vector<Label> labels = cates[index]; // copy: augmentations mutate the labels
    if (rand)
    {
        tryTransform(image, labels);
    }

    Tensor target = makeTarget(labels);
#ifdef SHOW_FINAL_TARGET
    testShowTarget(image, images[index], target); // visualize the augmented labels
#endif

    // from_blob does not copy `image`; toType() below materializes a new float tensor,
    // so the returned data should not reference the local Mat — TODO confirm.
    Tensor imageTensor = torch::from_blob(image.data, { image.rows, image.cols, 3 }, torch::kByte);
    imageTensor = imageTensor.permute({ 2, 0, 1 }).toType(torch::kF32).div(255.0f);
    // Tensor ops that do not clone keep the original memory order (permute is a view).
    return { imageTensor, target };
}

// Number of samples (images) loaded from the annotation list.
torch::optional<size_t> MDataset::size() const
{
    return images.size();
}

//---------------------------------------------------------------------------------------
// Gives each augmentation (translate, scale, flip, exposure) an independent 30% chance
// of being applied, in a fixed order. One RNG draw per augmentation, as before.
//---------------------------------------------------------------------------------------
void MDataset::tryTransform(Mat& input, vector<Label>& labels)
{
    void (MDataset::*steps[])(Mat&, vector<Label>&) = {
        &MDataset::translate, &MDataset::magnify, &MDataset::flip, &MDataset::reexposure
    };
    for (auto step : steps)
    {
        if (float(rng) < 0.3f)
        {
            (this->*step)(input, labels);
        }
    }
}

//---------------------------------------------------------------------------------------
// Random translation. Computes, per axis, how far the image may shift without pushing
// any labelled box outside the frame (1px tolerance), then shifts image and labels by
// a uniform random offset. An axis whose feasible range is too small keeps offset 0;
// when both axes are too tight no translation happens at all.
//---------------------------------------------------------------------------------------
void MDataset::translate(Mat& input, vector<Label>& labels)
{
    // Bounding box of all labelled objects.
    Point2f minxy(448, 448);
    Point2f maxxy(0, 0);
    for (const auto& item : labels)
    {
        minxy.x = std::min(minxy.x, item.center.x - 0.5f * item.size.width);
        minxy.y = std::min(minxy.y, item.center.y - 0.5f * item.size.height);
        maxxy.x = std::max(maxxy.x, item.center.x + 0.5f * item.size.width);
        maxxy.y = std::max(maxxy.y, item.center.y + 0.5f * item.size.height);
    }
    float errx1 = -minxy.x + 1; /* 1px tolerance */
    float errx2 = input.cols - maxxy.x - 1;
    float erry1 = -minxy.y + 1;
    float erry2 = input.rows - maxxy.y - 1;
    bool okx = errx2 - errx1 >= 5;
    bool oky = erry2 - erry1 >= 5;
    if (!okx && !oky)
    {
        return; // both feasible ranges are too small: skip the augmentation
    }
    // BUG FIX: previously both axes were sampled even when one range was inverted
    // (err1 > err2), which could shift boxes out of the frame on that axis.
    float movx = okx ? rng.uniform(errx1, errx2) : 0.0f;
    float movy = oky ? rng.uniform(erry1, erry2) : 0.0f;
    Mat trans(2, 3, CV_32FC1);
    trans.at<float>(0, 0) = 1;
    trans.at<float>(0, 1) = 0;
    trans.at<float>(0, 2) = movx;
    trans.at<float>(1, 0) = 0;
    trans.at<float>(1, 1) = 1;
    trans.at<float>(1, 2) = movy;
    warpAffine(input, input, trans, input.size());
    for (auto& item : labels)
    {
        item.center.x += movx;
        item.center.y += movy;
    }
}

//---------------------------------------------------------------------------------------
// Random scaling about the image origin (0,0) — not the center: the affine matrix has
// no translation component and the label centers are simply multiplied by the factor.
// The factor is bounded so the smallest object stays at least ~20px and the farthest
// object edge stays inside the 448px frame.
//---------------------------------------------------------------------------------------
void MDataset::magnify(Mat& input, vector<Label>& labels)
{
    float minScale;
    float maxScale;
    // Smallest object dimension limits how far we may shrink.
    Size2f mins(448, 448);
    for (const auto& item : labels)
    {
        mins.width = std::min(mins.width, item.size.width);
        mins.height = std::min(mins.height, item.size.height);
    }
    minScale = std::max(0.8f, 20 / std::min(mins.width, mins.height));
    // Farthest object edge limits how far we may enlarge.
    Point2f maxxy(0, 0);
    for (const auto& item : labels)
    {
        maxxy.x = std::max(maxxy.x, item.center.x + 0.5f * item.size.width);
        maxxy.y = std::max(maxxy.y, item.center.y + 0.5f * item.size.height);
    }
    maxScale = std::min(1.2f, (448 - 1) / std::max(maxxy.x, maxxy.y)); // -1 leaves a little margin
    if (minScale >= maxScale)
    {
        return; // no feasible scale factor
    }
    float thisScale = rng.uniform(minScale, maxScale);
    Mat trans(2, 3, CV_32FC1);
    trans.at<float>(0, 0) = thisScale;
    trans.at<float>(0, 1) = 0;
    trans.at<float>(0, 2) = 0;
    trans.at<float>(1, 0) = 0;
    trans.at<float>(1, 1) = thisScale;
    trans.at<float>(1, 2) = 0;
    warpAffine(input, input, trans, input.size());
    // Labels scale with the image (about the origin).
    for (auto& item : labels)
    {
        item.center.x *= thisScale;
        item.center.y *= thisScale;
        item.size.width *= thisScale;
        item.size.height *= thisScale;
    }
}

//---------------------------------------------------------------------------------------
// Random horizontal flip (vertical flipping is currently disabled). Mirrors the image
// with cv::flip and mirrors the label centers to match.
// BUG FIX: the old hand-built remap used `cols - j`, which is off by one relative to
// the label update (`cols - 1 - x`) and produced a border artifact; cv::flip mirrors
// exactly at cols-1-j (and rows-1-i), keeping pixels and labels consistent.
//---------------------------------------------------------------------------------------
void MDataset::flip(Mat& input, vector<Label>& labels)
{
    bool fx = rng(2);
    bool fy = 0/*rng(2)*/;
    if (!fx && !fy)
    {
        return; // nothing to flip
    }
    // cv::flip flipCode: 1 = horizontal (x), 0 = vertical (y), -1 = both.
    int flipCode = (fx && fy) ? -1 : (fx ? 1 : 0);
    cv::flip(input, input, flipCode);
    if (fx)
    {
        for (auto& item : labels)
        {
            item.center.x = input.cols - 1 - item.center.x;
        }
    }
    if (fy)
    {
        for (auto& item : labels)
        {
            item.center.y = input.rows - 1 - item.center.y;
        }
    }
}

//---------------------------------------------------------------------------------------
// Random brightness / saturation jitter. Converts to HSV, scales the S and V channels
// by factors drawn from [0.9, 1.25), and converts back. `labels` is not modified.
//---------------------------------------------------------------------------------------
void MDataset::reexposure(Mat& input, vector<Label>& labels)
{
    // Draw order kept: V gain first, then S gain.
    const float vGain = rng.uniform(0.9f, 1.25f);
    const float sGain = rng.uniform(0.9f, 1.25f);
    Mat hsv;
    cvtColor(input, hsv, COLOR_BGR2HSV);
    Mat ch[3];
    split(hsv, ch);
    ch[1] *= sGain;
    ch[2] *= vGain;
    merge(ch, 3, hsv);
    cvtColor(hsv, input, COLOR_HSV2BGR);
}

// The 20 PASCAL VOC object classes, in the canonical order used by the target tensors.
const static vector<string> classes =
{
    "aeroplane", "bicycle", "bird", "boat", "bottle",
    "bus", "car", "cat", "chair", "cow",
    "diningtable", "dog", "horse", "motorbike", "person",
    "pottedplant", "sheep", "sofa", "train", "tvmonitor"
};

//---------------------------------------------------------------------------------------
// Training loop: resumes network and optimizer state from "yolov1.pt"/"yolov1-opt.pt",
// runs 900 epochs of SGD over the VOC2012 "train" split (batch size 10), plots the
// mean loss per epoch on the curve window, and checkpoints every 10 epochs.
// NOTE(review): the unconditional torch::load calls require both checkpoint files to
// exist — a fresh run with no checkpoints will fail here; confirm intended workflow.
//---------------------------------------------------------------------------------------
void train(Yolo& net)
{
    torch::load(net, "yolov1.pt");
    torch::optim::SGD optimizer(net->parameters(), torch::optim::SGDOptions(0.0001).momentum(0.8));
    torch::load(optimizer, "yolov1-opt.pt");

    using MapDataset = torch::data::datasets::MapDataset<MDataset, torch::data::transforms::Stack<>>;
    using DataLoader = unique_ptr<torch::data::StatelessDataLoader<MapDataset, torch::data::samplers::RandomSampler>>;
    MapDataset myDataset = MDataset("E:/ProjectO/VOCtrainval_11-May-2012", "train", classes).
        map(torch::data::transforms::Stack<>());
    DataLoader dataLoader = torch::data::make_data_loader(myDataset, 10);
    Curve curve(600, 900);

    net->to(c10::kCUDA);
    net->train();
    MDiyLoss lossFunc;
    for (int i = 0; i < 900; i++)
    {
        float lossValue = 0;
        float lossNum = 0;
        for (auto& batch : *dataLoader)
        {
            Tensor input = batch.data.to(c10::kCUDA);
            Tensor target = batch.target.to(c10::kCUDA);
            Tensor predict = net->forward(input);
            Tensor loss = lossFunc(predict, target);
            optimizer.zero_grad();
            loss.backward();
            optimizer.step();
            lossValue += loss.item<float>();
            lossNum++;
        }
        // Mean loss over the epoch's batches, printed and plotted.
        float eachVal = lossValue / lossNum;
        cout << "批次: " << i << "。训练损失：" << eachVal << endl;
        curve.append(i, eachVal);
        if (i % 10 == 9)
        {
            // Checkpoint network + optimizer every 10 epochs.
            torch::save(net, "yolov1.pt");
            torch::save(optimizer, "yolov1-opt.pt");
        }
    }
}

//---------------------------------------------------------------------------------------
// Validation-set visual check. Training only brings the loss down to about 4.1, where
// it plateaus; at that point roughly 50% of objects are detected and about 45% are
// classified correctly. The network also regularly misses objects or produces
// noticeably offset bounding boxes.
// Shows 20 random validation images with predicted boxes (red) and the ground-truth
// class names listed in the top-left corner.
//---------------------------------------------------------------------------------------
void check(Yolo& net)
{
    MDataset myDataset = MDataset("E:/ProjectO/VOCtrainval_11-May-2012", "val", classes);
    myDataset.setRandomize(false); // no augmentation for validation
    setRNGSeed((int)getTickCount());
    RNG rng = theRNG();

    torch::load(net, "yolov1.pt");

    net->to(c10::kCUDA);
    net->eval();
    int count = (int)myDataset.size().value();
    for (int i = 0; i < 20; i++)
    {
        torch::data::Example<> record = myDataset.get(rng.uniform(0, count));
        // Wraps the CPU tensor's memory as a float image (no copy) for drawing.
        Mat image(record.data.size(1), record.data.size(2), CV_32FC3, record.data.data_ptr());
        vector<ObjectFound> results;
        Tensor input = record.data.unsqueeze_(0).to(c10::kCUDA);
        net->predict(input, results);
        for (const auto& item : results)
        {
            rectangle(image, Rect(item.bound), Scalar(0, 0, 1.0));
            string name = classes[item.category] + "(" + std::to_string(item.confidence) + ")";
            putText(image, name, Point2i(item.bound.x, item.bound.y + 14), 0, 0.6, Scalar(0, 255, 0));
        }
        // List the ground-truth class names down the left edge for comparison.
        int s = 1;
        for (int t = 0; t < record.target.size(0); t++)
        {
            if (record.target[t][20].item<int>() != 0)
            {
                Tensor maxi = record.target[t].index_select(0, torch::arange(0, 20)).argmax(0);
                putText(image, classes[maxi.item<int>()], Point2i(0, s * 14), 0, 0.6, Scalar(127, 192, 0));
                s++;
            }
        }
        imshow("预测图片", image);
        if (image.cols < 480)
        {
            resizeWindow("预测图片", 480, image.rows);
        }
        waitKey(3000);
    }
    destroyAllWindows();
}

/////////////////////////////////////////////////////////////////////////////////////////

//---------------------------------------------------------------------------------------
// Entry point: builds the 20-class YOLOv1 network and runs training. Swap the
// commented calls below to run validation instead.
//---------------------------------------------------------------------------------------
int main()
{
    Yolo yolo(20);

    /* Optional custom weight initialization; the default init range is [-0.1, 0.1] */
    //yolo->apply([](torch::nn::Module& modu) {
    //    torch::nn::Conv2dImpl* conv = modu.as<torch::nn::Conv2dImpl>();
    //    if (conv) {
    //        torch::nn::init::uniform_(conv->weight, -0.001, 0.001);
    //        torch::nn::init::uniform_(conv->bias, -0.001, 0.001);
    //    }
    //    torch::nn::LinearImpl* ln = modu.as<torch::nn::LinearImpl>();
    //    if (ln) {
    //        torch::nn::init::uniform_(ln->weight, -0.001, 0.001);
    //        torch::nn::init::uniform_(ln->bias, -0.001, 0.001);
    //    }
    //});

    train(yolo);
    //check(yolo);

    return 0; // (removed an unused sentinel local that served no purpose)
}







