#include "YoloV3.h"
#include "tinyxml2.h"
#include "opencv.hpp"
#include <algorithm> // std::min, std::max, std::sort, std::copy_n
#include <array>
#include <cassert>   // assert used by the ASSERT macro
#include <cfloat>    // FLT_EPSILON
#include <climits>   // INT_MIN
#include <cmath>     // expf, logf, fmodf
#include <cstdio>    // snprintf
#include <fstream>
#include <iostream>
#include <random>
#include <stdexcept> // std::runtime_error

#pragma warning(disable: 26451)

using namespace std;
using namespace cv;
using torch::nn::functional::binary_cross_entropy;
using torch::nn::functional::binary_cross_entropy_with_logits;
using torch::nn::functional::BinaryCrossEntropyFuncOptions;
using torch::nn::functional::BinaryCrossEntropyWithLogitsFuncOptions;
using torch::nn::MaxPool2dOptions;
using torch::nn::DropoutOptions;
using torch::nn::BatchNorm2dOptions;
using torch::nn::Conv2dOptions;
using torch::nn::LeakyReLUOptions;
using torch::nn::UpsampleOptions;

#define ASSERT(x) assert(x)
#define PSIZE 416 // YOLO-V3 network input image size (square, in pixels)
// Grid sizes of the three YOLO output scales and their reciprocals.
#define MUL_13 13
#define DIV_1_13 (1.0f / 13.0f)
#define MUL_26 26
#define DIV_1_26 (1.0f / 26.0f)
#define MUL_52 52
#define DIV_1_52 (1.0f / 52.0f)

// Exclusive upper row bounds of each scale inside the flattened
// (13*13 + 26*26 + 52*52)-row prediction tensor.
constexpr int M13_UPPER = MUL_13 * MUL_13;
constexpr int M26_UPPER = MUL_13 * MUL_13 + MUL_26 * MUL_26;
constexpr int M52_UPPER = MUL_13 * MUL_13 + MUL_26 * MUL_26 + MUL_52 * MUL_52;

/* Nine anchor box sizes obtained by clustering the training samples. */
/* Ordered from largest to smallest, matching the YOLO output tensor order. */
static const Size2f predefined[] =
{
    { 405.165f, 298.202f },
    { 260.577f, 378.928f },
    { 269.060f, 238.397f },
    { 382.339f, 152.582f },
    { 146.132f, 266.731f },
    { 193.512f, 123.552f },
    { 95.9557f, 174.063f },
    { 76.1172f, 89.3362f },
    { 30.3481f, 36.6398f },
};

// The 20 PASCAL VOC object class names, in label-index order.
const static vector<string> classes =
{
    "aeroplane", "bicycle", "bird", "boat", "bottle",
    "bus", "car", "cat", "chair", "cow",
    "diningtable", "dog", "horse", "motorbike", "person",
    "pottedplant", "sheep", "sofa", "train", "tvmonitor"
};

//---------------------------------------------------------------------------------------
// Streams a vector as "[ a, b, c ]".
// Previously the trailing separator was "erased" with "\b\b", which only works on a
// terminal (files/logs keep the control characters) and prints garbage for an empty
// vector. A separator-first loop needs no erasing. Elements are taken by const
// reference to avoid copying them.
//---------------------------------------------------------------------------------------
template<typename T>
std::ostream& operator<<(std::ostream& os, const std::vector<T>& vecs)
{
    os << "[ ";
    const char* sep = "";
    for (const auto& item : vecs)
    {
        os << sep << item;
        sep = ", ";
    }
    os << " ]";
    return os;
}

//---------------------------------------------------------------------------------------
// Streams a fixed-size float array as "[ a, b, c ]".
// As with the vector overload, the old "\b\b" backspace trick corrupts non-terminal
// output; emit the separator before each element after the first instead.
//---------------------------------------------------------------------------------------
template<int size>
std::ostream& operator<<(std::ostream& os, const float(&vecs)[size])
{
    os << "[ ";
    for (int i = 0; i < size; i++)
    {
        if (i > 0)
        {
            os << ", ";
        }
        os << vecs[i];
    }
    os << " ]";
    return os;
}

//---------------------------------------------------------------------------------------
// Returns the largest uniform scale factor that fits `size` inside `bound`
// while preserving the aspect ratio.
//---------------------------------------------------------------------------------------
float calcScaleRatio(const Size2f& size, const Size2f& bound)
{
    const float ratioX = bound.width / size.width;
    const float ratioY = bound.height / size.height;
    return std::min(ratioX, ratioY);
}

//---------------------------------------------------------------------------------------
// Letterboxing: scales `input` uniformly so it fits inside `bound`, then pastes the
// result into the top-left corner of a black bound-sized canvas.
//---------------------------------------------------------------------------------------
Mat scaleKeepRatio(const Mat& input, const Size& bound)
{
    const float ratio = calcScaleRatio(input.size(), bound);
    const Size scaled((int)(input.cols * ratio), (int)(input.rows * ratio));
    Mat resized;
    resize(input, resized, scaled);
    Mat canvas = Mat::zeros(bound, CV_8UC3);
    resized.copyTo(canvas(Rect(0, 0, scaled.width, scaled.height)));
    return canvas;
}

/////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////

// Series colors (BGR): index 0 = black, index 1 = light blue.
Scalar Curve::colors[2] = { Scalar::all(0), Scalar(255, 64, 64) };

//---------------------------------------------------------------------------------------
// Creates a white rows x cols canvas with a framed plotting region and a painted
// Y axis. Default value range: x in [0, 1000], y in [0, 1].
//---------------------------------------------------------------------------------------
Curve::Curve(int rows, int cols)
{
    minx = 0;
    maxx = 1000;
    miny = 0;
    maxy = 1;
    // INT_MIN marks "no previous point" for both series.
    lastPoint[0] = Point2i(INT_MIN, INT_MIN);
    lastPoint[1] = Point2i(INT_MIN, INT_MIN);
    curve = Mat(rows, cols, CV_8UC3, Scalar(255, 255, 255));
    contentRegion = Rect(35, 10, cols - 45, rows - 20);

    // Frame is one pixel wider/taller than the content region.
    const Rect frame(contentRegion.x, contentRegion.y,
        contentRegion.width + 1, contentRegion.height + 1);
    rectangle(curve, frame, Scalar(223, 127, 34));
    paintYAxis();
}

//---------------------------------------------------------------------------------------
// Draws six horizontal grid lines across the plotting region and labels each with its
// value on the Y axis (miny..maxy in 5 equal steps).
//---------------------------------------------------------------------------------------
void Curve::paintYAxis()
{
    for (int i = 0; i <= 5; i++)
    {
        char strValue[32] = "";
        int y = int(contentRegion.y + (1.0f - 1.0f / 5.0f * i) * contentRegion.height);
        float value = miny + (1.0f / 5.0f * i) * (maxy - miny);
        // snprintf instead of the MSVC-only sprintf_s for portability; same output.
        snprintf(strValue, sizeof(strValue), "%4.1f", value);
        putText(curve, strValue, Point2i(1, y + 4), 0, 0.4, Scalar(223, 127, 34));
        line(curve, Point2i(contentRegion.x, y), Point2i(contentRegion.x + contentRegion.width, y), Scalar(223, 127, 34));
    }
}

// Displays the current canvas; waitKey(1) gives HighGUI a chance to process events.
void Curve::show()
{
    cv::imshow("LOSS折线图", curve);
    waitKey(1);
}

void Curve::append(int index, const Point2f& pt)
{
    Point2i scr = real2Screen(pt);
    if (lastPoint[index].x == INT_MIN || lastPoint[index].y == INT_MIN)
    {
        if (scr.x >= 0 && scr.x < curve.cols && 
            scr.y >= 0 && scr.y < curve.rows)
        {
            Vec4b myColor = colors[index];
            curve.at<Vec3b>(scr.y, scr.x) = Vec3b(myColor[0], myColor[1], myColor[2]);
        }
    }
    else /* 不是第一个点 */
    {
        line(curve, lastPoint[index], scr, colors[index]);
    }
    lastPoint[index] = scr;
    show();
}

void Curve::append(int index, float x, float y)
{
    append(index, Point2f(x, y));
}

//---------------------------------------------------------------------------------------
// Maps a data-space point into pixel coordinates of the plotting region. Both axes are
// normalized to [0,1] first; screen y grows downward, hence the (1 - yper) flip.
//---------------------------------------------------------------------------------------
Point2i Curve::real2Screen(const Point2f& pos)
{
    const float xper = (pos.x - minx) / (maxx - minx);
    const float yper = (pos.y - miny) / (maxy - miny);
    return Point2i(int(contentRegion.x + xper * contentRegion.width),
                   int(contentRegion.y + (1 - yper) * contentRegion.height));
}

/////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////

//---------------------------------------------------------------------------------------
// Conv -> BatchNorm -> LeakyReLU(0.1) building block. The convolution has no bias:
// the following BatchNorm makes a conv bias redundant.
//---------------------------------------------------------------------------------------
MConv2dImpl::MConv2dImpl(int input1, int input2, int kernel, int stride, int padding) : 
    conv1(nullptr), normal2(nullptr), relu3(nullptr)
{
    conv1 = register_module("conv1",
        Conv2d(Conv2dOptions(input1, input2, kernel).stride(stride).padding(padding).bias(false)));
    normal2 = register_module("normal2", BatchNorm2d(BatchNorm2dOptions(input2)));
    relu3 = register_module("relu3", LeakyReLU(LeakyReLUOptions().negative_slope(0.1)));
}

// Applies conv, batch-norm and activation in sequence.
Tensor MConv2dImpl::forward(Tensor x)
{
    return relu3->forward(normal2->forward(conv1->forward(x)));
}

//---------------------------------------------------------------------------------------
// Residual unit: 1x1 bottleneck to `medium` channels, then 3x3 back to `input2`.
//---------------------------------------------------------------------------------------
MResidualUnitImpl::MResidualUnitImpl(int input1, int medium, int input2) :
    conv1(nullptr), conv2(nullptr)
{
    conv1 = register_module("ru-conv1", MConv2d(input1, medium, 1, 1, 0));
    conv2 = register_module("ru-conv2", MConv2d(medium, input2, 3, 1, 1));
}

// Residual connection: input plus the two-convolution transform of it.
Tensor MResidualUnitImpl::forward(Tensor x)
{
    return x + conv2->forward(conv1->forward(x));
}

//---------------------------------------------------------------------------------------
// Downsampling stage: a stride-2 conv halves the spatial size, followed by `repeat`
// residual units at the new channel count.
//---------------------------------------------------------------------------------------
MMultiResidualImpl::MMultiResidualImpl(int repeat, int input1, int medium, int input2) :
    conv1(nullptr)
{
    conv1 = register_module("mru-conv1", MConv2d(input1, input2, 3, 2, 1));
    for (int n = 0; n < repeat; n++)
    {
        seq2->push_back(MResidualUnit(input2, medium, input2));
    }
    seq2 = register_module("mru-seq2", seq2);
}

// Downsample, then run the residual chain.
Tensor MMultiResidualImpl::forward(Tensor x)
{
    return seq2->forward(conv1->forward(x));
}

//---------------------------------------------------------------------------------------
// Builds the full YOLO-V3 graph: a Darknet-style backbone (conv01 + mmr01..mmr05)
// followed by three detection heads at 13x13, 26x26 and 52x52, chained together with
// 1x1 convs, 2x upsampling and channel concatenation. Each head outputs
// (classes + 5) * 3 channels: 3 anchors x (class scores + confidence + xywh).
//---------------------------------------------------------------------------------------
YoloImpl::YoloImpl(int classes) :
    conv01(nullptr), mmr01(nullptr), mmr02(nullptr), mmr03(nullptr),
    mmr04(nullptr), mmr05(nullptr), conv07(nullptr), outConv01(nullptr),
    conv08(nullptr), us01(nullptr), conv14(nullptr), outConv02(nullptr),
    conv15(nullptr), us02(nullptr), conv21(nullptr), outConv03(nullptr)
{
    int dimout = (classes + 5) * 3;
    conv01 = MConv2d(3, 32, 3, 1, 1); /* input: 3*416*416 */
    conv01 = register_module("yl-conv01", conv01);
    mmr01 = MMultiResidual(1, 32, 32, 64); /* 208*208 */
    mmr02 = MMultiResidual(2, 64, 64, 128); /* 104*104 */
    mmr03 = MMultiResidual(8, 128, 128, 256); /* 52*52 */
    mmr04 = MMultiResidual(8, 256, 256, 512); /* 26*26 */
    mmr05 = MMultiResidual(4, 512, 512, 1024); /* 13*13 */
    mmr01 = register_module("yl-mmr01", mmr01);
    mmr02 = register_module("yl-mmr02", mmr02);
    mmr03 = register_module("yl-mmr03", mmr03);
    mmr04 = register_module("yl-mmr04", mmr04);
    mmr05 = register_module("yl-mmr05", mmr05);

    // Five-conv block feeding the 13x13 head and the upsample branch below.
    mcb51->push_back(MConv2d(1024, 512, 1, 1));
    mcb51->push_back(MConv2d(512, 1024, 3, 1, 1));;
    mcb51->push_back(MConv2d(1024, 512, 1, 1));
    mcb51->push_back(MConv2d(512, 1024, 3, 1, 1));
    mcb51->push_back(MConv2d(1024, 512, 1, 1));
    mcb51 = register_module("yl-mcb51", mcb51);

    conv07 = MConv2d(512, 1024, 3, 1, 1);
    outConv01 = Conv2d(Conv2dOptions(1024, dimout, 1).bias(false)); /* output: 13*13*75 */
    conv07 = register_module("yl-conv07", conv07);
    outConv01 = register_module("yl-outConv01", outConv01);

    // Branch to the 26x26 head: 1x1 conv, 2x upsample, concat with mmr04 output.
    conv08 = MConv2d(512, 256, 1, 1);
    us01 = Upsample(UpsampleOptions().scale_factor(vector<double>({ 2.0, 2.0 })));
    mcb52->push_back(MConv2d(768, 256, 1, 1));
    mcb52->push_back(MConv2d(256, 512, 3, 1, 1));
    mcb52->push_back(MConv2d(512, 256, 1, 1));
    mcb52->push_back(MConv2d(256, 512, 3, 1, 1));
    mcb52->push_back(MConv2d(512, 256, 1, 1));
    mcb52 = register_module("yl-mcb52", mcb52);
    conv08 = register_module("yl-conv08", conv08);
    us01 = register_module("yl-us01", us01);

    conv14 = MConv2d(256, 512, 3, 1, 1);
    outConv02 = Conv2d(Conv2dOptions(512, dimout, 1).bias(false)); /* output: 26*26*75 */
    conv14 = register_module("yl-conv14", conv14);
    outConv02 = register_module("yl-outConv02", outConv02);

    // Branch to the 52x52 head: 1x1 conv, 2x upsample, concat with mmr03 output.
    conv15 = MConv2d(256, 128, 1, 1);
    us02 = Upsample(UpsampleOptions().scale_factor(vector<double>({ 2.0, 2.0 })));
    mcb53->push_back(MConv2d(384, 128, 1, 1));
    mcb53->push_back(MConv2d(128, 256, 3, 1, 1));
    mcb53->push_back(MConv2d(256, 128, 1, 1));
    mcb53->push_back(MConv2d(128, 256, 3, 1, 1));
    mcb53->push_back(MConv2d(256, 128, 1, 1));
    mcb53 = register_module("yl-mcb53", mcb53);
    conv15 = register_module("yl-conv15", conv15);
    us02 = register_module("yl-us02", us02);

    conv21 = MConv2d(128, 256, 3, 1, 1);
    outConv03 = Conv2d(Conv2dOptions(256, dimout, 1).bias(false)); /* output: 52*52*75 */
    conv21 = register_module("yl-conv21", conv21);
    outConv03 = register_module("yl-outConv03", outConv03);
}

//---------------------------------------------------------------------------------------
// Runs the backbone, produces the three scale outputs, and concatenates them into a
// single BATCH x (13*13 + 26*26 + 52*52) x 75 tensor (rows ordered coarse to fine).
//---------------------------------------------------------------------------------------
Tensor YoloImpl::forward(Tensor x)
{
    x = conv01->forward(x);
    x = mmr01->forward(x);
    x = mmr02->forward(x);
    Tensor base1 = mmr03->forward(x);        // 52x52 feature map
    Tensor base2 = mmr04->forward(base1);    // 26x26 feature map
    Tensor base3 = mmr05->forward(base2);    // 13x13 feature map
    Tensor sec1 = mcb51->forward(base3);
    Tensor out1 = conv07->forward(sec1);
    out1 = outConv01->forward(out1); /* output: 75*13*13 */

    // Upsample the 13x13 branch and fuse it with the 26x26 backbone features.
    sec1 = conv08->forward(sec1);
    sec1 = us01->forward(sec1);
    sec1 = torch::concat({ base2, sec1 }, 1);
    Tensor sec2 = mcb52->forward(sec1);
    Tensor out2 = conv14->forward(sec2);
    out2 = outConv02->forward(out2); /* output: 75*26*26 */

    // Upsample the 26x26 branch and fuse it with the 52x52 backbone features.
    sec2 = conv15->forward(sec2);
    sec2 = us02->forward(sec2);
    sec2 = torch::concat({ base1, sec2 }, 1);
    Tensor sec3 = mcb53->forward(sec2);
    Tensor out3 = conv21->forward(sec3);
    out3 = outConv03->forward(out3); /* output: 75*52*52 */
    
    // The permutes turn BATCH*75*M*M into the required BATCH*M*M*75 layout;
    // each scale is then flattened to BATCH*(M*M)*75 and concatenated along dim 1.
    out1 = out1.permute({ 0, 2, 3, 1 });
    out1 = torch::flatten(out1, 1, 2);
    out2 = out2.permute({ 0, 2, 3, 1 });
    out2 = torch::flatten(out2, 1, 2);
    out3 = out3.permute({ 0, 2, 3, 1 });
    out3 = torch::flatten(out3, 1, 2);
    Tensor z = torch::concat({ out1, out2, out3 }, 1);
    return z;
}

//---------------------------------------------------------------------------------------
// End-to-end inference: forward pass, decode boxes above 0.6 confidence, then NMS with
// a 0.4 IoU threshold. Returns true when at least one object survives.
//---------------------------------------------------------------------------------------
bool YoloImpl::predict(Tensor x, vector<ObjectFound>& output)
{
    vector<ObjectFound> candidates;
    outputToVector(forward(x), 0.6f, candidates);
    nms(candidates, 0.4f, output);
    return !output.empty();
}

//---------------------------------------------------------------------------------------
// Converts the network prediction into a vector of detections. Only one image can be
// processed per call.
// input  : prediction tensor
// thres  : confidence threshold in [0, 1]
// objects: output vector of detections
//---------------------------------------------------------------------------------------
void YoloImpl::outputToVector(Tensor input, float thres, vector<ObjectFound>& objects)
{
    Tensor result = input.squeeze_(0).to(torch::kCPU);
    int splits[] = { MUL_13, MUL_26, MUL_52 };
    int baseRows[] = { 0, M13_UPPER, M26_UPPER };
    int baseOffset = 0;
    for (int i = 0; i < M52_UPPER; i++)
    {
        // Row i just crossed into the next (finer) output scale.
        if (i == M13_UPPER || i == M26_UPPER)
        {
            baseOffset++;
        }
        for (int j = 0; j < 3; j++)
        {
            // The first 23 values of the slot (20 classes + conf + xy) go through a
            // sigmoid; the last two (wh) are kept raw for the exp() decoding below.
            float oneLine[25];
            Tensor indices = torch::arange(j * 25, j * 25 + 23);
            Tensor sigmoids = torch::sigmoid(result[i].index_select(0, indices));
            std::copy_n(sigmoids.const_data_ptr<float>(), 23, oneLine);
            oneLine[23] = result[i][j * 25 + 23].item<float>();
            oneLine[24] = result[i][j * 25 + 24].item<float>();
            const float* maxi = std::max_element(oneLine, oneLine + 20);
            // (Removed a leftover debug block that peeked at the raw data pointer.)
            float conf = oneLine[20] * *maxi; // objectness times best class score
            if (conf > thres)
            {
                ObjectFound temp;
                int which = j + baseOffset * 3; // anchor index into predefined[]
                // Decode the center from the grid cell plus the in-cell offset, and the
                // size from the anchor prior scaled by exp(wh).
                float cx = ((i - baseRows[baseOffset]) % splits[baseOffset] + oneLine[21]) * (PSIZE / splits[baseOffset]);
                float cy = ((i - baseRows[baseOffset]) / splits[baseOffset] + oneLine[22]) * (PSIZE / splits[baseOffset]);
                float rw = expf(oneLine[23]) * predefined[which].width;
                float rh = expf(oneLine[24]) * predefined[which].height;
                temp.bound.x = cx - 0.5f * rw;
                temp.bound.y = cy - 0.5f * rh;
                temp.bound.width = rw;
                temp.bound.height = rh;
                temp.category = (maxi - oneLine);
                temp.confidence = conf;
                objects.push_back(temp);
            }
        }
    }
}

//---------------------------------------------------------------------------------------
// Non-maximum suppression, performed per class as commonly described: detections of
// the same class are grouped together first, then each class is suppressed
// independently.
// objects: all raw detections
// thres  : IoU threshold above which a lower-scoring box is suppressed
// wells  : output after suppression
//---------------------------------------------------------------------------------------
void YoloImpl::nms(const vector<ObjectFound>& objects, float thres, vector<ObjectFound>& wells)
{
    vector<ObjectFound> oneclass = objects;
    // 遍历20个分类，把相同类别放到oneclass里进行NMS
    for (int i = 0; i < 20; i++)
    {
        oneclass.clear();
        for (const auto &item : objects)
        {
            if (item.category == i)
            {
                oneclass.push_back(item);
            }
        }
        if (oneclass.empty())
        {
            continue; /* 空分类 */
        }
        // 下面是对一个单独的类NMS，先排序
        std::sort(oneclass.begin(), oneclass.end(),
            [](const ObjectFound& a, const ObjectFound& b) { return a.confidence > b.confidence; });
        vector<ObjectFound> filtered;
        while (!oneclass.empty())
        {
            filtered.push_back(oneclass.front());
            oneclass.erase(oneclass.begin());
            Rect2f curr = filtered.back().bound;
            for (auto iter = oneclass.begin(); iter != oneclass.end(); /*iter++*/)
            {
                if (calcIntersect(curr, iter->bound) >= thres)
                {
                    iter = oneclass.erase(iter);
                    continue;
                }
                iter++;
            }
        }
        wells.insert(wells.end(), filtered.begin(), filtered.end());
    }
}

//---------------------------------------------------------------------------------------
// Intersection-over-union of two rectangles; 0 when they do not overlap.
// FLT_EPSILON added to the denominator for consistency with MDataset::calcIntersect
// and to guard against division by zero on degenerate boxes.
//---------------------------------------------------------------------------------------
float YoloImpl::calcIntersect(Rect2f a, Rect2f b)
{
    float arx = a.x + a.width;
    float ary = a.y + a.height;
    float brx = b.x + b.width;
    float bry = b.y + b.height;
    float crossx = std::max(a.x, b.x);
    float crossy = std::max(a.y, b.y);
    float crossw = std::min(arx, brx) - crossx;
    float crossh = std::min(ary, bry) - crossy;
    if (crossw > 0 && crossh > 0)
    {
        float scross = crossw * crossh;
        float sa = a.width * a.height;
        float sb = b.width * b.height;
        return scross / (sa + sb - scross + FLT_EPSILON);
    }
    return 0;
}

/////////////////////////////////////////////////////////////////////////////////////////

// The loss module is stateless; nothing to initialize.
MDiyLossImpl::MDiyLossImpl()
{
}

// Appends the flat indices base+start .. base+start+loopCount-1 to `vecs`.
void MDiyLossImpl::forEach(vector<int64>& vecs, int64 base, int64 start, int64 loopCount)
{
    for (int64 offset = 0; offset < loopCount; offset++)
    {
        vecs.push_back(base + start + offset);
    }
}

//---------------------------------------------------------------------------------------
// Loss layout: the first 169 rows belong to the 13x13 scale, the middle 676 rows to
// 26x26 and the last 2704 rows to 52x52. Each row holds three predictions laid out as
// (20[class] + 1[conf] + 4[xywh]) x 3.
//---------------------------------------------------------------------------------------
Tensor MDiyLossImpl::forward(Tensor predict, Tensor target)
{
    // Coordinate loss weight per anchor: smaller anchors get a weight closer to 2.
    float lambdaCoords[9];
    for (int i = 0; i < (int)std::size(predefined); i++)
    {
        lambdaCoords[i] = 2 - predefined[i].area() / (PSIZE * PSIZE);
    }
    Tensor error = torch::tensor(0.0f, torch::kCUDA);
    int batchSize = target.size(0);

    // Flat indices of the three objectness flags (offsets 20/45/70) of every row.
    vector<int64> marks;
    for (int i = 0; i < batchSize; i++)
    {
        for (int j = 0; j < M52_UPPER; j++)
        {
            marks.push_back((i * M52_UPPER + j) * 75 + 20);
            marks.push_back((i * M52_UPPER + j) * 75 + 45);
            marks.push_back((i * M52_UPPER + j) * 75 + 70);
        }
    }
    Tensor myMarks = torch::from_blob(marks.data(), { batchSize, M52_UPPER, 3 }, torch::kLong);
    myMarks = myMarks.to(torch::kCUDA);
    myMarks = target.take(myMarks);
    myMarks = myMarks.to(torch::kCPU); /* element access is much faster on the CPU */
    
    // Collect flat indices into the prediction tensor, separated by loss term.
    vector<int64> classVecs;
    vector<int64> confVecs;
    vector<float> weightVecs;
    vector<int64> xyVecs;
    vector<int64> whVecs;
    vector<int64> noneVecs;
    for (int i = 0; i < batchSize; i++)
    {
        int category = 0;
        for (int j = 0; j < M52_UPPER; j++)
        {
            if (j == M13_UPPER || j == M26_UPPER)
            {
                category++; /* which of the 13x13 / 26x26 / 52x52 scales this row is in */
            }
            for (int k = 0; k < 3; k++)
            {
                float label = myMarks[i][j][k].item<float>();
                if (label == 0) /* anchor slot without an object */
                {
                    forEach(noneVecs, (i * M52_UPPER + j) * 75, k * 25 + 20, 1);
                }
                // BUGFIX: this branch used to be `label != 0`, which made the error
                // branch below unreachable; labels must be exactly 0 or 1.
                else if (label == 1) /* anchor slot with an object */
                {
                    forEach(classVecs, (i * M52_UPPER + j) * 75, k * 25 + 0, 20);
                    forEach(confVecs, (i * M52_UPPER + j) * 75, k * 25 + 20, 1);
                    forEach(xyVecs, (i * M52_UPPER + j) * 75, k * 25 + 21, 2);
                    forEach(whVecs, (i * M52_UPPER + j) * 75, k * 25 + 23, 2);
                    weightVecs.push_back(lambdaCoords[category * 3 + k]); /* xy and wh are two values each, */
                    weightVecs.push_back(lambdaCoords[category * 3 + k]); /* so push the weight twice */
                }
                else /* anything else means the target tensor is corrupt */
                {
                    cout << "Error label value: " << label << ".\r\n";
                }
            }
        }
    }
    /* object slots plus no-object slots must equal the total anchor count */
    ASSERT(confVecs.size() + noneVecs.size() == batchSize * M52_UPPER * 3);
    ASSERT(weightVecs.size() == xyVecs.size());
    ASSERT(weightVecs.size() == whVecs.size());
    Tensor indices;
    if (!classVecs.empty())
    {
        /* classification and confidence losses (BCE with logits, summed) */
        indices = torch::from_blob(classVecs.data(), { (int)classVecs.size() }, torch::kLong);
        indices = indices.to(torch::kCUDA);
        Tensor e1 = binary_cross_entropy_with_logits(predict.take(indices), target.take(indices),
            BinaryCrossEntropyWithLogitsFuncOptions().reduction(torch::kSum));
        error += e1;
        indices = torch::from_blob(confVecs.data(), { (int)confVecs.size() }, torch::kLong);
        indices = indices.to(torch::kCUDA);
        Tensor e2 = 4.0f * binary_cross_entropy_with_logits(predict.take(indices), target.take(indices),
            BinaryCrossEntropyWithLogitsFuncOptions().reduction(torch::kSum));
        error += e2;
        /* bounding-box losses: weighted squared error on sigmoid(xy) and raw wh */
        Tensor myWeights = torch::from_blob(weightVecs.data(), { (int)weightVecs.size() }, torch::kFloat);
        myWeights = myWeights.to(torch::kCUDA);
        indices = torch::from_blob(xyVecs.data(), { (int)xyVecs.size() }, torch::kLong);
        indices = indices.to(torch::kCUDA);
        Tensor wmce = torch::pow(torch::sigmoid(predict.take(indices)) - target.take(indices), 2) * myWeights;
        Tensor e3 = wmce.sum();
        error += e3;
        indices = torch::from_blob(whVecs.data(), { (int)whVecs.size() }, torch::kLong);
        indices = indices.to(torch::kCUDA);
        Tensor wmse = torch::pow(predict.take(indices) - target.take(indices), 2) * myWeights;
        Tensor e4 = wmse.sum();
        error += e4;
    }
    if (!noneVecs.empty())
    {
        /* confidence loss for the anchor slots without an object */
        indices = torch::from_blob(noneVecs.data(), { (int)noneVecs.size() }, torch::kLong);
        indices = indices.to(torch::kCUDA);
        Tensor e5 = binary_cross_entropy_with_logits(predict.take(indices), target.take(indices),
            BinaryCrossEntropyWithLogitsFuncOptions().reduction(torch::kSum));
        error += e5;
    }
    // Normalize by the total number of supervised scalar values.
    int allCount = (int)classVecs.size() + (int)confVecs.size() + (int)xyVecs.size() +
        (int)whVecs.size() + (int)noneVecs.size();
    error /= allCount;
    return error;
}

/////////////////////////////////////////////////////////////////////////////////////////

//---------------------------------------------------------------------------------------
// Loads a VOC2012 split: reads the image-id list `txtName`, then for every id parses
// the annotation XML, stores the JPEG path and converts each object box into a label
// scaled to the PSIZE x PSIZE letterboxed coordinate system.
// Throws std::runtime_error when an annotation references an unknown class name.
//---------------------------------------------------------------------------------------
MDataset::MDataset(const string& vocDir, const string& txtName, const vector<string>& iclassName) :
    names(iclassName), rand(true)
{
    string xmlDir = vocDir + "/VOCdevkit/VOC2012/Annotations/";
    string jpgDir = vocDir + "/VOCdevkit/VOC2012/JPEGImages/";
    string trainFile = vocDir + "/VOCdevkit/VOC2012/ImageSets/Main/" + txtName + ".txt";
    ifstream fs(trainFile, ios::in);
    if (!fs.is_open())
    {
        cout << "文件打开失败，检查文件路径是否错误" << endl;
        return;
    }
    string line;
    // getline-driven loop instead of the eof() antipattern.
    while (std::getline(fs, line))
    {
        if (line.empty())
        {
            continue; /* blank line in the id list */
        }
        string xmlPath = xmlDir + line + ".xml";
        tinyxml2::XMLDocument doc;
        // Previously the load result was ignored; a missing/bad XML then crashed on a
        // null RootElement(). Skip the entry with a message instead.
        if (doc.LoadFile(xmlPath.data()) != tinyxml2::XML_SUCCESS || doc.RootElement() == nullptr)
        {
            cout << "Failed to load annotation: " << xmlPath << endl;
            continue;
        }
        tinyxml2::XMLElement* root = doc.RootElement();
        images.push_back(jpgDir + root->FirstChildElement("filename")->GetText());
        Size2f real;
        real.width = root->FirstChildElement("size")->FirstChildElement("width")->FloatText();
        real.height = root->FirstChildElement("size")->FirstChildElement("height")->FloatText();
        float ratio = calcScaleRatio(real, Size(PSIZE, PSIZE));
        vector<Label> myLabels;
        for (tinyxml2::XMLElement* item = root->FirstChildElement("object"); item; 
            item = item->NextSiblingElement("object"))
        {
            Label temp;
            string className = item->FirstChildElement("name")->GetText();
            auto found = std::find(names.begin(), names.end(), className);
            if (found == names.end())
            {
                // std::exception(const char*) is an MSVC extension; use the standard type.
                throw std::runtime_error("输入的分类不存在");
            }
            temp.category = int(found - names.begin());
            float xmin = item->FirstChildElement("bndbox")->FirstChildElement("xmin")->FloatText();
            float xmax = item->FirstChildElement("bndbox")->FirstChildElement("xmax")->FloatText();
            float ymin = item->FirstChildElement("bndbox")->FirstChildElement("ymin")->FloatText();
            float ymax = item->FirstChildElement("bndbox")->FirstChildElement("ymax")->FloatText();
            // Convert into the coordinates of the image letterboxed to 416x416.
            temp.center.x = 0.5f * (xmax + xmin) * ratio;
            temp.center.y = 0.5f * (ymax + ymin) * ratio;
            temp.size.width = (xmax - xmin) * ratio;
            temp.size.height = (ymax - ymin) * ratio;
            myLabels.push_back(temp);
        }
        cates.push_back(std::move(myLabels));
    }
}

//---------------------------------------------------------------------------------------
// Clusters all label box sizes into 9 groups with k-means and returns the mean size of
// each cluster, sorted ascending by area. Used to (re)compute the anchor priors.
// NOTE(review): the result is sorted smallest-first, while `predefined` above is
// listed largest-first — confirm the intended order when updating the anchors.
//---------------------------------------------------------------------------------------
array<Size2f, 9> MDataset::kmeansClassify() const
{
    array<Size2f, 9> nine{}; // Size2f default-initializes to (0,0); summed into below
    vector<Size2f> samples;
    for (const auto& onepic : cates)
    {
        for (const auto& item : onepic)
        {
            samples.push_back(item.size);
        }
    }
    vector<int> indices;
    kmeans(samples, 9, indices, TermCriteria(3, 1000, 0.1), 3, KMEANS_RANDOM_CENTERS);
    array<int, 9> esize;
    esize.fill(0);
    int count = (int)samples.size();
    for (int i = 0; i < count; i++)
    {
        nine[indices[i]].width += samples[i].width;
        nine[indices[i]].height += samples[i].height;
        esize[indices[i]]++;
    }
    for (int i = 0; i < 9; i++)
    {
        if (esize[i] > 0) // guard against an empty cluster (division by zero)
        {
            nine[i].width /= esize[i];
            nine[i].height /= esize[i];
        }
    }
    std::sort(nine.begin(), nine.end(), 
        [](const Size2f& a, const Size2f& b) { return a.area() < b.area(); });
    return nine;
}

//---------------------------------------------------------------------------------------
// Intersection-over-union of two rectangles; 0 when they do not overlap.
// FLT_EPSILON in the denominator guards against division by zero.
//---------------------------------------------------------------------------------------
float MDataset::calcIntersect(Rect2f a, Rect2f b)
{
    const float left = std::max(a.x, b.x);
    const float top = std::max(a.y, b.y);
    const float right = std::min(a.x + a.width, b.x + b.width);
    const float bottom = std::min(a.y + a.height, b.y + b.height);
    const float overlapW = right - left;
    const float overlapH = bottom - top;
    if (overlapW <= 0 || overlapH <= 0)
    {
        return 0;
    }
    const float inter = overlapW * overlapH;
    const float unionArea = a.width * a.height + b.width * b.height - inter;
    return inter / (unionArea + FLT_EPSILON);
}

//---------------------------------------------------------------------------------------
// Finds which of the predefined anchor boxes is most similar (by IoU) to the label.
//---------------------------------------------------------------------------------------
int MDataset::calcMaximumSimilarIndex(const Label& label)
{
    float maxv = 0;
    int maxi = -1;
    for (int i = 0; i < std::size(predefined); i++)
    {
        Rect2f rectPred(Point2f(), predefined[i]);
        Rect2f rectLabel(Point2f(), label.size);
        float overlapped = calcIntersect(rectLabel, rectPred);
        if (overlapped > maxv)
        {
            maxv = overlapped;
            maxi = i;
        }
    }
    return maxi;
}

// Clears one 25-element prediction slot (20 classes + conf + xywh) in a target row.
void MDataset::zeroOneObject25(Tensor oneLine, int start)
{
    for (int pos = start; pos < start + 25; pos++)
    {
        oneLine[pos] = 0;
    }
}

//---------------------------------------------------------------------------------------
// Builds the training target tensor (M52_UPPER x 75) for one image. Each label is
// assigned to its most similar anchor; the anchor determines the output scale
// (13x13 / 26x26 / 52x52), the grid row and the 25-value slot inside the row.
// The three formerly copy-pasted scale branches are folded into one, driven by tables.
//---------------------------------------------------------------------------------------
Tensor MDataset::makeTarget(const vector<Label>& labels)
{
    Tensor target = torch::zeros({ M52_UPPER, 75 });
    static const int grids[3] = { MUL_13, MUL_26, MUL_52 };
    static const int bases[3] = { 0, M13_UPPER, M26_UPPER };
    for (auto& item : labels)
    {
        int which = calcMaximumSimilarIndex(item);
        if (which < 0)
        {
            continue; /* no matching anchor — cannot normally happen */
        }
        int scale = which / 3;                    // 0: 13x13, 1: 26x26, 2: 52x52
        int index = (which % 3) * 25;             // slot in the row; `which` is recoverable
        float cell = float(PSIZE / grids[scale]); // exact: 32, 16 or 8 pixels per cell
        int64 ccol = int64(item.center.x / cell); // truncation == floor for x >= 0
        int64 crow = int64(item.center.y / cell);
        int64 frow = bases[scale] + crow * grids[scale] + ccol;
        zeroOneObject25(target[frow], index);     // clear the whole 25-value slot
        target[frow][index + item.category] = 1.0f; // one-hot class
        target[frow][index + 20] = 1.0f;            // objectness flag
        // Center offset inside the grid cell, normalized to [0, 1).
        target[frow][index + 21] = fmodf(item.center.x, cell) / cell;
        target[frow][index + 22] = fmodf(item.center.y, cell) / cell;
        // Log-scale size relative to the anchor prior.
        target[frow][index + 23] = logf(item.size.width / predefined[which].width);
        target[frow][index + 24] = logf(item.size.height / predefined[which].height);
    }
    return target;
}

void MDataset::testShowTarget(const Mat& image, const string& file, Tensor target)
{
    size_t found = file.find_last_of('/');
    string title = file.substr(found + 1);
    Mat color = image.clone();
    for (int i = 0; i < M13_UPPER; i++)
    {
        for (int offset = 0; offset < 75; offset += 25)
        {
            if (target[i][offset + 20].item<float>() != 0)
            {
                int which = offset / 25;
                int icx = DIV_1_13 * ((i % MUL_13) + target[i][offset + 21].item<float>()) * PSIZE;
                int icy = DIV_1_13 * ((i / MUL_13) + target[i][offset + 22].item<float>()) * PSIZE;
                int icw = expf(target[i][offset + 23].item<float>()) * predefined[which].width;
                int ich = expf(target[i][offset + 24].item<float>()) * predefined[which].height;
                rectangle(color, Rect(icx - icw / 2, icy - ich / 2, icw, ich), Scalar(0, 0, 255));
                torch::Tensor select = torch::arange(offset, offset + 20);
                int nameIndex = target[i].index_select(0, select).argmax(0).item<int>();
                putText(color, names[nameIndex], Point2i(icx - icw / 2, icy - ich / 2 + 14), 0, 0.6, Scalar(0, 255, 0));
                putText(color, title, Point2i(0, 14), 0, 0.6, Scalar(255, 0, 112));
            }
        }
    }
    for (int i = M13_UPPER; i < M26_UPPER; i++)
    {
        for (int offset = 0; offset < 75; offset += 25)
        {
            if (target[i][offset + 20].item<float>() != 0)
            {
                int which = offset / 25 + 3;
                int icx = DIV_1_26 * (((i - M13_UPPER) % MUL_26) + target[i][offset + 21].item<float>()) * PSIZE;
                int icy = DIV_1_26 * (((i - M13_UPPER) / MUL_26) + target[i][offset + 22].item<float>()) * PSIZE;
                int icw = expf(target[i][offset + 23].item<float>()) * predefined[which].width;
                int ich = expf(target[i][offset + 24].item<float>()) * predefined[which].height;
                rectangle(color, Rect(icx - icw / 2, icy - ich / 2, icw, ich), Scalar(0, 0, 255));
                torch::Tensor select = torch::arange(offset, offset + 20);
                int nameIndex = target[i].index_select(0, select).argmax(0).item<int>();
                putText(color, names[nameIndex], Point2i(icx - icw / 2, icy - ich / 2 + 14), 0, 0.6, Scalar(0, 255, 0));
                putText(color, title, Point2i(0, 14), 0, 0.6, Scalar(255, 0, 112));
            }
        }
    }
    for (int i = M26_UPPER; i < M52_UPPER; i++)
    {
        for (int offset = 0; offset < 75; offset += 25)
        {
            if (target[i][offset + 20].item<float>() != 0)
            {
                int which = offset / 25 + 6;
                int icx = DIV_1_52 * (((i - M26_UPPER) % MUL_52) + target[i][offset + 21].item<float>()) * PSIZE;
                int icy = DIV_1_52 * (((i - M26_UPPER) / MUL_52) + target[i][offset + 22].item<float>()) * PSIZE;
                int icw = expf(target[i][offset + 23].item<float>()) * predefined[which].width;
                int ich = expf(target[i][offset + 24].item<float>()) * predefined[which].height;
                rectangle(color, Rect(icx - icw / 2, icy - ich / 2, icw, ich), Scalar(0, 0, 255));
                torch::Tensor select = torch::arange(offset, offset + 20);
                int nameIndex = target[i].index_select(0, select).argmax(0).item<int>();
                putText(color, names[nameIndex], Point2i(icx - icw / 2, icy - ich / 2 + 14), 0, 0.6, Scalar(0, 255, 0));
                putText(color, title, Point2i(0, 14), 0, 0.6, Scalar(255, 0, 112));
            }
        }
    }
    imshow("样本图片", color);
    if (color.cols < 480)
    {
        resizeWindow("样本图片", 480, color.rows);
    }
    waitKey(5000);
}

void MDataset::setRandomize(bool irand)
{
    rand = irand;
}

//---------------------------------------------------------------------------------------
// Fetch one training example: load the image, scale it to PSIZE x PSIZE via
// scaleKeepRatio, optionally apply random augmentation, and encode the labels
// into the YOLO target tensor.
//---------------------------------------------------------------------------------------
torch::data::Example<> MDataset::get(size_t index)
{
    Mat raw = imread(images[index], IMREAD_COLOR);
    Mat canvas = scaleKeepRatio(raw, Size(PSIZE, PSIZE));
    vector<Label> boxes = cates[index];
    if (rand)
    {
        tryTransform(canvas, boxes);
    }

    Tensor target = makeTarget(boxes);
#ifdef SHOW_FINAL_TARGET // visualize the transformed sample and its labels
    testShowTarget(canvas, images[index], target);
#endif

    // Wrap the HWC byte image, then reorder to CHW float in [0, 1]. permute()
    // is only a view, but toType() allocates fresh storage (different dtype),
    // so the returned tensor does not alias the local Mat's memory.
    Tensor pixels = torch::from_blob(canvas.data, { canvas.rows, canvas.cols, 3 }, torch::kByte);
    pixels = pixels.permute({ 2, 0, 1 }).toType(torch::kF32).div(255.0f);
    return { pixels, target };
}

//---------------------------------------------------------------------------------------
// Number of samples in the dataset (required by the torch dataset interface).
//---------------------------------------------------------------------------------------
torch::optional<size_t> MDataset::size() const
{
    const size_t total = images.size();
    return total;
}

//---------------------------------------------------------------------------------------
// Randomly apply data augmentations. Each augmentation fires independently when a
// fresh uniform [0,1) draw from the dataset RNG falls below its probability.
// (The old code drew one extra random number at the very top and immediately
// overwrote it — a leftover that only burned an RNG value; removed.)
//---------------------------------------------------------------------------------------
void MDataset::tryTransform(Mat& input, vector<Label>& labels)
{
    if (float(rng) < 0.6f)
    {
        mosaic(input, labels);
    }
    if (float(rng) < 0.5f)
    {
        magnify(input, labels);
    }
    if (float(rng) < 0.5f)
    {
        translate(input, labels);
    }
    if (float(rng) < 0.5f)
    {
        flip(input, labels);
    }
    if (float(rng) < 0.2f)
    {
        reexposure(input, labels);
    }
    if (float(rng) < 0.15f)
    {
        blur(input, labels);
    }
    if (float(rng) < 0.25f)
    {
        noise(input, labels);
    }
}

//---------------------------------------------------------------------------------------
// Random translation: shift the image (and labels) by a random offset chosen so
// that every label box stays inside the frame.
//---------------------------------------------------------------------------------------
void MDataset::translate(Mat& input, vector<Label>& labels) const
{
    // Bounding extent of all label boxes.
    Point2f minxy(PSIZE, PSIZE);
    Point2f maxxy(0, 0);
    for (const auto& item : labels)
    {
        minxy.x = std::min(minxy.x, item.center.x - 0.5f * item.size.width);
        minxy.y = std::min(minxy.y, item.center.y - 0.5f * item.size.height);
        maxxy.x = std::max(maxxy.x, item.center.x + 0.5f * item.size.width);
        maxxy.y = std::max(maxxy.y, item.center.y + 0.5f * item.size.height);
    }
    float errx1 = -minxy.x + 1; /* 1px tolerance */
    float errx2 = input.cols - maxxy.x - 1;
    float erry1 = -minxy.y + 1;
    float erry2 = input.rows - maxxy.y - 1;
    // Shift only along axes that have enough play. The old code returned only
    // when BOTH axes were tight, so with a single tight axis it could call
    // rng.uniform() on an inverted range and push boxes out of the image.
    float movx = (errx2 - errx1 >= 5) ? rng.uniform(errx1, errx2) : 0.0f;
    float movy = (erry2 - erry1 >= 5) ? rng.uniform(erry1, erry2) : 0.0f;
    if (movx == 0.0f && movy == 0.0f)
    {
        return; // no room to translate at all
    }
    // 2x3 affine matrix: identity rotation/scale plus a (movx, movy) offset.
    Mat trans(2, 3, CV_32FC1);
    trans.at<float>(0, 0) = 1;
    trans.at<float>(0, 1) = 0;
    trans.at<float>(0, 2) = movx;
    trans.at<float>(1, 0) = 0;
    trans.at<float>(1, 1) = 1;
    trans.at<float>(1, 2) = movy;
    warpAffine(input, input, trans, input.size());
    for (auto& item : labels)
    {
        item.center.x += movx;
        item.center.y += movy;
    }
}

//---------------------------------------------------------------------------------------
// Random zoom in the 0.6~1.4 range, constrained so no box shrinks below ~20px
// and no box grows past the image border.
//---------------------------------------------------------------------------------------
void MDataset::magnify(Mat& input, vector<Label>& labels) const
{
    // Single pass: smallest box dimensions and farthest box corner.
    Size2f smallest(PSIZE, PSIZE);
    Point2f farthest(0, 0);
    for (const auto& item : labels)
    {
        smallest.width = std::min(smallest.width, item.size.width);
        smallest.height = std::min(smallest.height, item.size.height);
        farthest.x = std::max(farthest.x, item.center.x + 0.5f * item.size.width);
        farthest.y = std::max(farthest.y, item.center.y + 0.5f * item.size.height);
    }
    // Lower bound keeps the smallest box >= 20px; upper bound keeps the
    // farthest corner inside the image (-1 leaves a little margin).
    const float lo = std::max(0.6f, 20 / std::min(smallest.width, smallest.height));
    const float hi = std::min(1.4f, (PSIZE - 1) / std::max(farthest.x, farthest.y));
    if (lo >= hi)
    {
        return; // no feasible scale
    }
    const float factor = rng.uniform(lo, hi);
    // Pure scaling affine transform about the origin.
    Mat affine = (Mat_<float>(2, 3) << factor, 0, 0, 0, factor, 0);
    warpAffine(input, input, affine, input.size());
    for (auto& item : labels)
    {
        item.center.x *= factor;
        item.center.y *= factor;
        item.size.width *= factor;
        item.size.height *= factor;
    }
}

//---------------------------------------------------------------------------------------
// Random horizontal flip (vertical flipping is currently disabled).
//---------------------------------------------------------------------------------------
void MDataset::flip(Mat& input, vector<Label>& labels) const
{
    bool fx = rng(2);
    bool fy = 0/*rng(2)*/;
    if (!fx && !fy)
    {
        return; // no flip requested
    }
    // cv::flip mirrors around the last pixel (x -> cols-1-x), which matches the
    // label update below. The previous remap-based version mapped x -> cols-x,
    // sampling one pixel out of range at the edge and leaving the image one
    // pixel offset from the adjusted labels.
    // flipCode: 1 = horizontal, 0 = vertical, -1 = both.
    int flipCode = (fx && fy) ? -1 : (fx ? 1 : 0);
    cv::flip(input, input, flipCode);
    if (fx)
    {
        for (auto& item : labels)
        {
            item.center.x = input.cols - 1 - item.center.x;
        }
    }
    if (fy)
    {
        for (auto& item : labels)
        {
            item.center.y = input.rows - 1 - item.center.y;
        }
    }
}

//---------------------------------------------------------------------------------------
// Random hue / brightness jitter in HSV space.
//---------------------------------------------------------------------------------------
void MDataset::reexposure(Mat& input, vector<Label>& labels) const
{
    const int hueShift = rng.uniform(-25, 25);
    const int brightShift = rng.uniform(-40, 45);
    Mat hsv;
    cvtColor(input, hsv, COLOR_BGR2HSV);
    Mat planes[3];
    split(hsv, planes);
    // Hue wraps on the 0..180 circle; apply the same (v + shift + 180) % 180
    // formula as before, but through a 256-entry lookup table.
    Mat table(1, 256, CV_8UC1);
    for (int v = 0; v < 256; v++)
    {
        table.at<uchar>(v) = uchar((v + hueShift + 180) % 180);
    }
    LUT(planes[0], table, planes[0]);
    // Brightness is a saturating add on the V channel (0..255 range).
    planes[2] += brightShift;
    merge(planes, 3, hsv);
    cvtColor(hsv, input, COLOR_HSV2BGR);
}

//---------------------------------------------------------------------------------------
// Random blur: a fixed 3x3 Gaussian smoothing pass (labels are unaffected).
//---------------------------------------------------------------------------------------
void MDataset::blur(Mat& input, vector<Label>& labels) const
{
    const Size kernel(3, 3);
    GaussianBlur(input, input, kernel, 0);
}

//---------------------------------------------------------------------------------------
// Random per-pixel noise. Channel 3 of the random 8SC4 matrix acts as a gate:
// values >= 121 select roughly 3% of pixels (7 of 256 possible values), and the
// remaining three channels are added to the pixel's BGR components.
//---------------------------------------------------------------------------------------
void MDataset::noise(Mat& input, vector<Label>& labels) const
{
    Mat jitter(input.size(), CV_8SC4);
    cv::randu(jitter, -128, 128);
    for (int i = 0; i < jitter.rows; i++)
    {
        const Vec<char, 4>* rptr = jitter.ptr<Vec<char, 4>>(i);
        Vec3b* wptr = input.ptr<Vec3b>(i);
        for (int j = 0; j < jitter.cols; j++)
        {
            if (rptr[j][3] >= 121)
            {
                // saturate_cast clamps to [0, 255]; the old plain += wrapped
                // around (e.g. 250 + 10 -> 4), producing salt-pepper artifacts.
                wptr[j][0] = saturate_cast<uchar>(wptr[j][0] + rptr[j][0]);
                wptr[j][1] = saturate_cast<uchar>(wptr[j][1] + rptr[j][1]);
                wptr[j][2] = saturate_cast<uchar>(wptr[j][2] + rptr[j][2]);
            }
        }
    }
}

bool MDataset::canMosaic(const vector<Label>& label) const
{
    Size2f mins(1e12, 1e12);
    for (const auto& item : label)
    {
        if (mins.width > item.size.width)
        {
            mins.width = item.size.width;
        }
        if (mins.height > item.size.height)
        {
            mins.height = item.size.height;
        }
    }
    if (mins.width >= 60 && mins.height >= 60)
    {
        return true; /* 我们设宽高大于60可马赛克缩小 */
    }
    return false;
}

//---------------------------------------------------------------------------------------
// Mosaic augmentation: compose 4 images (this one plus 3 random dataset samples)
// into one, each shrunk into a quadrant; labels are scaled and offset to match.
//---------------------------------------------------------------------------------------
void MDataset::mosaic(Mat& input, vector<Label>& labels) const
{
    if (!canMosaic(labels))
    {
        return; // this sample's boxes are too small to survive halving
    }
    vector<int> indices(1, -1); /* -1 stands for `input` itself */
    // Draw 3 partner images whose boxes are all large enough to shrink. Bound
    // the number of draws so a dataset with few mosaic-able samples cannot
    // hang us in an endless loop (the old code spun in while(true)).
    int attempts = 0;
    while ((int)indices.size() < 4)
    {
        if (++attempts > 1000)
        {
            return; // could not find enough partners; skip the augmentation
        }
        int idx = rng((unsigned)images.size());
        if (canMosaic(cates[idx]))
        {
            indices.push_back(idx);
        }
    }
    // FIX: the old code shuffled with a default-seeded std::default_random_engine,
    // which produces the exact same permutation on every call. Seed from the
    // dataset RNG so quadrant placement actually varies.
    std::shuffle(indices.begin(), indices.end(), std::default_random_engine(rng.next()));
    Mat inImage(input.size(), input.type());
    for (int i = 0; i < 4; i++)
    {
        // Quadrant i: (i%2, i/2) selects the column/row half.
        Mat quad = inImage(Rect((i % 2) * PSIZE / 2, (i / 2) * PSIZE / 2, PSIZE / 2, PSIZE / 2));
        Mat source = input;
        if (indices[i] >= 0)
        {
            source = imread(images[indices[i]], IMREAD_COLOR);
            source = scaleKeepRatio(source, Size(PSIZE, PSIZE));
        }
        resize(source, quad, quad.size());
    }
    input = inImage; /* composed image */
    vector<Label> inLabels;
    for (int i = 0; i < 4; i++)
    {
        const vector<Label>* temp = &labels;
        if (indices[i] >= 0)
        {
            temp = &cates[indices[i]];
        }
        // Halve each box and offset it into its quadrant.
        for (const auto& item : *temp)
        {
            Label newLabel;
            newLabel.category = item.category;
            newLabel.center.x = 0.5f * item.center.x + (i % 2) * PSIZE / 2;
            newLabel.center.y = 0.5f * item.center.y + (i / 2) * PSIZE / 2;
            newLabel.size.width = 0.5f * item.size.width;
            newLabel.size.height = 0.5f * item.size.height;
            inLabels.push_back(newLabel);
        }
    }
    labels = std::move(inLabels); /* composed labels */
}

/////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////

//---------------------------------------------------------------------------------------
// Training driver. Each row of the target tensor has 75 elements, laid out as
// (20[class] + 1[conf] + 4[xywh]) x 3 anchors.
//---------------------------------------------------------------------------------------
void train(Yolo& net)
{
    //torch::load(net, "yolov3.pt");
    torch::optim::Adam optimizer(net->parameters(), torch::optim::AdamOptions(0.0002));
    //torch::load(optimizer, "yolov3-opt.pt");

    using MapDataset = torch::data::datasets::MapDataset<MDataset, torch::data::transforms::Stack<>>;
    using DataLoader = unique_ptr<torch::data::StatelessDataLoader<MapDataset, torch::data::samplers::RandomSampler>>;
    MapDataset myDataset = MDataset("E:/ProjectO/VOCtrainval_11-May-2012", "train", classes).
        map(torch::data::transforms::Stack<>());
    DataLoader dataLoader = torch::data::make_data_loader(myDataset, 8);

    // Validation samples come from the "val" split, without augmentation.
    MDataset checkDataset = MDataset("E:/ProjectO/VOCtrainval_11-May-2012", "val", classes);
    checkDataset.setRandomize(false);
    setRNGSeed((int)getTickCount());
    RNG rng = theRNG();

    Curve curve(600, 900);

    net->to(c10::kCUDA);
    MDiyLoss lossFunc;
    int miniBatch = 0;
    for (int i = 0; i < 300; i++)
    {
        net->train();
        cout << "新的批次开始了！\r\n";
        for (auto& batch : *dataLoader) // training pass
        {
            waitKey(1); // give the OpenCV windows time to process events

            Tensor input = batch.data.to(c10::kCUDA);
            Tensor target = batch.target.to(c10::kCUDA);
            Tensor predict = net->forward(input);
            Tensor loss = lossFunc(predict, target);
            loss.backward();
            // Accumulate gradients over 10 mini-batches before each optimizer
            // step (effective batch size 8 x 10 = 80).
            if (miniBatch % 10 == 9)
            {
                optimizer.step();
                optimizer.zero_grad();
            }
            if (miniBatch % 200 == 0)
            {
                float lossValue = loss.item<float>();
                long long time = getTickCount() / getTickFrequency();
                cout << "批次: " << miniBatch << "。训练损失：" << lossValue << "。时间1：" << time << endl;
                curve.append(0, miniBatch / 200, lossValue);
            }
            miniBatch++;
        }
        if (miniBatch > 25000) // validation
        {
            net->eval();
            float predCount = 0;
            // FIX: indices must be bounded by the validation set's size. The
            // old code used myDataset (the training set), which can index past
            // the end of checkDataset when the val split is smaller.
            int count = (int)checkDataset.size().value();
            // Randomly sample 60 validation images.
            for (int j = 0; j < 60; j++)
            {
                torch::data::Example<> record = checkDataset.get(rng.uniform(0, count));
                vector<ObjectFound> results;
                Tensor input = record.data.unsqueeze_(0).to(c10::kCUDA);
                net->predict(input, results);
                predCount += !results.empty();
            }
            curve.append(1, miniBatch / 200, predCount / 60.0f);
        }
        if (i % 3 == 2)
        {
            torch::save(net, "yolov3.pt");
            torch::save(optimizer, "yolov3-opt.pt");
        }
    }
}

//---------------------------------------------------------------------------------------
// Visual spot-check of the trained network. Training YOLO here only gets the LOSS
// down to about 0.02, after which it basically stops dropping. When verified with
// this function the chance of detecting an object is around 40%. Localization is
// clearly better than YOLO-V1.
//---------------------------------------------------------------------------------------
void check(Yolo& net)
{
    // NOTE(review): this loads the "train" split even though the banner talks
    // about validation — confirm which split is intended.
    MDataset myDataset = MDataset("E:/ProjectO/VOCtrainval_11-May-2012", "train", classes);
    myDataset.setRandomize(false);
    setRNGSeed((int)getTickCount());
    RNG rng = theRNG();

    torch::load(net, "yolov3.pt");

    net->to(c10::kCUDA);
    net->eval();
    int count = (int)myDataset.size().value();
    // Show 20 randomly chosen samples with predictions overlaid.
    for (int i = 0; i < 20; i++)
    {
        torch::data::Example<> record = myDataset.get(rng.uniform(0, count));
        // Wraps the tensor's buffer in a Mat without copying.
        // NOTE(review): record.data is sized {3, H, W} after get()'s permute +
        // toType; viewing it as an H x W CV_32FC3 (interleaved) Mat assumes the
        // float copy preserved the original HWC memory order — TODO confirm.
        Mat image(record.data.size(1), record.data.size(2), CV_32FC3, record.data.data_ptr());
        vector<ObjectFound> results;
        // unsqueeze_ adds the batch dimension in place; the Mat above still
        // aliases the same (CPU) storage, kept alive by `record`.
        Tensor input = record.data.unsqueeze_(0).to(c10::kCUDA);
        net->predict(input, results);
        
        // Draw each detection as a red box with "class(confidence%)" text.
        for (const auto& item : results)
        {
            rectangle(image, Rect(item.bound), Scalar(0, 0, 1.0));
            string name = classes[item.category] + "(" + std::to_string(int(item.confidence * 100)) + ")";
            putText(image, name, Point2i(item.bound.x, item.bound.y + 14), 0, 0.6, Scalar(0, 0.5, 0.95));
        }
        // List the ground-truth class names down the left edge. A non-zero cell
        // at offset u+20 marks a real label; the 20-wide slice before it is the
        // one-hot class vector.
        int s = 1;
        for (int t = 0; t < record.target.size(0); t++)
        {
            for (int u = 0; u < 75; u += 25)
            {
                if (record.target[t][u + 20].item<int>() != 0)
                {
                    Tensor maxi = record.target[t].index_select(0, torch::arange(u, u + 20)).argmax(0);
                    putText(image, classes[maxi.item<int>()], Point2i(0, s * 16), 0, 0.6, Scalar(0.5, 0.95, 0));
                    s++;
                }
            }
        }
        imshow("预测图片", image);
        if (image.cols < 480)
        {
            resizeWindow("预测图片", 480, image.rows);
        }
        waitKey(3000);
    }
    destroyAllWindows();
}

/////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////

//---------------------------------------------------------------------------------------
// YOLO-V3 uses 9 preset anchors, three per output scale, derived from the dataset's
// ground-truth boxes. Before training, run k-means over the label sizes to split
// them into 9 clusters, then pass those 9 boxes to the training routine.
//---------------------------------------------------------------------------------------
int main()
{
    // Initialized so a failed cin read falls through to "exit" instead of
    // branching on an indeterminate value.
    int option = -1;
    cout << "选择功能：\n0->预先对样本进行KMEANS分成9类；\n1->训练Yolo-V3；\n2->检验Yolo-V3；\n其它：退出程序。\n";
    cin >> option;
    if (option == 0)
    {
        // Cluster the training-set label sizes into 9 anchor boxes.
        MDataset dataset("E:/ProjectO/VOCtrainval_11-May-2012", "train", classes);
        array<Size2f, 9> cates = dataset.kmeansClassify();
        for (auto item : cates)
        {
            cout << item.width << "," << item.height << endl;
        }
    }
    else if (option == 1)
    {
        Yolo yolo(20); // 20 = number of VOC classes
        train(yolo);
    }
    else if (option == 2)
    {
        Yolo yolo(20);
        check(yolo);
    }

    return 0;
}







