﻿#include "YoloV5.h"
#include "MDataset.h"
#include <iostream>

#pragma warning(disable:26451)

using namespace std;
using namespace cv;
using torch::nn::functional::binary_cross_entropy;
using torch::nn::functional::binary_cross_entropy_with_logits;
using torch::nn::functional::BinaryCrossEntropyFuncOptions;
using torch::nn::functional::BinaryCrossEntropyWithLogitsFuncOptions;
using torch::nn::MaxPool2dOptions;
using torch::nn::DropoutOptions;
using torch::nn::BatchNorm2dOptions;
using torch::nn::Conv2dOptions;
using torch::nn::LeakyReLUOptions;
using torch::nn::UpsampleOptions;

namespace std
{
    //-----------------------------------------------------------------------------------
    // Convert num to a string keeping at most `prec` digits after the decimal point,
    // then strip trailing zeros and a dangling '.' (e.g. 3.14159,2 -> "3.14", 2.0 -> "2").
    // NOTE(review): adding overloads to namespace std is technically undefined behavior
    // per the standard; consider moving this helper into a project namespace.
    //-----------------------------------------------------------------------------------
    string to_string(float num, int prec)
    {
        string str = std::to_string(num);
        size_t pos = str.find('.');
        if (pos != string::npos) // guard: without it, npos + prec would wrap around
        {
            pos += prec + 1;
            if (pos < str.length())
            {
                str.erase(pos); // drop digits beyond the requested precision
            }
            while (!str.empty() && str.back() == '0')
            {
                str.pop_back(); // strip trailing zeros
            }
            if (!str.empty() && str.back() == '.')
            {
                str.pop_back(); // strip the dangling decimal point
            }
        }
        return str;
    }
}

/////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////

MConv2dImpl::MConv2dImpl(int input1, int input2, int kernel, int stride, int padding) : 
    conv1(nullptr), normal2(nullptr), relu3(nullptr)
{
    // Standard conv -> batch-norm -> leaky-relu trio; each submodule is
    // registered so its parameters are tracked by the parent module.
    conv1 = register_module("conv1",
        Conv2d(Conv2dOptions(input1, input2, kernel).stride(stride).padding(padding).bias(false)));
    normal2 = register_module("normal2", BatchNorm2d(BatchNorm2dOptions(input2)));
    relu3 = register_module("relu3", LeakyReLU(LeakyReLUOptions().negative_slope(0.1)));
}

Tensor MConv2dImpl::forward(Tensor x)
{
    // conv -> batch-norm -> activation, applied in sequence.
    return relu3->forward(normal2->forward(conv1->forward(x)));
}

MFocusImpl::MFocusImpl(int input2, int kernel, int stride, int padding) : 
    convf1(nullptr)
{
    // The focus slice in forward() stacks four sub-sampled copies of a
    // 3-channel image, hence the fixed 12 input channels here.
    auto options = Conv2dOptions(12, input2, kernel).stride(stride).padding(padding).bias(false);
    convf1 = register_module("convf1", Conv2d(options));
}

Tensor MFocusImpl::forward(Tensor x)
{
    /* x must be 4-D: [batch, channel, row, col] */
    // Focus slicing: sample the spatial grid at even/odd row/column offsets to
    // build four half-resolution copies, stack them on the channel axis
    // (3ch -> 12ch), then fuse with a single convolution.
    // NOTE(review): the index ranges are built from PSIZE, so this assumes the
    // input is PSIZE x PSIZE — confirm callers always feed that size.
    Tensor zeroIndex = torch::arange(0, PSIZE, 2, x.device());
    Tensor oneIndex = torch::arange(1, PSIZE, 2, x.device());
    Tensor t1 = x.index_select(2, zeroIndex).index_select(3, zeroIndex);
    Tensor t2 = x.index_select(2, oneIndex).index_select(3, zeroIndex);
    Tensor t3 = x.index_select(2, zeroIndex).index_select(3, oneIndex);
    Tensor t4 = x.index_select(2, oneIndex).index_select(3, oneIndex);
    Tensor mm = torch::concat({ t1, t2, t3, t4 }, 1);
    return convf1->forward(mm);
}

MResidualUnitImpl::MResidualUnitImpl(int input1, int medium, int input2) :
    conv1(nullptr), conv2(nullptr), normal3(nullptr), relu4(nullptr)
{
    // 1x1 bottleneck followed by a 3x3 conv. The identity add in forward()
    // requires input1 == input2; registration order matches declaration order.
    conv1 = register_module("ru-conv1", MConv2d(input1, medium, 1, 1, 0));
    conv2 = register_module("ru-conv2",
        Conv2d(Conv2dOptions(medium, input2, 3).stride(1).padding(1).bias(false)));
    normal3 = register_module("ru-normal3", BatchNorm2d(BatchNorm2dOptions(input2)));
    relu4 = register_module("ru-relu4", LeakyReLU(LeakyReLUOptions().negative_slope(0.1)));
}

Tensor MResidualUnitImpl::forward(Tensor x)
{
    // Bottleneck path plus identity shortcut; activation after the add.
    Tensor path = conv2->forward(conv1->forward(x));
    path = normal3->forward(path);
    return relu4->forward(path + x);
}

/* This stage keeps the spatial size of the network unchanged */
MCsp1Impl::MCsp1Impl(int repeat, int input1, int medium) :
    conv1(nullptr), sconv2(nullptr), sconv3(nullptr), normal4(nullptr), 
    relu5(nullptr), conv6(nullptr)
{
    // CSP stage with residual units: a bottleneck branch (conv1 -> seq7 ->
    // sconv2) and a shortcut branch (sconv3), concatenated, normalized, fused.
    // NOTE(review): normal4/conv6 are sized for input1 channels, so input1 must
    // equal 2 * medium for the concat in forward() to match — confirm call sites.
    conv1 = register_module("csp1-conv1", MConv2d(input1, medium, 1, 1, 0)); /* channel squeeze */
    sconv2 = register_module("csp1-sconv2",
        Conv2d(Conv2dOptions(medium, medium, 1).stride(1).padding(0).bias(false)));
    sconv3 = register_module("csp1-sconv3",
        Conv2d(Conv2dOptions(input1, medium, 1).stride(1).padding(0).bias(false)));
    normal4 = register_module("csp1-normal4", BatchNorm2d(BatchNorm2dOptions(input1)));
    relu5 = register_module("csp1-relu5", LeakyReLU(LeakyReLUOptions().negative_slope(0.1)));
    conv6 = register_module("csp1-conv6", MConv2d(input1, input1, 1, 1, 0));
    for (int i = 0; i < repeat; i++)
    {
        seq7->push_back(MResidualUnit(medium, medium, medium));
    }
    seq7 = register_module("csp1-seq7", seq7);
}

Tensor MCsp1Impl::forward(Tensor x)
{
    // Trunk branch: bottleneck -> residual units -> 1x1 conv.
    Tensor trunk = sconv2->forward(seq7->forward(conv1->forward(x)));
    // Shortcut branch: plain 1x1 conv straight from the input.
    Tensor shortcut = sconv3->forward(x);
    // Merge the branches on the channel axis, normalize, activate, fuse.
    Tensor merged = torch::concat({ trunk, shortcut }, 1);
    merged = relu5->forward(normal4->forward(merged));
    return conv6->forward(merged);
}

/* This stage keeps the spatial size of the network unchanged */
MCsp2Impl::MCsp2Impl(int repeat, int input1, int medium, int input2) :
    conv1(nullptr), sconv2(nullptr), sconv3(nullptr), normal4(nullptr), 
    relu5(nullptr), conv6(nullptr)
{
    // CSP stage without residual units: the trunk repeats 1x1/3x3 conv pairs;
    // the shortcut is a bare 1x1 conv. Both branches end with `medium`
    // channels, so the concat in forward() carries 2 * medium channels.
    // NOTE(review): normal4/conv6 are sized for input2, which therefore must
    // equal 2 * medium — confirm at call sites.
    conv1 = register_module("csp2-conv1", MConv2d(input1, medium, 1, 1, 0)); /* channel squeeze */
    sconv2 = register_module("csp2-sconv2",
        Conv2d(Conv2dOptions(medium, medium, 1).stride(1).padding(0).bias(false)));
    sconv3 = register_module("csp2-sconv3",
        Conv2d(Conv2dOptions(input1, medium, 1).stride(1).padding(0).bias(false)));
    normal4 = register_module("csp2-normal4", BatchNorm2d(BatchNorm2dOptions(input2)));
    relu5 = register_module("csp2-relu5", LeakyReLU(LeakyReLUOptions().negative_slope(0.1)));
    conv6 = register_module("csp2-conv6", MConv2d(input2, input2, 1, 1, 0));
    for (int i = 0; i < repeat; i++)
    {
        seq7->push_back(MConv2d(medium, medium, 1, 1));
        seq7->push_back(MConv2d(medium, medium, 3, 1, 1));
    }
    seq7 = register_module("csp2-seq7", seq7);
}

Tensor MCsp2Impl::forward(Tensor x)
{
    // Trunk branch: bottleneck -> conv pairs -> 1x1 conv.
    Tensor trunk = sconv2->forward(seq7->forward(conv1->forward(x)));
    // Shortcut branch: plain 1x1 conv straight from the input.
    Tensor shortcut = sconv3->forward(x);
    // Merge the branches on the channel axis, normalize, activate, fuse.
    Tensor merged = torch::concat({ trunk, shortcut }, 1);
    merged = relu5->forward(normal4->forward(merged));
    return conv6->forward(merged);
}

MSppfImpl::MSppfImpl(int input1, int kernel) : 
    conv1(nullptr), conv2(nullptr), pool3(nullptr)
{
    // SPPF: halve the channels, pool three times (stride 1 + half-kernel
    // padding keeps the spatial size), then fuse the 4-way concat
    // (input1/2 * 4 = input1 * 2 channels) back down to input1.
    conv1 = register_module("sppf-conv1", MConv2d(input1, input1 / 2, 1, 1));
    conv2 = register_module("sppf-conv2", MConv2d(input1 * 2, input1, 1, 1));
    pool3 = register_module("sppf-pool3",
        MaxPool2d(MaxPool2dOptions(kernel).stride(1).padding(kernel / 2)));
}

Tensor MSppfImpl::forward(Tensor x)
{
    // Chain three max-pools over the reduced feature map and concatenate all
    // four scales on the channel axis before the fusing convolution.
    Tensor reduced = conv1->forward(x);
    Tensor p1 = pool3->forward(reduced);
    Tensor p2 = pool3->forward(p1);
    Tensor p3 = pool3->forward(p2);
    return conv2->forward(torch::concat({ reduced, p1, p2, p3 }, 1));
}

//---------------------------------------------------------------------------------------
// Assembles the whole network: Focus stem, CSP backbone with SPPF, then an
// upsample/downsample head feeding three 1x1 detection convolutions. The inline
// size comments track the spatial size of a 3*608*608 input through the layers.
// Each detection head emits (classes + 5) * 3 channels:
// 3 anchors x (class scores + 1 confidence + 4 box values).
//---------------------------------------------------------------------------------------
YoloImpl::YoloImpl(int classes) :
    focus00(nullptr), conv01(nullptr), csp02(nullptr), conv03(nullptr),
    csp04(nullptr), conv05(nullptr), csp06(nullptr), conv07(nullptr),
    sppf08(nullptr), csp09(nullptr), conv10(nullptr), us11(nullptr),
    csp12(nullptr), conv13(nullptr), us14(nullptr), csp15(nullptr), 
    sconv16(nullptr), conv17(nullptr), csp18(nullptr), sconv19(nullptr),
    conv20(nullptr), csp21(nullptr), sconv22(nullptr)
{
    int dimout = (classes + 5) * 3; /* input: 3*608*608 */
    focus00 = MFocus(32, 3, 1, 1);
    focus00 = register_module("yl-focus00", focus00); /* 304*304 */

    // Backbone: alternating stride-2 convs (halving the size) and CSP stages.
    conv01 = MConv2d(32, 64, 3, 2, 1); /* 152*152 */
    csp02 = MCsp1(1, 64, 32);
    conv03 = MConv2d(64, 128, 3, 2, 1); /* 76*76 */
    csp04 = MCsp1(3, 128, 64);
    conv05 = MConv2d(128, 256, 3, 2, 1); /* 38*38 */
    csp06 = MCsp1(3, 256, 128);
    conv07 = MConv2d(256, 512, 3, 2, 1); /* 19*19 */
    sppf08 = MSppf(512, 5);
    csp09 = MCsp2(1, 512, 256, 512);
    conv10 = MConv2d(512, 256, 1, 1);
    us11 = Upsample(UpsampleOptions().scale_factor(vector<double>({ 2.0, 2.0 })));
    conv01 = register_module("yl-conv01", conv01);
    csp02 = register_module("yl-csp02", csp02);
    conv03 = register_module("yl-conv03", conv03);
    csp04 = register_module("yl-csp04", csp04);
    conv05 = register_module("yl-conv05", conv05);
    csp06 = register_module("yl-csp06", csp06);
    conv07 = register_module("yl-conv07", conv07);
    sppf08 = register_module("yl-sppf08", sppf08);
    csp09 = register_module("yl-csp09", csp09);
    conv10 = register_module("yl-conv10", conv10);
    us11 = register_module("yl-us11", us11);

    // Top-down (upsample) path modules.
    csp12 = MCsp2(1, 512, 128, 256);
    conv13 = MConv2d(256, 128, 1, 1);
    us14 = Upsample(UpsampleOptions().scale_factor(vector<double>({ 2.0, 2.0 })));
    csp12 = register_module("yl-csp12", csp12);
    conv13 = register_module("yl-conv13", conv13);
    us14 = register_module("yl-us14", us14);

    // Large-resolution head and the first bottom-up stride-2 conv.
    csp15 = MCsp2(1, 256, 64, 128);
    sconv16 = Conv2d(Conv2dOptions(128, dimout, 1).stride(1).padding(0).bias(false)); /* output1 76*76 */
    conv17 = MConv2d(128, 128, 3, 2, 1);
    csp15 = register_module("yl-csp15", csp15);
    sconv16 = register_module("yl-sconv16", sconv16);
    conv17 = register_module("yl-conv17", conv17);

    // Medium-resolution head and the second bottom-up stride-2 conv.
    csp18 = MCsp2(1, 256, 128, 256);
    sconv19 = Conv2d(Conv2dOptions(256, dimout, 1).stride(1).padding(0).bias(false)); /* output2 38*38 */
    conv20 = MConv2d(256, 256, 3, 2, 1);
    csp18 = register_module("yl-csp18", csp18);
    sconv19 = register_module("yl-sconv19", sconv19);
    conv20 = register_module("yl-conv20", conv20);

    // Small-resolution head.
    csp21 = MCsp2(1, 512, 256, 512);
    sconv22 = Conv2d(Conv2dOptions(512, dimout, 1).stride(1).padding(0).bias(false)); /* output3 19*19 */
    csp21 = register_module("yl-csp21", csp21);
    sconv22 = register_module("yl-sconv22", sconv22);
}

//---------------------------------------------------------------------------------------
// Full forward pass. The backbone yields features at 76x76, 38x38 and 19x19;
// a top-down path (upsample + concat) and a bottom-up path (stride-2 conv +
// concat) mix the scales before the three detection heads. The three outputs
// are flattened and concatenated into one tensor with the 19x19 rows first,
// then 38x38, then 76x76 (the order MDiyLossImpl::forward relies on).
//---------------------------------------------------------------------------------------
Tensor YoloImpl::forward(Tensor x)
{
    // Stem and backbone.
    x = focus00->forward(x);
    x = conv01->forward(x);
    x = csp02->forward(x);
    x = conv03->forward(x);
    Tensor x76 = csp04->forward(x); /* 76x76 backbone feature */

    Tensor base1 = conv05->forward(x76);
    Tensor x38 = csp06->forward(base1); /* 38x38 backbone feature */

    Tensor base2 = conv07->forward(x38);
    base2 = sppf08->forward(base2);
    base2 = csp09->forward(base2);
    Tensor x19 = conv10->forward(base2); /* 19x19 neck feature */
    
    // Top-down path: upsample and merge with the next-larger scale.
    Tensor cc01 = us11->forward(x19);
    cc01 = torch::concat({ x38, cc01 }, 1);
    cc01 = csp12->forward(cc01);
    Tensor base3 = conv13->forward(cc01);

    Tensor cc02 = us14->forward(base3);
    cc02 = torch::concat({ x76, cc02 }, 1);
    Tensor base4 = csp15->forward(cc02);
    Tensor out3 = sconv16->forward(base4); /* 76x76 output */

    // Bottom-up path: stride-2 convs merge back down the pyramid.
    Tensor cc03 = conv17->forward(base4);
    cc03 = torch::concat({ base3, cc03 }, 1);
    Tensor base5 = csp18->forward(cc03);
    Tensor out2 = sconv19->forward(base5); /* 38x38 output */

    Tensor cc04 = conv20->forward(base5);
    cc04 = torch::concat({ x19, cc04 }, 1);
    Tensor base6 = csp21->forward(cc04);
    Tensor out1 = sconv22->forward(base6); /* 19x19 output */

    // Permute BATCH*75*M*M into the needed BATCH*M*M*75 layout, flatten the
    // spatial grid to BATCH*(M*M)*75, then concatenate the scales along dim 1.
    out1 = out1.permute({ 0, 2, 3, 1 });
    out1 = torch::flatten(out1, 1, 2);
    out2 = out2.permute({ 0, 2, 3, 1 });
    out2 = torch::flatten(out2, 1, 2);
    out3 = out3.permute({ 0, 2, 3, 1 });
    out3 = torch::flatten(out3, 1, 2);
    Tensor z = torch::concat({ out1, out2, out3 }, 1);
    return z;
}

bool YoloImpl::predict(Tensor x, vector<ObjectFound>& output)
{
    // Run the network, keep detections with confidence >= 0.5, apply NMS with
    // a 0.6 overlap threshold, and report whether anything survived.
    vector<ObjectFound> candidates;
    outputToVector(forward(x), 0.5f, candidates);
    nms(candidates, 0.6f, output);
    return !output.empty();
}

//---------------------------------------------------------------------------------------
// Scan the raw network output and collect every prediction whose confidence
// clears `thres`. Each of the M76_UPPER rows carries three 25-element
// predictions: 20 class scores, 1 confidence (offset 20), 4 box values (21..24).
//---------------------------------------------------------------------------------------
void YoloImpl::outputToVector(Tensor input, float thres, vector<ObjectFound>& objects)
{
    // Every raw output value goes through sigmoid before interpretation.
    // BUGFIX: use the non-mutating squeeze(0) — the original squeeze_(0)
    // altered the caller's tensor in place, because Tensor handles share
    // their underlying implementation.
    Tensor result = torch::sigmoid(input.squeeze(0));
    result = result.to(torch::kCPU);
    for (int i = 0; i < M76_UPPER; i++)
    {
        for (int j = 0; j < 75; j += 25)
        {
            const float* dptr = static_cast<const float*>(result[i].const_data_ptr()) + j;
            if (dptr[20] < thres)
            {
                continue; // below confidence threshold
            }
            // Index of the best-scoring class among the 20 slots.
            int maxv = (int)(std::max_element(dptr, dptr + 20) - dptr);
            ObjectFound temp;
            temp.bound = calcPredictRect(i, j, dptr);
            temp.category = maxv;
            temp.confidence = dptr[20];
            objects.push_back(temp);
        }
    }
}

//---------------------------------------------------------------------------------------
// 局部极大值抑制。本函数按每一个分类进行的。即先把同一类的结果放一块，然后分别对每一
// 类进行极大值抑制。
// objects：输入所有结果
// thres：重叠度阈值（大于此值会被筛选掉）
// wells：抑制之后的输出
//---------------------------------------------------------------------------------------
void YoloImpl::nms(const vector<ObjectFound>& objects, float thres, vector<ObjectFound>& wells)
{
    vector<ObjectFound> oneclass;
    // 遍历20个分类，把相同类别放到oneclass里进行NMS
    for (int i = 0; i < 20; i++)
    {
        oneclass.clear();
        for (const auto &item : objects)
        {
            if (item.category == i)
            {
                oneclass.push_back(item);
            }
        }
        if (oneclass.empty())
        {
            continue; /* 空分类 */
        }
        // 下面是对一个单独的类NMS，先排序
        std::sort(oneclass.begin(), oneclass.end(),
            [](const ObjectFound& a, const ObjectFound& b) { return a.confidence > b.confidence; });
        vector<ObjectFound> filtered;
        while (!oneclass.empty())
        {
            filtered.push_back(oneclass.front());
            oneclass.erase(oneclass.begin());
            Rect2f curr = filtered.back().bound;
            for (auto iter = oneclass.begin(); iter != oneclass.end(); /*iter++*/)
            {
                if (calcIntersectRatio(curr, iter->bound) >= thres)
                {
                    iter = oneclass.erase(iter);
                    continue;
                }
                iter++;
            }
        }
        wells.insert(wells.end(), filtered.begin(), filtered.end());
    }
}

//---------------------------------------------------------------------------------------
// Decode one 25-value prediction into an image-space rectangle (top-left + size).
// row: flattened grid-row index over all scales — [0, M19_UPPER) is the 19x19
//      grid, [M19_UPPER, M38_UPPER) the 38x38 grid, the rest the 76x76 grid,
//      matching the concat order produced by YoloImpl::forward.
// col: 0/25/50 — offset of the prediction inside the row; col/25 also selects
//      which of the three per-scale anchors applies.
// oneLine: the 25 sigmoid-activated values; indices 21..24 hold x, y, w, h.
//---------------------------------------------------------------------------------------
Rect2f YoloImpl::calcPredictRect(int row, int col, const float* oneLine)
{
    int rowBase = 0;
    int anchorBase = 0;
    int grain = MUL_19;
    if (row >= M38_UPPER)
    {
        rowBase = M38_UPPER;
        anchorBase = 6;
        grain = MUL_76;
    }
    else if (row >= M19_UPPER)
    {
        rowBase = M19_UPPER;
        anchorBase = 3;
        grain = MUL_38;
    }
    Rect2f result;
    int which = col / 25 + anchorBase;
    /* center point: (2*sigmoid - 0.5) offset inside the cell, scaled by the
       cell size PSIZE / grain */
    result.x = ((row - rowBase) % grain + (2 * oneLine[21] - 0.5f)) * PSIZE / grain;
    result.y = ((row - rowBase) / grain + (2 * oneLine[22] - 0.5f)) * PSIZE / grain;
    /* width/height: (2*sigmoid)^2 times the matching predefined anchor size */
    result.width = 4 * oneLine[23] * oneLine[23] * predefined[which].width;
    result.height = 4 * oneLine[24] * oneLine[24] * predefined[which].height;
    /* convert the center point to the top-left corner */
    result.x = result.x - 0.5f * result.width;
    result.y = result.y - 0.5f * result.height;
    return result;
}

/////////////////////////////////////////////////////////////////////////////////////////

// The loss module holds no state of its own; default construction suffices.
MDiyLossImpl::MDiyLossImpl() = default;

//---------------------------------------------------------------------------------------
// CIoU-style box loss on 4xN tensors a (prediction) and b (target); each column
// is (cx, cy, w, h). Returns the scalar sum over all N columns.
// NOTE(review): s2 (the union area) is not clamped before the division, so a
// degenerate zero-area pair would produce inf/NaN — confirm targets always
// have positive width/height.
//---------------------------------------------------------------------------------------
Tensor MDiyLossImpl::calcCiouLoss(Tensor a, Tensor b)
{
    // Corner coordinates of the predicted (p) and target (q) boxes.
    Tensor px1 = a[0] - 0.5f * a[2];
    Tensor py1 = a[1] - 0.5f * a[3];
    Tensor px2 = a[0] + 0.5f * a[2];
    Tensor py2 = a[1] + 0.5f * a[3];
    Tensor qx1 = b[0] - 0.5f * b[2];
    Tensor qy1 = b[1] - 0.5f * b[3];
    Tensor qx2 = b[0] + 0.5f * b[2];
    Tensor qy2 = b[1] + 0.5f * b[3];
    // Intersection area (clamped at 0 when the boxes do not overlap) and union.
    Tensor s1 = (torch::min(px2, qx2) - torch::max(px1, qx1)).clamp_min(0) * (torch::min(py2, qy2) - torch::max(py1, qy1)).clamp_min(0);
    Tensor s2 = a[2] * a[3] + b[2] * b[3] - s1;
    Tensor iou = s1 / s2; /* IOU */

    // Squared center distance over squared enclosing-box diagonal.
    Tensor cdist = torch::pow(a[0] - b[0], 2) + torch::pow(a[1] - b[1], 2);
    Tensor wdist = torch::pow(torch::max(px2, qx2) - torch::min(px1, qx1), 2) + torch::pow(torch::max(py2, qy2) - torch::min(py1, qy1), 2);
    Tensor diou = cdist / wdist; /* DIOU penalty term */

    // Aspect-ratio consistency term v, weighted by alpha = v / (1 - iou + v).
    Tensor vei = 4.0f / (M_PI * M_PI) * torch::pow(torch::atan(a[2] / a[3].clamp_min(FLT_EPSILON)) -
        torch::atan(b[2] / b[3].clamp_min(FLT_EPSILON)), 2);
    Tensor ciou = vei * vei / (1 - iou + vei).clamp_min(FLT_EPSILON); /* CIOU aspect term (alpha * v) */

    Tensor result = (1 - iou + diou + ciou).sum();
    return result;
}

//---------------------------------------------------------------------------------------
// Append `loopCount` consecutive indices, starting at base + start, to vecs.
//---------------------------------------------------------------------------------------
void MDiyLossImpl::forEach(vector<int64>& vecs, int64 base, int64 start, int64 loopCount)
{
    const int64 first = base + start;
    for (int64 offset = 0; offset < loopCount; offset++)
    {
        vecs.push_back(first + offset);
    }
}

//---------------------------------------------------------------------------------------
// The first 361 rows of the output belong to the 19x19 grid, the next 676 rows
// to the 38x38 grid, and the last 2704 rows to the 76x76 grid. Each row holds
// three predictions of (20[class] + 1[conf] + 4[xywh]).
// NOTE: cells with and without objects are gathered first, then take() selects
// the data in bulk for each loss term — this exploits the GPU's parallelism.
//---------------------------------------------------------------------------------------
Tensor MDiyLossImpl::forward(Tensor predict, Tensor target)
{
    const float weights[] = { 0.4f, 1.0f, 4.0f }; /* per-object-size weights */
    Tensor error = torch::tensor(0.0f, predict.device());
    int batchSize = predict.size(0);

    // Flat indices of the three confidence slots (offsets 20/45/70) of every row.
    vector<int64> allConfs;
    allConfs.reserve(predict.numel());
    for (int i = 0; i < batchSize; i++)
    {
        for (int j = 0; j < M76_UPPER; j++)
        {
            allConfs.push_back(i * M76_UPPER * 75 + j * 75 + 20);
            allConfs.push_back(i * M76_UPPER * 75 + j * 75 + 45);
            allConfs.push_back(i * M76_UPPER * 75 + j * 75 + 70);
        }
    }
    // from_blob wraps the host vector; .to(kCUDA) copies before it goes out of scope.
    Tensor allConfIdxs = torch::from_blob(allConfs.data(), { batchSize, M76_UPPER, 3 }, c10::kLong);
    allConfIdxs = allConfIdxs.to(c10::kCUDA);
    /* Move the objectness flags to the CPU first — element-wise index scanning is faster there */
    Tensor cpuConf = target.take(allConfIdxs).to(c10::kCPU);

    // Flat element indices bucketed by loss term.
    vector<int64> classes;
    vector<int64> rects;
    vector<int64> objConfs;
    vector<int64> noneConfs;
    vector<int> rectIndices;
    vector<float> confWeights;
    for (int i = 0; i < batchSize; i++)
    {
        int anchorBase = 0; // predefined anchor offset
        for (int j = 0; j < M76_UPPER; j++)
        {
            if (j == M19_UPPER || j == M38_UPPER)
            {
                anchorBase += 3; // next scale -> next trio of predefined anchors
            }
            for (int k = 0; k < 3; k++)
            {
                if (cpuConf[i][j][k].item<float>() != 0) /* object present */
                {
                    forEach(classes, i * M76_UPPER * 75 + j * 75, k * 25 + 0, 20);
                    forEach(objConfs, i * M76_UPPER * 75 + j * 75, k * 25 + 20, 1);
                    forEach(rects, i * M76_UPPER * 75 + j * 75, k * 25 + 21, 4);
                    confWeights.push_back(weights[anchorBase / 3]);
                    rectIndices.push_back(anchorBase + k);
                }
                else /* no object */
                {
                    forEach(noneConfs, i * M76_UPPER * 75 + j * 75, k * 25 + 20, 1);
                }
            }
        }
    }
    if (!objConfs.empty())
    {
        // Classification loss over the 20 class slots of occupied cells.
        Tensor classTsr = torch::from_blob(classes.data(), { (int)classes.size() / 20, 20 }, c10::kLong);
        classTsr = classTsr.to(c10::kCUDA);
        Tensor e1 = 0.3f * binary_cross_entropy_with_logits(predict.take(classTsr), target.take(classTsr),
            BinaryCrossEntropyWithLogitsFuncOptions().reduction(torch::kSum));
        error += e1;
        cout << "e1=" << e1.item<float>() << ", "; // NOTE(review): logs every batch

        /* confidence weights: small objects 4.0, medium 1.0, large 0.4 */
        Tensor weightTsr = torch::from_blob(confWeights.data(), { (int)confWeights.size() }, c10::kFloat);
        weightTsr = weightTsr.to(c10::kCUDA);
        Tensor confTsr = torch::from_blob(objConfs.data(), { (int)objConfs.size() }, c10::kLong);
        confTsr = confTsr.to(c10::kCUDA);
        ASSERT(target.take(confTsr).count_nonzero().item<int>() == confTsr.size(0));
        Tensor e2 = 0.8f * binary_cross_entropy_with_logits(predict.take(confTsr), target.take(confTsr),
            BinaryCrossEntropyWithLogitsFuncOptions().reduction(torch::kSum).weight(weightTsr));
        error += e2;
        cout << "e2=" << e2.item<float>() << ", ";

        // Box regression loss (CIoU) on the occupied cells only.
        Tensor rectTsr = torch::from_blob(rects.data(), { (int)rects.size() / 4, 4 }, c10::kLong);
        rectTsr = rectTsr.to(c10::kCUDA).t(); /* transpose to 4 rows for easier math */
        Tensor recta = outDataToRect(predict.take(rectTsr), rectIndices, true);
        Tensor rectb = target.take(rectTsr);
        Tensor e3 = 0.3f * calcCiouLoss(recta, rectb);
        error += e3;
        cout << "e3=" << e3.item<float>() << ", ";
    }
    if (!noneConfs.empty())
    {
        // Confidence loss for the empty cells (pushes confidence toward 0).
        Tensor noneConfTsr = torch::from_blob(noneConfs.data(), { (int)noneConfs.size() }, c10::kLong);
        noneConfTsr = noneConfTsr.to(c10::kCUDA);
        ASSERT(target.take(noneConfTsr).count_nonzero().item<int>() == 0);
        Tensor e4 = 0.4f * binary_cross_entropy_with_logits(predict.take(noneConfTsr), target.take(noneConfTsr),
            BinaryCrossEntropyWithLogitsFuncOptions().reduction(torch::kSum));
        error += e4;
        cout << "e4=" << e4.item<float>() << endl;
    }
    // Normalize by the total number of participating elements.
    int count = (int)(classes.size() + objConfs.size() + rects.size() / 4 + noneConfs.size());
    error /= count;

    //static int looper = 0;
    //if (looper % 10 == 9)
    //{
    //    cout << "当前批次：" << error.item<float>() << endl;
    //}
    //looper++;
    return error;
}

//---------------------------------------------------------------------------------------
// Convert raw grid outputs to real rectangles, batched. Because each predicted
// box is compared against the label sitting in the same grid cell, both share
// the cell offset, so only the position relative to the cell's top-left corner
// is computed here.
// xdata: 4xN tensor of raw (x, y, w, h) outputs, one column per box.
// rectIndices: anchor index (0..8) per column; index/3 selects the scale.
// transform: currently unused in this body — TODO confirm it can be removed.
//---------------------------------------------------------------------------------------
Tensor MDiyLossImpl::outDataToRect(Tensor xdata, const vector<int>& rectIndices, bool transform)
{
    // Cell size in pixels for each of the three scales (19/38/76).
    const float gridSizes[] = { DIV_1_19 * PSIZE, DIV_1_38 * PSIZE, DIV_1_76 * PSIZE };
    Tensor rect = torch::empty_like(xdata);
    vector<float> offsets;
    vector<float> wcoes, hcoes;
    ASSERT(xdata.size(1) == rectIndices.size());
    for (auto index : rectIndices)
    {
        offsets.push_back(gridSizes[index / 3]);
        wcoes.push_back(4 * predefined[index].width);
        hcoes.push_back(4 * predefined[index].height);
    }
    // from_blob wraps the host vectors; .to(kCUDA) copies before they die.
    Tensor offsetRate = torch::from_blob(offsets.data(), { (int)offsets.size() }, c10::kFloat);
    Tensor widthRate = torch::from_blob(wcoes.data(), { (int)wcoes.size() }, c10::kFloat);
    Tensor heightRate = torch::from_blob(hcoes.data(), { (int)hcoes.size() }, c10::kFloat);
    offsetRate = offsetRate.to(torch::kCUDA);
    widthRate = widthRate.to(torch::kCUDA);
    heightRate = heightRate.to(torch::kCUDA);
    // Same decoding as YoloImpl::calcPredictRect: (2*sigmoid - 0.5) * cell size
    // for the center, (2*sigmoid)^2 * anchor size for width/height.
    rect[0] = (torch::sigmoid(xdata[0]) * 2 - 0.5f) * offsetRate;
    rect[1] = (torch::sigmoid(xdata[1]) * 2 - 0.5f) * offsetRate;
    rect[2] = widthRate * torch::pow(torch::sigmoid(xdata[2]), 2);
    rect[3] = heightRate * torch::pow(torch::sigmoid(xdata[3]), 2);
    //cout << rect.t() << endl;
    return rect;
}

/////////////////////////////////////////////////////////////////////////////////////////

//---------------------------------------------------------------------------------------
// Training loop. Each LOSS row holds 75 elements: (20[class] + 1[conf] +
// 4[xywh]) x 3. Trains with Adam (lr 0.0003), gradient accumulation over 12
// mini-batches, periodic validation and checkpointing every 5 epochs.
//---------------------------------------------------------------------------------------
void train(Yolo& net)
{
    //torch::load(net, "yolov5.pt");
    torch::optim::Adam optimizer(net->parameters(), torch::optim::AdamOptions(0.0003));
    //torch::load(optimizer, "yolov5-opt.pt");

    using MapDataset = torch::data::datasets::MapDataset<MDataset, torch::data::transforms::Stack<>>;
    using DataLoader = unique_ptr<torch::data::StatelessDataLoader<MapDataset, torch::data::samplers::RandomSampler>>;
    MDataset myDataset = MDataset("E:/ProjectO/VOCtrainval_11-May-2012", "train", classes);
    DataLoader dataLoader = torch::data::make_data_loader(myDataset.map(torch::data::transforms::Stack<>()), 12);

    // Separate, non-randomized dataset for validation sampling.
    MDataset checkDataset = MDataset("E:/ProjectO/VOCtrainval_11-May-2012", "val", classes);
    checkDataset.setRandomize(false);
    setRNGSeed((int)getTickCount());
    RNG rng = theRNG();

    Curve curve(600, 900); // progress plot: series 0 = loss, series 1 = hit rate

    net->to(torch::kCUDA);
    MDiyLoss lossFunc;
    int miniBatch = 0;
    for (int i = 0; i < 900; i++)
    {
        net->train();
        for (auto& batch : *dataLoader)
        {
            waitKey(1); // pump OpenCV window messages
            Tensor input = batch.data.to(torch::kCUDA);
            Tensor target = batch.target.to(torch::kCUDA);
            Tensor predict = net->forward(input);
            Tensor loss = lossFunc(predict, target);
            loss.backward();
            // Gradient accumulation: step every 12 mini-batches.
            // NOTE(review): with %12==0 the very first step fires after a single
            // backward pass — confirm that is intended.
            if (miniBatch % 12 == 0)
            {
                optimizer.step();
                optimizer.zero_grad();
            }
            if (miniBatch % 200 == 0)
            {
                float lossVal = loss.item<float>();
                long long time = getTickCount() / getTickFrequency();
                cout << "批次: " << i << "。训练损失：" << lossVal << "。时间1：" << time << endl;
                curve.append(0, miniBatch / 200, lossVal);
            }
            miniBatch++;
        }
        // Validation kicks in only after 20000 mini-batches of training.
        if (miniBatch >= 20000)
        {
            net->eval();
            float predCount = 0;
            int count = (int)checkDataset.size().value();
            // Randomly sample 60 images for validation.
            // NOTE(review): this inner `i` shadows the epoch counter above.
            for (int i = 0; i < 60; i++)
            {
                Example<> record = checkDataset.get(rng.uniform(0, count));
                vector<ObjectFound> results;
                Tensor input = record.data.unsqueeze_(0).to(torch::kCUDA);
                net->predict(input, results);
                predCount += (!results.empty());
            }
            curve.append(1, miniBatch / 200, predCount / 60.0f);
        }
        // Checkpoint model and optimizer state every 5 epochs.
        if (i % 5 == 4)
        {
            torch::save(net, "yolov5.pt");
            torch::save(optimizer, "yolov5-opt.pt");
        }
    }
}

//---------------------------------------------------------------------------------------
// Validation/visualization pass. Training YOLOv5 here can push the loss from
// about 0.26 down to roughly 0.002, after which it stops improving. Shows 20
// random validation images with predicted boxes and the ground-truth labels
// listed in the margin.
//---------------------------------------------------------------------------------------
void check(Yolo& net)
{
    MDataset myDataset = MDataset("E:/ProjectO/VOCtrainval_11-May-2012", "val", classes);
    myDataset.setRandomize(false);
    setRNGSeed((int)getTickCount());
    RNG rng = theRNG();

    torch::load(net, "yolov5.pt");

    net->to(c10::kCUDA);
    net->eval();
    int count = (int)myDataset.size().value();
    for (int i = 0; i < 20; i++)
    {
        int randIndex = rng.uniform(0, count);
        Example<> record = myDataset.get(randIndex);

        // Wrap the tensor buffer as a Mat with rows = size(1), cols = size(2).
        // NOTE(review): this presumes record.data's memory layout is compatible
        // with an HWC float Mat — confirm against MDataset's output format.
        Mat image(record.data.size(1), record.data.size(2), CV_32FC3, record.data.data_ptr());
        Mat toshow = Mat(image.rows, image.cols + 120, CV_32FC3, Scalar(0.5, 0.5, 0.5));
        // BUGFIX: cv::Rect takes (x, y, width, height); the original passed
        // (rows, cols) as (width, height) — only correct for square images.
        image.copyTo(toshow(Rect(0, 0, image.cols, image.rows)));

        vector<ObjectFound> results;
        Tensor input = record.data.unsqueeze_(0).to(c10::kCUDA);
        net->predict(input, results);
        for (auto& item : results) // clamp rectangles poking past the image border
        {
            item.bound.x = std::max(item.bound.x, 0.0f);
            item.bound.y = std::max(item.bound.y, 0.0f);
        }
        // Draw each prediction with its class name and confidence.
        for (const auto& item : results)
        {
            rectangle(toshow, Rect(item.bound), Scalar(0, 0, 1.0));
            string name = classes[item.category] + "," + std::to_string(item.confidence, 2);
            putText(toshow, name, Point2i(item.bound.x, item.bound.y + 14), 0, 0.6, Scalar(0.48, 1, 0.48));
        }
        // List ground-truth class names in the side margin, de-duplicated by box size.
        vector<Size2f> bucket;
        int line = 1;
        for (int t = 0; t < record.target.size(0); t++)
        {
            for (int u = 0; u < 75; u += 25)
            {
                if (record.target[t][u + 20].item<int>() != 0)
                {
                    Size2f size;
                    size.width = record.target[t][u + 23].item<float>();
                    size.height = record.target[t][u + 24].item<float>();
                    auto found = std::find(bucket.begin(), bucket.end(), size);
                    if (found == bucket.end()) // de-duplicate labels by size
                    {
                        bucket.push_back(size);
                        Tensor thisPredict = record.target[t].index_select(0, torch::arange(u, u + 20));
                        int index = thisPredict.argmax(0).item<int>();
                        putText(toshow, classes[index], Point2i(image.cols, line * 16), 0, 0.6, Scalar(0.4, 0.8, 0));
                        line++;
                    }
                }
            }
        }
        string wndTitle = "预测图片-";
        wndTitle += myDataset.nameOf(randIndex);
        wndTitle += "-" + std::to_string(randIndex);
        imshow(wndTitle, toshow);
        moveWindow(wndTitle, 600, 600);
        if (toshow.cols < 480)
        {
            resizeWindow(wndTitle, 480, toshow.rows);
        }
        waitKey(5000);
        destroyWindow(wndTitle);
    }
}

/////////////////////////////////////////////////////////////////////////////////////////

//---------------------------------------------------------------------------------------
// YOLO-V5 uses 9 preset anchors, three per output scale, obtained by clustering
// the dataset's annotation boxes. The official V5 recomputes them each training
// run; the samples here never change, so the k-means split into 9 classes is
// done once, ahead of training (menu option 0).
//---------------------------------------------------------------------------------------
int main()
{
    int option;
    cout << "选择功能：\n0->预先对样本进行KMEANS分成9类；\n1->训练Yolo-V5；\n2->检验Yolo-V5；\n其它：退出程序。\n";
    cin >> option;
    if (option == 0)
    {
        // Pre-cluster the annotation boxes into the 9 anchor sizes.
        MDataset dataset("E:/ProjectO/VOCtrainval_11-May-2012", "train", classes);
        array<Size2f, 9> cates = dataset.kmeansClassify();
        for (const auto& item : cates) // const ref: avoid copying each Size2f
        {
            cout << item.width << "," << item.height << endl;
        }
    }
    else if (option == 1)
    {
        Yolo yolo(20); // 20 VOC classes
        train(yolo);
    }
    else if (option == 2)
    {
        Yolo yolo(20);
        check(yolo);
    }
    // Removed the dead local `int z = 0;` the original left before returning.
    return 0;
}







