﻿#include "ResNetCifar100.h"
#include <filesystem>
#include <iostream>
#include <fstream>
#include <numeric>
#include <regex>

using namespace std;
using namespace cv;
using torch::Tensor;

//---------------------------------------------------------------------------------------
// Scales `input` to fit inside `bound` without changing its aspect ratio and
// pastes the result into the top-left corner of a black `bound`-sized canvas.
//---------------------------------------------------------------------------------------
Mat scaleKeepRatio(const Mat& input, const Size& bound)
{
    const double scale = std::min(double(bound.width) / input.cols,
                                  double(bound.height) / input.rows);
    const Size target(int(input.cols * scale), int(input.rows * scale));
    Mat canvas(bound.height, bound.width, CV_8UC3, Scalar(0));
    Mat shrunk;
    resize(input, shrunk, target);
    shrunk.copyTo(canvas(Rect(0, 0, target.width, target.height)));
    return canvas;
}

//---------------------------------------------------------------------------------------
// Extracts the predicted class index from one row of network output.
// Single-label classification: the answer is simply the arg-max element.
// (A multi-label variant once thresholded every element at 0.5 and pushed
// each index whose score exceeded it; it was removed as dead code.)
//---------------------------------------------------------------------------------------
void extractIndex(const torch::Tensor& oneLine, vector<int>& output)
{
    const int best = oneLine.argmax().item<int>();
    output.push_back(best);
}

//---------------------------------------------------------------------------------------
// Conv -> BatchNorm, optionally followed by an in-place ReLU.
// The bias is disabled because BatchNorm's shift makes it redundant.
//---------------------------------------------------------------------------------------
MConv2dImpl::MConv2dImpl(int input, int output, int ksize, int stride, int padding, bool ineedActive) :
    needActive(ineedActive)
{
    auto convOpts = torch::nn::Conv2dOptions(input, output, ksize)
        .stride(stride)
        .padding(padding)
        .bias(false);
    conv1 = register_module("conv1", torch::nn::Conv2d(convOpts));
    bat2 = register_module("bat2", torch::nn::BatchNorm2d(torch::nn::BatchNorm2dOptions(output)));
    if (needActive)
    {
        relu3 = register_module("relu3", torch::nn::ReLU(torch::nn::ReLUOptions().inplace(true)));
    }
}

// Applies conv -> batch-norm -> (optional) ReLU.
torch::Tensor MConv2dImpl::forward(torch::Tensor x)
{
    torch::Tensor y = bat2->forward(conv1->forward(x));
    return needActive ? relu3->forward(y) : y;
}

//---------------------------------------------------------------------------------------
// Residual block. The second conv layer has no activation of its own: the
// shortcut is added first, THEN the ReLU is applied.
//---------------------------------------------------------------------------------------
MResidualImpl::MResidualImpl(int input, int output, int stride, int padding)
{
    conv1 = register_module("conv1", MConv2d(input, output, 3, stride, padding));
    conv2 = register_module("conv2", MConv2d(output, output, 3, 1, padding, false));
    relu3 = register_module("relu3", torch::nn::ReLU(torch::nn::ReLUOptions().inplace(true)));
    // A stride > 1 changes the feature-map shape, so the shortcut needs a
    // 1x1 projection conv to match before the element-wise addition.
    reduce = (stride != 1);
    if (reduce)
    {
        auto projOpts = torch::nn::Conv2dOptions(input, output, 1).stride(stride).bias(false);
        conv4 = register_module("conv4", torch::nn::Conv2d(projOpts));
    }
}

// Main path (two 3x3 convs) plus shortcut, then the shared ReLU.
torch::Tensor MResidualImpl::forward(torch::Tensor x)
{
    torch::Tensor branch = conv2->forward(conv1->forward(x));
    // Shortcut: 1x1 projection when the shape changes, identity otherwise.
    torch::Tensor shortcut = reduce ? conv4->forward(x) : x;
    branch += shortcut;
    return relu3->forward(branch);
}

// ResNet-34-style backbone for CIFAR. The stem is a single 3x3 stride-1 conv:
// CIFAR images are tiny (32x32), so the usual ImageNet max-pool stem is omitted.
ResNetImpl::ResNetImpl(int num_classes)
{
    features->push_back(MConv2d(3, 64, 3, 1, 1));

    // Stage layout {3, 4, 6, 3} with widths {64, 128, 256, 512}; every stage
    // after the first opens with a stride-2 block that halves the feature map.
    const int widths[4] = { 64, 128, 256, 512 };
    const int depths[4] = { 3, 4, 6, 3 };
    int channels = 64;
    for (int stage = 0; stage < 4; stage++)
    {
        for (int block = 0; block < depths[stage]; block++)
        {
            const int stride = (stage > 0 && block == 0) ? 2 : 1;
            features->push_back(MResidual(channels, widths[stage], stride, 1));
            channels = widths[stage];
        }
    }

    features = register_module("features", features);
    avgpool1 = register_module("avgpool1", torch::nn::AdaptiveAvgPool2d(torch::nn::AdaptiveAvgPool2dOptions({ 1, 1 })));
    fc2 = register_module("fc2", torch::nn::Linear(torch::nn::LinearOptions(512, num_classes)));
}

// Backbone -> global average pool -> flatten -> linear head.
torch::Tensor ResNetImpl::forward(torch::Tensor x)
{
    torch::Tensor y = features->forward(x);
    y = avgpool1(y);
    y = fc2->forward(torch::flatten(y, 1)); /* [batch, num_classes] */
    return y;
}

//---------------------------------------------------------------------------------------
// Runs one 8-bit 3-channel (HWC) image through the network and returns a
// single predicted class index (the soft-maxed arg-max).
//---------------------------------------------------------------------------------------
vector<int> ResNetImpl::predict(const Mat& image)
{
    // BUGFIX: from_blob() requires a contiguous buffer, but a Mat that is a
    // ROI/view of a larger image is not continuous. Clone only in that case.
    Mat input = image.isContinuous() ? image : image.clone();
    // HWC uint8 -> CHW float in [0,1], with a leading batch dimension of 1.
    torch::Tensor imgTensor = torch::from_blob(input.data, { input.rows, input.cols, 3 }, torch::kByte).permute({ 2, 0, 1 });
    imgTensor = imgTensor.unsqueeze(0).to(torch::kF32).div(255.0);
    Tensor prediction = forward(imgTensor.to(torch::kCUDA)).squeeze(0).softmax(0);
    vector<int> output;
    extractIndex(prediction.to(torch::kCPU), output);
    return output;
}

/////////////////////////////////////////////////////////////////////////////////////////

//---------------------------------------------------------------------------------------
// Loads the coarse label names from coarse_label_names.txt (one per line) and
// then the raw image blob. CIFAR-100 suffixes are "train" and "test".
//---------------------------------------------------------------------------------------
MDataset::MDataset(const string& dataDir, const string& suffix)
{
    string lblName = dataDir + "/coarse_label_names.txt";
    ifstream reader(lblName);
    // IMPROVED: getline() as the loop condition avoids the classic
    // while(!eof()) pitfall of acting on a failed final read; stop
    // conditions (read failure or blank line) are unchanged.
    string line;
    while (std::getline(reader, line))
    {
        if (line.empty())
        {
            break;
        }
        names.push_back(line);
    }
    readImageBufferFile(dataDir, suffix);
}

// Constructs with label names supplied by the caller (e.g. shared with
// another dataset instance), then loads "<dataDir>/<suffix>.bin".
MDataset::MDataset(const string& dataDir, const vector<string>& labelNames, const string& suffix)
{
    names = labelNames;
    readImageBufferFile(dataDir, suffix);
}

void MDataset::readImageBufferFile(const string& dataDir, const string& suffix)
{
    string binPath = dataDir + "/" + suffix + ".bin";
    ifstream imageReader(binPath, std::ios::in | std::ios::binary);
    if (!imageReader.is_open())
    {
        cout << "文件没有打开，检查文件" << endl;
    }
    while (!imageReader.eof())
    {
        char temp[512];
        imageReader.read(temp, 512);
        int rcount = imageReader.gcount();
        if (rcount != 0)
        {
            imageBuff.insert(imageBuff.length(), temp, rcount);
        }
    }
}

void MDataset::testRandomImage()
{
    for (int i = 0; i < 10; i++)
    {
        int index = rng.uniform(0, *size());
        Mat channels[3];
        channels[0] = Mat(32, 32, CV_8UC1, &imageBuff[index * 3074 + 2], 32);
        channels[1] = Mat(32, 32, CV_8UC1, &imageBuff[index * 3074 + 2 + 1024], 32);
        channels[2] = Mat(32, 32, CV_8UC1, &imageBuff[index * 3074 + 2 + 2048], 32);
        Mat image;
        cv::merge(channels, 3, image);
        resize(image, image, Size(), 2, 2);
        int category = imageBuff[index * 3074 + 0];
        putText(image, names[category], Point2i(10, 50), 0, 0.4, Scalar(0, 0, 255));
        imshow("测试图片分类", image);
        if (image.cols < 480)
        {
            resizeWindow("测试图片分类", Size(480, 480));
        }
        waitKey(2000);
    }
    destroyAllWindows();
}

//---------------------------------------------------------------------------------------
// Returns one (image, one-hot label) training example.
// Each record is 3074 bytes: byte 0 = coarse label (0..19), byte 1 = fine
// label (0..99), followed by three 1024-byte planes (the colour channels).
//---------------------------------------------------------------------------------------
torch::data::Example<> MDataset::get(size_t index)
{
    const size_t base = index * 3074; // IMPROVED: record offset computed once
    Mat channels[3];
    channels[0] = Mat(32, 32, CV_8UC1, &imageBuff[base + 2], 32);
    channels[1] = Mat(32, 32, CV_8UC1, &imageBuff[base + 2 + 1024], 32);
    channels[2] = Mat(32, 32, CV_8UC1, &imageBuff[base + 2 + 2048], 32);
    Mat image;
    cv::merge(channels, 3, image);
    tryTransform(image); // random augmentation (flip / HSV jitter)
#ifdef SHOW_PREVIEW_IMAGE
    imshow("image", image);
    resizeWindow("image", Size(240, 240));
    waitKey(5000);
#endif

    // FIX: std::string stores char, whose signedness is implementation
    // defined; go through unsigned char so the label byte can never be read
    // as a negative index.
    int category = static_cast<unsigned char>(imageBuff[base + 0]);
    torch::Tensor lblTensor = torch::zeros({ 20 }); // number of coarse classes
    lblTensor[category] = 1.0f; // one-hot, single label

    // HWC uint8 -> CHW float in [0,1]. toType() copies the data, so the
    // returned tensor does not alias the stack-local Mat.
    torch::Tensor imgTensor = torch::from_blob(image.data, { image.rows, image.cols, 3 }, torch::kByte).permute({ 2, 0, 1 });
    imgTensor = imgTensor.toType(torch::kF32).div(255.0f);
    return { imgTensor, lblTensor };
}

// Returns a copy of the coarse label names.
vector<string> MDataset::name() const
{
    vector<string> copy(names.begin(), names.end());
    return copy;
}

// Number of complete 3074-byte records currently in the buffer.
torch::optional<size_t> MDataset::size() const
{
    // FIX: removed an unused local that shadowed the function name.
    return imageBuff.size() / 3074;
}

// Maps a class index to its coarse label name (no bounds checking).
string MDataset::operator[](int index) const
{
    const string& label = names[index];
    return label;
}

// Applies the random augmentations in place: 50% horizontal flip, then
// 60% HSV jitter. float(rng) draws uniformly from [0, 1).
void MDataset::tryTransform(Mat& input)
{
    vector<Label> labels; /* parameter merged in from another file, unused here */
    if (float(rng) < 0.5f)
    {
        flip(input, labels);
    }
    if (float(rng) < 0.6f)
    {
        reexposure(input, labels);
    }
}

//---------------------------------------------------------------------------------------
// Random translation. The shift range is derived so that the fixed region of
// interest [minxy, maxxy] stays inside the image with a 1px tolerance.
//---------------------------------------------------------------------------------------
void MDataset::translate(Mat& input, vector<Label>& labels) const
{
    Point2f minxy(5, 5); /* shift of roughly 1/10 of the image */
    Point2f maxxy(27, 27);
    float errx1 = -minxy.x + 1; /* 1px tolerance */
    float errx2 = input.cols - maxxy.x - 1;
    float erry1 = -minxy.y + 1;
    float erry2 = input.rows - maxxy.y - 1;
    // BUGFIX: was &&, which let a degenerate range on ONE axis reach
    // rng.uniform() with lower > upper; skip if either axis has no room.
    if (errx2 - errx1 < 1 || erry2 - erry1 < 1)
    {
        return; // tolerance too small, do not translate
    }
    float movx = rng.uniform(errx1, errx2);
    float movy = rng.uniform(erry1, erry2);
    /* 2x3 affine matrix for a pure translation. */
    Mat trans(2, 3, CV_32FC1);
    trans.at<float>(0, 0) = 1;
    trans.at<float>(0, 1) = 0;
    trans.at<float>(0, 2) = movx;
    trans.at<float>(1, 0) = 0;
    trans.at<float>(1, 1) = 1;
    trans.at<float>(1, 2) = movy;
    warpAffine(input, input, trans, input.size());
}

//---------------------------------------------------------------------------------------
// Random horizontal flip (vertical flip is currently disabled).
//---------------------------------------------------------------------------------------
void MDataset::flip(Mat& input, vector<Label>& labels) const
{
    bool fx = rng(2);
    bool fy = 0/*rng(2)*/;
    if (!fx && !fy)
    {
        return; // no flip requested
    }
    // BUGFIX: the original built remap() coordinate maps with `cols - j` /
    // `rows - i`, which is off by one (a proper mirror is `cols - 1 - j`) and
    // sampled one pixel out of bounds along the edge. cv::flip() performs the
    // exact mirror. flipCode: 1 = horizontal, 0 = vertical, -1 = both.
    const int flipCode = (fx && fy) ? -1 : (fx ? 1 : 0);
    cv::flip(input, input, flipCode);
}

//---------------------------------------------------------------------------------------
// Random HSV jitter: hue H, saturation S, value/brightness V.
//---------------------------------------------------------------------------------------
void MDataset::reexposure(Mat& input, vector<Label>& labels) const
{
    Mat hsvImage;
    const int hue = rng.uniform(-24, 24);
    const int saturation = rng.uniform(-48, 48);
    const int expose = rng.uniform(-48, 48);
    cvtColor(input, hsvImage, COLOR_BGR2HSV);
    Mat channel[3];
    split(hsvImage, channel);
    // Hue is cyclic (OpenCV stores H in [0, 180) for 8-bit images), so it
    // must wrap with a modulo instead of saturating like S and V below.
    for (MatIterator_<uchar> it = channel[0].begin<uchar>(); it != channel[0].end<uchar>(); ++it)
    {
        *it = (*it + 180 + hue) % 180;
    }
    channel[1] += saturation; // saturating add
    channel[2] += expose;     // saturating add
    merge(channel, 3, hsvImage);
    cvtColor(hsvImage, input, COLOR_HSV2BGR);
}

/////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////

//---------------------------------------------------------------------------------------
// Sets the learning rate on every parameter group of the SGD optimizer.
// (This decay policy only affects final accuracy by roughly 1%.)
//---------------------------------------------------------------------------------------
void updateLearnRate(torch::optim::SGD& optimizer, double alpha)
{
    for (auto& group : optimizer.param_groups())
    {
        if (!group.has_options())
        {
            continue;
        }
        auto& sgdOptions = static_cast<torch::optim::SGDOptions&>(group.options());
        sgdOptions.lr(alpha);
    }
}

//---------------------------------------------------------------------------------------
// Per-5-epoch learning-rate decay factor. NOTE: the original code's
// epoch > 100 and epoch > 200 tiers were unreachable dead branches in a
// 100-epoch loop; they are preserved here in case the epoch count is raised.
//---------------------------------------------------------------------------------------
static double decayFactorFor(int epoch)
{
    if (epoch <= 30)  return 0.98;
    if (epoch <= 70)  return 0.95;
    if (epoch <= 100) return 0.925;
    if (epoch <= 200) return 0.9;
    return 0.88;
}

//---------------------------------------------------------------------------------------
// CIFAR-100 here is single-label classification. Two possible schemes:
// 1. binary_cross_entropy_with_logits + sigmoid, prediction = arg-max
// 2. cross_entropy_loss + softmax, prediction = arg-max
// Testing showed scheme 1 reaches slightly higher accuracy.
//---------------------------------------------------------------------------------------
void train(ResNet& resnet)
{
    using MapDataset = torch::data::datasets::MapDataset<MDataset, torch::data::transforms::Stack<>>;
    using DataLoader = unique_ptr<torch::data::StatelessDataLoader<MapDataset, torch::data::samplers::RandomSampler>>;

    MapDataset myTrain = MDataset("E:/ProjectO/cifar-100-binary", "train").
        map(torch::data::transforms::Stack<>());
    DataLoader dataTrain = torch::data::make_data_loader(myTrain, 64);
    MapDataset myValidate = MDataset("E:/ProjectO/cifar-100-binary", "test").
        map(torch::data::transforms::Stack<>());
    DataLoader dataValidate = torch::data::make_data_loader(myValidate, 64);

    cout << "样本数量是：" << *myTrain.size() << endl;

    resnet->to(c10::kCUDA);
    double alpha = 0.05;
    torch::optim::SGD optimizer(resnet->parameters(), torch::optim::SGDOptions(alpha));
    for (int epoch = 0; epoch < 100; epoch++)
    {
        /* ---- training pass ---- */
        resnet->train();
        float lossValue = 0;
        float lossNum = 0;
        for (auto& batch : *dataTrain)
        {
            torch::Tensor input = batch.data.to(c10::kCUDA);
            torch::Tensor target = batch.target.to(c10::kCUDA);
            torch::Tensor predict = resnet->forward(input);
            /* Scheme 1 for reference (positive/negative balancing weight):       */
            /* torch::Tensor pos_w = torch::ones({ 20 }) * 9.0;                   */
            /* torch::binary_cross_entropy_with_logits(predict, target, {},       */
            /*                                         pos_w.to(torch::kCUDA));   */
            torch::Tensor loss = torch::cross_entropy_loss(predict, target); /* single-label loss */
            optimizer.zero_grad();
            loss.backward();
            optimizer.step();
            lossValue += loss.item<float>();
            lossNum++;
        }
        cout << "批次: " << epoch + 1 << "。训练损失: " << lossValue / lossNum << endl;

        /* ---- learning-rate decay, every 5 epochs ---- */
        if ((epoch + 1) % 5 == 0)
        {
            alpha *= decayFactorFor(epoch);
            updateLearnRate(optimizer, alpha);
        }

        /* ---- validation pass ---- */
        resnet->eval();
        /* IMPROVED: no gradients are needed for validation; this avoids       */
        /* building autograd graphs for every forward pass.                    */
        torch::NoGradGuard noGrad;
        int oneCount = 0;  /* correctly classified samples */
        int wholeOne = 0;  /* total validated samples */
        for (auto& batch : *dataValidate)
        {
            torch::Tensor input = batch.data.to(c10::kCUDA);
            torch::Tensor target = batch.target.to(c10::kCUDA);
            torch::Tensor predict = resnet->forward(input).softmax(1);
            /* Single-label accuracy: one-hot target arg-max vs prediction arg-max. */
            torch::Tensor whole = (target.argmax(1) == predict.argmax(1));
            wholeOne += whole.numel();
            oneCount += whole.count_nonzero().item<int>();
        }
        cout << "验证比例：" << double(oneCount) / wholeOne << endl;
    }

    torch::save(resnet, "resnet.pt");
}

//---------------------------------------------------------------------------------------
// Checks whether the trained ResNet predicts correctly, using the single-label
// scheme: draw 50 random test images and compare the arg-max prediction
// against the ground-truth coarse label. Loads weights from "resnet.pt".
//---------------------------------------------------------------------------------------
void testPredict(ResNet& resnet)
{
    torch::load(resnet, "resnet.pt");
    resnet->eval();

    // Seed OpenCV's global RNG from the clock so each run picks new samples.
    setRNGSeed((int)getTickCount());
    RNG& rng = theRNG();
    // Re-loading the dataset does not change the internal label ordering.
    MDataset myTest(R"(E:/ProjectO/cifar-100-binary)", "test");
    int right = 0;
    for (int i = 0; i < 50; i++)
    {
        int rand = rng.uniform(0, *myTest.size());
        torch::data::Example<> sample = myTest.get(rand);
        torch::Tensor input = sample.data;
        // Convert the Tensor back to a Mat. Although MDataset::get() calls
        // permute() to reorder the channels, libtorch does not physically
        // rearrange the underlying memory, so (per the original author's note)
        // the buffer behind `input` is still in the Mat's original interleaved
        // BGRBGRBGR... order — TODO(review): this relies on get() never
        // materializing the permuted layout; confirm if get() changes.
        Mat image(input.size(1), input.size(2), CV_32FC3, input.data_ptr());
        Mat color;
        image.convertTo(color, CV_8UC3, 255); // float [0,1] back to 8-bit

        vector<int> indices = resnet->predict(color);
        vector<int> classes;
        extractIndex(sample.target, classes); // ground truth from the one-hot target
        cout << "预测结果的真实值：";
        for (const auto item : classes)
        {
            cout << myTest[item] << " ";
        }
        cout << "预测值：";
        for (const auto item : indices)
        {
            cout << myTest[item] << " ";
        }
        cout << endl;
        if (indices == classes) /* each vector holds exactly one element */
        {
            right++;
        }

#ifdef SHOW_IMAGE
        imshow("随机验证图片", color);
        resizeWindow("随机验证图片", Size(240, 240));
        waitKey(10000);
#endif
    }
    cout << "总正确率：" << right / double(50) << endl;
}

/////////////////////////////////////////////////////////////////////////////////////////

// Entry point: builds a 20-class (coarse-label) ResNet, prints its parameter
// names, then runs validation. Uncomment train() to retrain from scratch.
int main()
{
    ResNet resnet(20);

    /* Print the network structure (registered parameter names). */
    // FIX: iterate by const reference — `auto name` copied every item.
    const auto dict = resnet->named_parameters();
    for (const auto& item : dict)
    {
        cout << item.key() << endl;
    }

    /* Training */
    //train(resnet);
    /* Validation */
    testPredict(resnet);

    // FIX: removed the unused local `int z = 0;`.
    return 0;
}






