﻿#include "ResNet.h"
#include <filesystem>
#include <iostream>
#include <fstream>
#include <numeric>
#include <regex>

using namespace std;
using namespace cv;
using torch::Tensor;

Mat scaleKeepRatio(const Mat& input, const Size& bound)
{
    // Letterbox: scale the image to fit inside `bound` while preserving the
    // aspect ratio; the unused right/bottom margin of the canvas stays black.
    const double scale = std::min(double(bound.width) / input.cols,
                                  double(bound.height) / input.rows);
    const Size target(int(input.cols * scale), int(input.rows * scale));
    Mat resized;
    resize(input, resized, target);
    Mat canvas(bound.height, bound.width, CV_8UC3, Scalar(0));
    resized.copyTo(canvas(Rect(0, 0, target.width, target.height)));
    return canvas;
}

void extractIndex(const torch::Tensor& oneLine, vector<int>& output)
{
    // Append the index of every score above the 0.5 decision threshold.
    // `oneLine` is expected to be a 1-D float tensor of per-class scores.
    const float* scores = oneLine.const_data_ptr<float>();
    const int total = (int)oneLine.size(0);
    for (int idx = 0; idx < total; ++idx)
    {
        if (scores[idx] > 0.5f)
        {
            output.push_back(idx);
        }
    }
}

// Convolution building block: Conv2d (bias-free) + BatchNorm2d + optional LeakyReLU.
// The conv carries no bias because BatchNorm supplies the affine shift.
// NOTE: modules are registered in conv -> bn -> relu order; initialize() copies
// pretrained weights positionally, so do not reorder the register_module calls.
MConv2dImpl::MConv2dImpl(int input, int output, int ksize, int stride, int padding, bool ineedActive) :
    needActive(ineedActive)
{
    conv1 = torch::nn::Conv2d(torch::nn::Conv2dOptions(input, output, ksize).stride(stride).padding(padding).bias(false));
    conv1 = register_module("conv1", conv1);
    bat2 = torch::nn::BatchNorm2d(torch::nn::BatchNorm2dOptions(output));
    bat2 = register_module("bat2", bat2);
    if (needActive)
    {
        // The activation is optional because the last conv of a residual branch
        // is activated only after the shortcut addition (see MResidualImpl).
        relu3 = torch::nn::LeakyReLU(torch::nn::LeakyReLUOptions().inplace(true));
        relu3 = register_module("relu3", relu3);
    }
}

torch::Tensor MConv2dImpl::forward(torch::Tensor x)
{
    // conv -> batchnorm -> (optional) leaky-relu
    torch::Tensor y = bat2->forward(conv1->forward(x));
    return needActive ? relu3->forward(y) : y;
}

//---------------------------------------------------------------------------------------
// Basic residual block. The second conv is deliberately not activated: the
// shortcut is added first and the activation is applied to the sum (forward()).
// NOTE: registration order matters for positional weight copying in initialize().
//---------------------------------------------------------------------------------------
MResidualImpl::MResidualImpl(int input, int output, int stride, int padding)
{
    conv1 = MConv2d(input, output, 3, stride, padding);
    conv1 = register_module("conv1", conv1);
    conv2 = MConv2d(output, output, 3, 1, padding, false); // no activation here
    conv2 = register_module("conv2", conv2);
    relu3 = torch::nn::LeakyReLU(torch::nn::LeakyReLUOptions().inplace(true));
    relu3 = register_module("relu3", relu3);
    // stride > 1 halves the spatial size, so the shortcut needs a 1x1
    // projection conv to match shapes before the addition.
    reduce = (stride != 1);
    if (reduce)
    {
        conv4 = MConv2d(input, output, 1, stride, 0, false);
        conv4 = register_module("conv4", conv4);
    }
}

torch::Tensor MResidualImpl::forward(torch::Tensor x)
{
    // Main path: two 3x3 convs; the second has no activation because the
    // residual sum is activated after the addition.
    torch::Tensor mainPath = conv2->forward(conv1->forward(x));
    // When this block downsamples, project the shortcut with the 1x1 conv so
    // shapes match; otherwise pass the input straight through.
    torch::Tensor shortcut = reduce ? conv4->forward(x) : x;
    return relu3->forward(mainPath + shortcut);
}

// ResNet-34 layout: a stem (7x7/2 conv + 3x3/2 max-pool) followed by residual
// stages of 3, 4, 6 and 3 blocks at 64/128/256/512 channels; every stage after
// the first opens with a stride-2 block that halves the spatial size.
// NOTE: registration order fixes the parameter order that initialize() relies
// on when copying pretrained weights positionally — keep this order intact.
ResNetImpl::ResNetImpl(int num_classes)
{
    features->push_back(MConv2d(3, 64, 7, 2, 3));
    features->push_back(torch::nn::MaxPool2d(torch::nn::MaxPool2dOptions(3).stride(2).padding(1)));
    features->push_back(MResidual(64, 64, 1, 1));
    features->push_back(MResidual(64, 64, 1, 1));
    features->push_back(MResidual(64, 64, 1, 1));
    features->push_back(MResidual(64, 128, 2, 1));
    features->push_back(MResidual(128, 128, 1, 1));
    features->push_back(MResidual(128, 128, 1, 1));
    features->push_back(MResidual(128, 128, 1, 1));
    features->push_back(MResidual(128, 256, 2, 1));
    features->push_back(MResidual(256, 256, 1, 1));
    features->push_back(MResidual(256, 256, 1, 1));
    features->push_back(MResidual(256, 256, 1, 1));
    features->push_back(MResidual(256, 256, 1, 1));
    features->push_back(MResidual(256, 256, 1, 1));
    features->push_back(MResidual(256, 512, 2, 1));
    features->push_back(MResidual(512, 512, 1, 1));
    features->push_back(MResidual(512, 512, 1, 1));

    features = register_module("features", features);
    // Global average pooling to 1x1, then a single fully-connected classifier.
    avgpool1 = register_module("avgpool1", torch::nn::AdaptiveAvgPool2d(torch::nn::AdaptiveAvgPool2dOptions(1)));
    fc2 = register_module("fc2", torch::nn::Linear(torch::nn::LinearOptions(512, num_classes)));
}

torch::Tensor ResNetImpl::forward(torch::Tensor x)
{
    // backbone -> global average pool -> flatten -> fully-connected logits
    torch::Tensor pooled = avgpool1(features->forward(x));
    return fc2->forward(torch::flatten(pooled, 1));
}

//---------------------------------------------------------------------------------------
// Multi-label prediction for a single image: returns the indices of all classes
// whose sigmoid score exceeds 0.5 (the result may be empty).
//---------------------------------------------------------------------------------------
vector<int> ResNetImpl::predict(const Mat& image)
{
    // Same preprocessing as training: letterbox to 224x224, HWC uint8 -> CHW float [0,1].
    Mat input = scaleKeepRatio(image, Size(224, 224));
    torch::Tensor imgTensor = torch::from_blob(input.data, { input.rows, input.cols, 3 }, torch::kByte).permute({ 2, 0, 1 });
    // to(kF32) copies, so the tensor does not dangle when `input` goes out of scope.
    imgTensor = imgTensor.unsqueeze(0).to(torch::kF32).div(255.0);
    // NOTE(review): the device is hard-coded to CUDA; this assumes the module
    // was moved to the GPU by the caller — confirm before CPU-only use.
    Tensor prediction = forward(imgTensor.to(torch::kCUDA)).squeeze(0).sigmoid();
    vector<int> output;
    extractIndex(prediction.to(torch::kCPU), output);
    return output;
}

/////////////////////////////////////////////////////////////////////////////////////////

//---------------------------------------------------------------------------------------
// Scan the VOC ImageSets/Main directory for per-class "<class>_<suffix>.txt"
// lists. VOC suffixes are: train, trainval and val.
//---------------------------------------------------------------------------------------
MDataset::MDataset(const string& vocDir, const string& suffix)
{
    using std::filesystem::directory_iterator;
    using std::filesystem::path;

    string lblPath = vocDir + "/VOCdevkit/VOC2012/ImageSets/Main/";
    vector<string> classFile;
    /* Build the pattern once: constructing a std::regex is expensive and the
       original rebuilt it for every directory entry. */
    const regex expr(R"(^(.+)_)" + suffix + R"(\.txt$)");
    for (auto& v : directory_iterator(lblPath))
    {
        path file = v.path();
        string name = file.filename().string();
        smatch result;
        if (std::regex_match(name, result, expr))
        {
            classFile.push_back(file.string());
            names.push_back(result[1]); /* capture group 1 is the class name */
        }
    }
    readImageAndLabel(vocDir, classFile);
}

// Build the dataset from an explicit class-name list, preserving the caller's
// ordering so that label indices line up with another dataset (e.g. train/val).
MDataset::MDataset(const string& vocDir, const vector<string>& labelNames, const string& suffix) :
    names(labelNames)
{
    const string txtPath = vocDir + "/VOCdevkit/VOC2012/ImageSets/Main/";
    vector<string> classFile;
    classFile.reserve(names.size());
    for (const auto& clsName : names)
    {
        classFile.push_back(txtPath + clsName + "_" + suffix + ".txt");
    }
    readImageAndLabel(vocDir, classFile);
}

void MDataset::readImageAndLabel(const string& vocDir, const vector<string>& classFile)
{
    string imgPath = vocDir + "/VOCdevkit/VOC2012/JPEGImages/";
    int count = (int)classFile.size();
    for (int i = 0; i < count; i++)
    {
        ifstream fs(classFile[i], ios::in);
        string line;
        int index = 0; /* 行号 */
        while (!fs.eof())
        {
            std::getline(fs, line);
            if (!fs.fail())
            {
                string imageName = line.substr(0, 11);
                /* 处理第一个txt文件，这时候images是空数组 */
                /* 处理其它文件时images已被填充就不需要push_back了 */
                if (i == 0)
                {
                    images.push_back(imgPath + imageName + ".jpg");
                }
                int label = std::stoi(line.substr(12));
                /* 处理第一个txt文件，这时候labels是空数组 */
                /* 处理其它文件时labels已被填充就不需要push_back了 */
                if (i == 0)
                {
                    labels.push_back(vector<int>());
                }
                /* 标记为1则表示此图属于当前分类 */
                /* 一张图片里有多个物体时会有多个标签 */
                if (label >= 0)
                {
                    labels[index].push_back(i); /* i是分类 */
                }
                index++;
            }
        }
    }
#ifdef TEST_SHOW_IMAGE /* 随机抽几张图片观察分类是否正确 */
    testRandomImage();
#endif
}

// Spot-check the dataset: display 10 random images with a class name overlaid
// so a human can verify the labels were parsed correctly.
void MDataset::testRandomImage()
{
    setRNGSeed((int)getTickCount()); // reseed so each run shows different samples
    RNG rng = theRNG();
    for (int i = 0; i < 10; i++)
    {
        int index = rng.uniform(0, (int)images.size());
        Mat testImage = imread(images[index], IMREAD_COLOR);
        // Only the first label is drawn, even for multi-label images.
        // NOTE(review): labels[index] may be empty for images with no positive
        // class — [0] would then be out of range; confirm against the dataset.
        putText(testImage, names[labels[index][0]], Point2i(10, 40), 0, 1, Scalar(0, 0, 255));
        imshow("测试图片分类", testImage);
        if (testImage.cols < 480)
        {
            // NOTE(review): imshow() creates the window with WINDOW_AUTOSIZE,
            // so this resize may be a no-op — confirm, or create the window
            // beforehand with namedWindow(..., WINDOW_NORMAL).
            resizeWindow("测试图片分类", Size(480, testImage.rows));
        }
        waitKey(2000); // show each image for 2 seconds
    }
    destroyAllWindows();
}

// Dataset accessor: load one sample, apply random augmentation, and return the
// image tensor (CHW float in [0,1]) paired with its 20-way multi-hot label.
torch::data::Example<> MDataset::get(size_t index)
{
    Mat srcLoad = imread(images[index], IMREAD_COLOR);
    Mat image = scaleKeepRatio(srcLoad, Size(224, 224));

    /* Bug fix: the original passed a temporary vector<Label>() to the
       non-const reference parameter of tryTransform — an MSVC extension that
       is ill-formed in standard C++. A named lvalue is required. */
    vector<Label> noLabels;
    tryTransform(image, noLabels);

    torch::Tensor lblTensor = torch::zeros({ 20 }); // 20 VOC classes
    for (const int item : labels[index])
    {
        lblTensor[item] = 1.0f; // multi-label: several classes may be set
    }
    /* HWC uint8 -> CHW float [0,1]. toType() copies, so the returned tensor
       does not dangle when the local `image` is destroyed. */
    torch::Tensor imgTensor = torch::from_blob(image.data, { image.rows, image.cols, 3 }, torch::kByte).permute({ 2, 0, 1 });
    imgTensor = imgTensor.toType(torch::kF32).div(255.0f);
    return { imgTensor, lblTensor };
}

// Returns a copy of the class-name list; the vector index is the class id.
vector<string> MDataset::name() const
{
    return names;
}

// Number of samples; required by the libtorch Dataset interface.
torch::optional<size_t> MDataset::size() const
{
    return images.size();
}

// Class-index -> class-name lookup.
string MDataset::operator[](int index) const
{
    return names[index];
}

// Random data augmentation: each transform fires independently with its own
// probability. The RNG is advanced exactly once per transform regardless of
// the outcome, which keeps the draw sequence identical to the original.
void MDataset::tryTransform(Mat& input, vector<Label>& labels)
{
    if (float(rng) < 0.6f)
    {
        translate(input, labels);
    }
    if (float(rng) < 0.5f)
    {
        flip(input, labels);
    }
    if (float(rng) < 0.6f)
    {
        rotate(input, labels);
    }
    if (float(rng) < 0.6f)
    {
        reexposure(input, labels);
    }
}

//---------------------------------------------------------------------------------------
// Random translation (roughly up to 1/10 of a 224px side in each direction).
//---------------------------------------------------------------------------------------
void MDataset::translate(Mat& input, vector<Label>& labels) const
{
    Point2f minxy(24, 24);   /* shift budget, ~1/10 of the image side */
    Point2f maxxy(200, 200); /* opposite corner of the region kept in view */
    float errx1 = -minxy.x + 1; /* 1px tolerance */
    float errx2 = input.cols - maxxy.x - 1;
    float erry1 = -minxy.y + 1;
    float erry2 = input.rows - maxxy.y - 1;
    /* Bug fix: skip if EITHER axis has a degenerate range — rng.uniform must
       not be called with low > high. The original used && and only skipped
       when both axes were degenerate. (For 224x224 inputs both forms agree.) */
    if (errx2 - errx1 < 5 || erry2 - erry1 < 5)
    {
        return; // tolerance too small, skip the translation
    }
    float movx = rng.uniform(errx1, errx2);
    float movy = rng.uniform(erry1, erry2);
    /* Pure translation as an affine matrix: [1 0 tx; 0 1 ty]. */
    Mat trans = (Mat_<float>(2, 3) << 1, 0, movx, 0, 1, movy);
    warpAffine(input, input, trans, input.size());
}

//---------------------------------------------------------------------------------------
// Random rotation of ±10° about the image centre.
//---------------------------------------------------------------------------------------
void MDataset::rotate(Mat& input, vector<Label>& labels) const
{
    float angle = rng.uniform(-10.0f, 10.0f) * M_PI / 180.0f;
    // Renamed from cos/sin to avoid shadowing the std math functions.
    float c = cosf(angle);
    float s = sinf(angle);
    float cx = input.cols * 0.5f;
    float cy = input.rows * 0.5f;
    // Translation terms keep the rotation centred on (cx, cy).
    float movx = cx - (cx * c - cy * s);
    float movy = cy - (cx * s + cy * c);
    // Standard 2D rotation matrix with the centring translation appended.
    Mat trans = (Mat_<float>(2, 3) << c, -s, movx, s, c, movy);
    warpAffine(input, input, trans, input.size());
}

//---------------------------------------------------------------------------------------
// Random horizontal flip (vertical flip is disabled).
//---------------------------------------------------------------------------------------
void MDataset::flip(Mat& input, vector<Label>& labels) const
{
    bool fx = rng(2);
    bool fy = 0/*rng(2)*/; /* vertical flip disabled */
    if (!fx && !fy)
    {
        return; // no flip requested, return unchanged
    }
    /* Bug fix: the previous remap() used "cols - j" / "rows - i" as source
       coordinates, which is off by one — j = 0 mapped outside the image, so
       the mirrored result had a black edge column (row). cv::flip performs
       the exact mirror. flipCode: 1 = horizontal, 0 = vertical, -1 = both. */
    int flipCode = (fx && fy) ? -1 : (fx ? 1 : 0);
    cv::flip(input, input, flipCode);
}

//---------------------------------------------------------------------------------------
// Random colour jitter in HSV space: hue H, saturation S, brightness/value V.
//---------------------------------------------------------------------------------------
void MDataset::reexposure(Mat& input, vector<Label>& labels) const
{
    Mat hsvImage;
    int hue = rng.uniform(-24, 24);
    int saturation = rng.uniform(-48, 48);
    int expose = rng.uniform(-48, 48);
    cvtColor(input, hsvImage, COLOR_BGR2HSV);
    Mat channel[3];
    split(hsvImage, channel);
    for (MatIterator_<uchar> item = channel[0].begin<uchar>(); item != channel[0].end<uchar>(); item++)
    {
        // 8-bit hue lives in [0,180); adding 180 keeps the sum non-negative
        // before the wrap-around modulo (hue is cyclic).
        *item = (*item + 180 + hue) % 180;
    }
    // cv::Mat operator+= saturates uchar values into [0,255], so negative
    // offsets clamp at 0 and positive ones at 255 rather than wrapping.
    channel[1] += saturation;
    channel[2] += expose;
    merge(channel, 3, hsvImage);
    cvtColor(hsvImage, input, COLOR_HSV2BGR);
}

// Append to `freqs` the fraction of samples that carry each of the 20 VOC
// classes (counts normalised by the dataset size).
void MDataset::calcClassFrequency(vector<float>& freqs) const
{
    int counts[20] = { 0 };
    for (const auto& sample : labels)
    {
        for (int cls : sample)
        {
            counts[cls]++; /* tally per-class occurrences */
        }
    }
    for (int tally : counts)
    {
        freqs.push_back(float(tally) / *size());
    }
}

/////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////

//---------------------------------------------------------------------------------------
// Build per-class positive weights to balance the skewed label distribution:
// frequent classes get small weights, rare ones large (capped at 12 so a class
// that almost never appears cannot blow the loss up).
//---------------------------------------------------------------------------------------
Tensor createPosWeight(const MDataset& dataset)
{
    vector<float> freqs;
    dataset.calcClassFrequency(freqs);
    /* (removed an unused local that fetched dataset.size() and never used it) */
    for (float& item : freqs)
    {
        item = std::min(12.0f, 1 / item); /* higher frequency -> lower weight */
    }
    /* from_blob does not own the memory, so clone before freqs is destroyed. */
    Tensor posw = torch::from_blob(freqs.data(), { 20 }, torch::kFloat);
    return posw.clone();
}

/* Transfer training with a stepped learning rate. */
void trainWithWeight(ResNet& resnet)
{
    using MapDataset = torch::data::datasets::MapDataset<MDataset, torch::data::transforms::Stack<>>;
    using DataLoader = unique_ptr<torch::data::StatelessDataLoader<MapDataset, torch::data::samplers::RandomSampler>>;

    MapDataset myTrain = MDataset("E:/ProjectO/VOCtrainval_11-May-2012", "train").
        map(torch::data::transforms::Stack<>());
    DataLoader dataTrain = torch::data::make_data_loader(myTrain, 12);
    /* The validation set reuses the training set's class-name order so label
       indices agree between the two datasets. */
    MapDataset myValidate = MDataset("E:/ProjectO/VOCtrainval_11-May-2012", myTrain.dataset().name(), "val").
        map(torch::data::transforms::Stack<>());
    DataLoader dataValidate = torch::data::make_data_loader(myValidate, 12);

    cout << "迁移训练。样本数量是：" << myTrain.size().value() << endl;

    resnet->to(c10::kCUDA);
    /* posw weighting easily drives the loss to NaN, so it is not passed to the
       loss below (kept for experimentation). */
    Tensor posw = createPosWeight(myTrain.dataset()).to(torch::kCUDA);
    const int numEpochs = 160;
    double learnRate = 0.01;
    for (int epoch = 0; epoch < numEpochs; ++epoch)
    {
        if (epoch == numEpochs / 2)
        {
            learnRate /= 10; /* step decay halfway through */
        }
        /* Plain SGD carries no state, so recreating it each epoch is harmless
           and lets the learning rate change take effect. */
        torch::optim::SGD optimizer(resnet->parameters(), learnRate);
        /* Bug fix: eval() is set for validation below but train mode was never
           restored, so every epoch after the first trained with frozen
           batch-norm statistics. Restore train mode here (as train() does). */
        resnet->train();
        /* The staged schedule (fc-only for the first 1/8 of the epochs) was
           disabled; all parameters are trainable from the start. */
        for (auto& np : resnet->named_parameters())
        {
            np.value().set_requires_grad(true);
        }
        float lossTrain = 0;
        int batchCount = 0;
        for (auto& batch : *dataTrain)
        {
            Tensor data = batch.data.to(c10::kCUDA);
            Tensor target = batch.target.to(c10::kCUDA);
            optimizer.zero_grad();
            // run the model
            torch::Tensor prediction = resnet->forward(data);
            assert(torch::isnan(prediction).sum() == 0);
            // multi-label loss (per-class weighting intentionally disabled)
            torch::Tensor loss = torch::binary_cross_entropy_with_logits(prediction, target/*, {}, posw*/);
            // backprop
            loss.backward();
            // update weights
            optimizer.step();
            lossTrain += loss.item<float>();
            batchCount++;
        }
        cout << "批次: " << epoch << "。训练损失: " << lossTrain / batchCount << endl;

        // validation
        resnet->eval();
        torch::NoGradGuard noGrad; /* no autograd graph needed; scope ends with this epoch */
        int oneCount = 0, zeroCount = 0;
        int wholeOne = 0, wholeZero = 0;
        for (auto& batch : *dataValidate)
        {
            torch::Tensor input = batch.data.to(c10::kCUDA);
            torch::Tensor target = batch.target.to(c10::kCUDA);
            torch::Tensor predict = resnet->forward(input).sigmoid();
            /* VOC is multi-label, so the accuracy on true 1s and the accuracy
               on true 0s are reported together as the performance metric. */
            torch::Tensor oneRate = ((target != 0) & (predict >= 0.5f));
            torch::Tensor zeroRate = ((target == 0) & (predict < 0.5f));
            wholeOne += target.count_nonzero().item<int>(); /* number of 1s */
            oneCount += oneRate.count_nonzero().item<int>();
            wholeZero += target.numel() - target.count_nonzero().item<int>(); /* number of 0s */
            zeroCount += zeroRate.count_nonzero().item<int>();
        }
        cout << "验证比例：" << double(oneCount) / wholeOne << "\t|\t" <<
            double(zeroCount) / wholeZero << endl;
    }

    torch::save(resnet, "resnet.pt");
}

/* Train from scratch with per-class positive weighting and weight decay. */
void train(ResNet& resnet)
{
    using MapDataset = torch::data::datasets::MapDataset<MDataset, torch::data::transforms::Stack<>>;
    using DataLoader = unique_ptr<torch::data::StatelessDataLoader<MapDataset, torch::data::samplers::RandomSampler>>;

    MapDataset myTrain = MDataset("E:/ProjectO/VOCtrainval_11-May-2012", "train").
        map(torch::data::transforms::Stack<>());
    DataLoader dataTrain = torch::data::make_data_loader(myTrain, 12);
    /* The validation set reuses the training set's class-name order so label
       indices agree between the two datasets. */
    MapDataset myValidate = MDataset("E:/ProjectO/VOCtrainval_11-May-2012", myTrain.dataset().name(), "val").
        map(torch::data::transforms::Stack<>());
    DataLoader dataValidate = torch::data::make_data_loader(myValidate, 12);

    cout << "样本数量是：" << myTrain.size().value() << endl;

    resnet->to(c10::kCUDA);
    /* Perf fix: move posw to the GPU once here — the original copied it
       host-to-device on every single batch inside the training loop. */
    Tensor posw = createPosWeight(myTrain.dataset()).to(torch::kCUDA);
    torch::optim::SGD optimizer(resnet->parameters(), torch::optim::SGDOptions(0.05).weight_decay(1e-3));
    for (int i = 0; i < 150; i++)
    {
        resnet->train();
        float lossValue = 0;
        float lossNum = 0;
        for (auto& batch : *dataTrain)
        {
            torch::Tensor input = batch.data.to(c10::kCUDA);
            torch::Tensor target = batch.target.to(c10::kCUDA);
            torch::Tensor predict = resnet->forward(input);
            /* Positive-class weighting balances the heavy 0/1 label skew. */
            torch::Tensor loss = torch::binary_cross_entropy_with_logits(predict, target, {}, posw);
            optimizer.zero_grad();
            loss.backward();
            optimizer.step();
            lossValue += loss.item<float>();
            lossNum++;
        }
        cout << "批次: " << i + 1 << "。训练损失: " << lossValue / lossNum << endl;

        resnet->eval();
        torch::NoGradGuard noGrad; /* no autograd graph needed; scope ends with this epoch */
        int oneCount = 0, zeroCount = 0;
        int wholeOne = 0, wholeZero = 0;
        for (auto& batch : *dataValidate)
        {
            torch::Tensor input = batch.data.to(c10::kCUDA);
            torch::Tensor target = batch.target.to(c10::kCUDA);
            torch::Tensor predict = resnet->forward(input).sigmoid();
            /* VOC is multi-label, so the accuracy on true 1s and the accuracy
               on true 0s are reported together as the performance metric. */
            torch::Tensor oneRate = ((target != 0) & (predict >= 0.5f));
            torch::Tensor zeroRate = ((target == 0) & (predict < 0.5f));
            wholeOne += target.count_nonzero().item<int>(); /* number of 1s */
            oneCount += oneRate.count_nonzero().item<int>();
            wholeZero += target.numel() - target.count_nonzero().item<int>(); /* number of 0s */
            zeroCount += zeroRate.count_nonzero().item<int>();
        }
        cout << "验证比例：" << double(oneCount) / wholeOne << "\t|\t" <<
            double(zeroCount) / wholeZero << endl;
    }

    torch::save(resnet, "resnet.pt");
}

//---------------------------------------------------------------------------------------
// Sanity-check the trained model: predict 50 random validation samples and
// count how many have a COMPLETELY correct label set.
//---------------------------------------------------------------------------------------
void testPredict(ResNet& resnet)
{
    torch::load(resnet, "resnet.pt");
    resnet->eval();

    setRNGSeed((int)getTickCount());
    RNG& rng = theRNG();
    // If the dataset changes, beware that the internal label order may change too.
    MDataset myTest(R"(E:\ProjectO\VOCtrainval_11-May-2012)", "val");
    int right = 0;
    for (int i = 0; i < 50; i++)
    {
        int rand = rng.uniform(0, *myTest.size());
        torch::data::Example<> sample = myTest.get(rand);
        torch::Tensor input = sample.data;
        // Convert the tensor back to a Mat. Although MDataset::get() called
        // permute(), libtorch only changes the strides, not the bytes in
        // memory, so the buffer is still in the original interleaved
        // BGRBGRBGR... order the Mat was loaded with.
        Mat image(input.size(1), input.size(2), CV_32FC3, input.data_ptr());
        Mat color;
        image.convertTo(color, CV_8UC3, 255);

        vector<int> indices = resnet->predict(color);
        vector<int> classes;
        extractIndex(sample.target, classes);
        cout << "预测结果的真实值：";
        for (const auto item : classes)
        {
            cout << myTest[item] << " ";
        }
        cout << "预测值：";
        for (const auto item : indices)
        {
            cout << myTest[item] << " ";
        }
        cout << endl;
        if (classes == indices) /* counted as correct only if every label matches */
        {
            right++;
        }

#ifdef SHOW_IMAGE
        imshow("随机图片", color);
        waitKey(10000);
#endif
    }
    cout << "全部正确率：" << right / double(50) << endl;
}

#include "torch/script.h"

/* Initialize the network from pretrained weights. */
void initialize(ResNet& resnet)
{
    /* The pretrained weights must be converted by python with a compatible
       version, otherwise C++ cannot load them. */
    torch::jit::Module v = torch::jit::load("resnet34p.pt");
    torch::jit::named_parameter_list pretrainedDict = v.named_parameters();

    torch::autograd::GradMode::set_enabled(false); // allow in-place parameter copies
    auto params = resnet->named_parameters(true);
    /* The pretrained network's layer layout has been verified to match this
       implementation layer for layer; only the names differ, so parameters
       are copied positionally. */
    int i = 0;
    for (auto& val : pretrainedDict)
    {
        /* Bug fix: std::string::find returns size_t, so compare against
           string::npos instead of -1 (the old code relied on the implicit
           wrap-around conversion). */
        if (val.name.find("fc") == string::npos) /* skip the fc layer */
        {
            Tensor& tparam = params[i].value();
            tparam.copy_(val.value);
            cout << val.name << " => " << params[i].key() << endl;
            i++;
        }
    }
    torch::autograd::GradMode::set_enabled(true);
}

/////////////////////////////////////////////////////////////////////////////////////////

int main()
{
    ResNet resnet(20); // 20 VOC classes

    /* Print the parameter names so the layer layout can be inspected.
       (const auto& avoids copying each named-parameter item.) */
    for (const auto& name : resnet->named_parameters())
    {
        cout << name.key() << endl;
    }

    //initialize(resnet);
    //trainWithWeight(resnet);
    //train(resnet);
    testPredict(resnet);

    /* removed the unused `int z = 0;` debug leftover */
    return 0;
}






