#include <cmath>
#include <cstdio>
#include <cstdlib>
#include <fstream>
#include <iostream>
#include <sstream>
#include <utility>
#include <vector>
#define TEST
using namespace std;

// One training/test sample: a feature vector plus its integer label.
struct Data {
    std::vector<double> features; // feature values of a single sample
    int label;                    // class label (0 or 1; 0 placeholder for test data)
    // Take the vector by value and move it into the member: the original
    // copied it, so callers passing temporaries paid for a deep copy.
    Data(std::vector<double> f, int l) : features(std::move(f)), label(l)
    {}
};
// Model parameters: the learned weight vector (one weight per feature).
struct Param {
    vector<double> wtSet; // weight parameters
};


// Logistic-regression model: loads training/test data from files, fits
// the weights by batch gradient descent, and writes 0/1 predictions to
// an output file.
class LR {
public:
    void train(); // fit the weights by gradient descent
    void predict(); // predict labels for the test set and store them
    int loadModel(); // load a previously saved model, i.e. the stored weight parameters
    int storeModel(); // save the model, i.e. the weight parameters
    LR(string trainFile, string testFile, string predictOutFile); // constructor

private:
    vector<Data> trainDataSet; // training data set
    vector<Data> testDataSet;  // test data set
    vector<int> predictVec; // predicted labels
    Param param; // weight parameters
    string trainFile; // path of the training-set file
    string testFile; // path of the test-set file
    string predictOutFile; // path of the prediction-output file
    string weightParamFile = "modelweight.txt"; // file that stores the model parameters

private:
    bool init(); // initialization: load the training set and set the per-sample feature count
    bool loadTrainData(); // load the training data
    bool loadTestData(); // load the test data
    int storePredict(vector<int>& predict); // write predictions to predictOutFile
    void initParam(); // initialize the weight parameters
    double wxbCalc(const Data& data); // per-sample Y = X*W + B (no bias B here)
    double sigmoidCalc(const double wxb); // scalar sigmoid function
    double lossCal(); // compute the mean cross-entropy loss over the training set
    double gradientSlope(const vector<Data>& dataSet, int index, const vector<double>& sigmoidVec); // gradient of weight[index]; returns the NEGATIVE gradient

private:
    int featuresNum; // number of features per sample
    const double wtInitV = 1.0; // initial value for each weight
    const double stepSize = 0.1; // learning rate
    const int maxIterTimes = 300; // number of gradient-descent iterations
    const double predictTrueThresh = 0.5; // threshold: sigmoid output >= thresh predicts 1, else 0
    const int train_show_step = 10; // print training info every this many iterations
};

// Constructor: record the three file paths, then run init() to load the
// training data and set up the weight vector.
LR::LR(string trainF, string testF, string predictOutF)
{
    featuresNum = 0;               // set by init() from the first sample
    trainFile = trainF;            // training-set path
    testFile = testF;              // test-set path
    predictOutFile = predictOutF;  // prediction-output path
    init();                        // load data and initialize weights
}

//加载训练数据
bool LR::loadTrainData()
{
    ifstream infile(trainFile.c_str());
    string line;

    if (!infile) {
        cout << "打开训练文件失败" << endl;
        exit(0);
    }

    while (infile) {
        getline(infile, line);
        if (line.size() > featuresNum) {
            stringstream sin(line);
            char ch;
            double dataV;
            int i;
            vector<double> feature;
            i = 0;

            while (sin) {
                char c = sin.peek();
                if (int(c) != -1) {
                    sin >> dataV;
                    feature.push_back(dataV);
                    sin >> ch;
                    i++;
                }
                else {
                    cout << "训练文件数据格式不正确，出错行为" << (trainDataSet.size() + 1) << "行" << endl;
                    return false;
                }
            }
            int ftf;
            ftf = (int)feature.back();
            feature.pop_back();
            trainDataSet.push_back(Data(feature, ftf));
        }
    }
    infile.close();
    return true;
}

// Initialize the weight parameters: append featuresNum copies of the
// initial weight value to the weight vector.
void LR::initParam()
{
    param.wtSet.reserve(param.wtSet.size() + featuresNum); // single allocation
    for (int k = 0; k < featuresNum; ++k) {
        param.wtSet.push_back(wtInitV);
    }
}

// Initialization: load the training set, derive the per-sample feature
// count from the first sample, then initialize the weight vector.
// Returns false when loading fails or no usable sample was loaded.
bool LR::init()
{
    trainDataSet.clear(); // drop any previously loaded data
    bool status = loadTrainData();
    if (status != true) {
        return false;
    }
    if (trainDataSet.empty()) {
        // Guard: indexing trainDataSet[0] below would be undefined
        // behavior if the file contained no valid samples.
        return false;
    }
    featuresNum = trainDataSet[0].features.size(); // feature count per sample
    param.wtSet.clear(); // reset the weights
    initParam(); // initialize the weight parameters
    return true;
}

// Per-sample linear score Y = X*W (no bias term B): dot product of the
// weight vector with the sample's features.
double LR::wxbCalc(const Data& data)
{
    double dot = 0.0;
    for (size_t k = 0; k < param.wtSet.size(); ++k) {
        dot += param.wtSet[k] * data.features[k];
    }
    return dot;
}

// Logistic sigmoid of a single scalar: 1 / (1 + e^{-x}).
inline double LR::sigmoidCalc(const double wxb)
{
    return 1.0 / (1.0 + exp(-wxb));
}

// Compute the mean cross-entropy loss over the training set:
//   -(1/N) * sum[ t*log(s) + (1-t)*log(1-s) ]  where s = sigmoid(w·x).
// Returns 0 on an empty set (the original divided by zero there).
double LR::lossCal()
{
    if (trainDataSet.empty()) {
        return 0.0; // avoid 0/0 on an empty data set
    }
    double lossV = 0.0;
    for (size_t i = 0; i < trainDataSet.size(); i++) {
        // Compute the sigmoid once per sample; the original evaluated
        // sigmoidCalc(wxbCalc(...)) twice.
        double sigv = sigmoidCalc(wxbCalc(trainDataSet[i]));
        int label = trainDataSet[i].label;
        lossV -= label * log(sigv);
        lossV -= (1 - label) * log(1 - sigv);
    }
    lossV /= trainDataSet.size(); // mean cross-entropy
    return lossV;
}

// Gradient of a single weight (weight[index]); returns the NEGATIVE
// gradient, averaged over the data set. Backpropagating through
// wx -> sigmoid -> cross-entropy loss gives d/dw = x*(s - t), so the
// accumulated (t - s)*x below is the negative gradient.
double LR::gradientSlope(const vector<Data>& dataSet, int index, const vector<double>& sigmoidVec)
{
    double acc = 0.0;
    for (size_t s = 0; s < dataSet.size(); ++s) {
        // sigmoidVec[s] is the precomputed sigmoid output of sample s.
        acc += (dataSet[s].label - sigmoidVec[s]) * dataSet[s].features[index];
    }
    // Average over all samples accumulated above.
    return acc / dataSet.size();
}

//训练
void LR::train()
{
    double sigmoidVal; //记录每次迭代中，最后Sigmoid函数的计算结果
    double wxbVal;
    int i, j;

    for (i = 0; i < maxIterTimes; i++) {
        vector<double> sigmoidVec;

        for (j = 0; j < trainDataSet.size(); j++) {//循环遍历每一个样本
            wxbVal = wxbCalc(trainDataSet[j]);//单样本的计算
            sigmoidVal = sigmoidCalc(wxbVal);//单变量Sigmoid的计算
            sigmoidVec.push_back(sigmoidVal);
        }

        for (j = 0; j < param.wtSet.size(); j++) {//循环遍历每个权重参数,计算该权重的梯度，并更新权重参数
            param.wtSet[j] += stepSize * gradientSlope(trainDataSet, j, sigmoidVec);
        }

        if (i % train_show_step == 0) {//每迭代train_show_step次，打印一次更新后的参数
            cout << "iter " << i << ". updated weight value is : ";
            for (j = 0; j < param.wtSet.size(); j++) {
                cout << param.wtSet[j] << "  ";
            }
            cout << endl;
        }
    }
}

//预测
void LR::predict()
{
    double sigVal;
    int predictVal;

    loadTestData();//加载测试数据
    for (int j = 0; j < testDataSet.size(); j++) {//循环遍历每个测试数据
        sigVal = sigmoidCalc(wxbCalc(testDataSet[j]));//单样本的计算
        predictVal = sigVal >= predictTrueThresh ? 1 : 0; //用设定好的阈值对计算结果进行判断
        predictVec.push_back(predictVal); 
    }

    storePredict(predictVec);
}

// Load previously saved weight parameters (one whitespace-separated
// value per feature, all on the first line of weightParamFile).
// Returns 0 on success, -1 when the file holds fewer (or malformed)
// values than featuresNum; exits the process if the file cannot open.
int LR::loadModel()
{
    string line;
    int i;
    vector<double> wtTmp;
    double dbt;

    ifstream fin(weightParamFile.c_str());
    if (!fin) {
        cout << "打开模型参数文件失败" << endl;
        exit(0);
    }

    getline(fin, line);
    stringstream sin(line);
    for (i = 0; i < featuresNum; i++) {
        // peek() returns int; the original narrowed it into char, so the
        // EOF comparison could never fire where char is unsigned.
        if (sin.peek() == EOF) {
            cout << "模型参数数量少于特征数量，退出" << endl;
            return -1;
        }
        if (!(sin >> dbt)) {
            // Extraction failure (non-numeric token) counts as a short
            // parameter file as well; the original ignored it.
            cout << "模型参数数量少于特征数量，退出" << endl;
            return -1;
        }
        wtTmp.push_back(dbt);
    }
    param.wtSet.swap(wtTmp); // commit only when all values were read
    fin.close();
    return 0;
}

// Save the model: write the weight parameters, space-separated, to
// weightParamFile. Returns 0 on success, -1 when the file cannot be
// opened (the original kept writing into the failed stream).
int LR::storeModel()
{
    int i;

    ofstream fout(weightParamFile.c_str());
    if (!fout.is_open()) {
        cout << "打开模型参数文件失败" << endl;
        return -1; // nothing can be written
    }
    if (param.wtSet.size() < static_cast<size_t>(featuresNum)) {
        cout << "wtSet size is " << param.wtSet.size() << endl;
    }
    // Clamp to the actual weight count: the original indexed up to
    // featuresNum even when wtSet was smaller (out-of-bounds read).
    for (i = 0; i < featuresNum && i < (int)param.wtSet.size(); i++) {
        fout << param.wtSet[i] << " ";
    }
    fout.close();
    return 0;
}

// Load the test data. Each line holds comma-separated feature values;
// exactly featuresNum values are read per line and the label is set to
// 0 (unknown). Returns false on a malformed line; exits the process
// when the file cannot be opened.
bool LR::loadTestData()
{
    ifstream infile(testFile.c_str());

    if (!infile) {
        cout << "打开测试文件失败" << endl;
        exit(0);
    }

    string line;
    while (infile) {
        vector<double> feature;
        getline(infile, line);
        // Skip lines too short to contain featuresNum values
        // (also silences the signed/unsigned comparison).
        if (line.size() > static_cast<size_t>(featuresNum)) {
            stringstream sin(line);
            double dataV;
            int i;
            char ch;
            i = 0;
            while (i < featuresNum && sin) {
                // peek() returns int; the original narrowed it into char,
                // breaking the EOF test for 0xFF bytes / unsigned char.
                if (sin.peek() != EOF) {
                    sin >> dataV;
                    if (!sin) {
                        // Malformed token; the original silently pushed
                        // a garbage value before the stream check.
                        cout << "测试文件数据格式不正确" << endl;
                        return false;
                    }
                    feature.push_back(dataV);
                    sin >> ch; // consume the separator
                    i++;
                }
                else {
                    cout << "测试文件数据格式不正确" << endl;
                    return false;
                }
            }
            testDataSet.push_back(Data(feature, 0)); // label unknown -> 0
        }
    }

    infile.close();
    return true;
}

// Read one integer answer/label per non-empty line of awFile into
// awVec. Exits the process when the file cannot be opened (mirroring
// the other loaders in this file); otherwise always returns true.
bool loadAnswerData(std::string awFile, std::vector<int>& awVec)
{
    std::ifstream infile(awFile.c_str());
    if (!infile) {
        std::cout << "打开答案文件失败" << std::endl;
        std::exit(0);
    }

    std::string line;
    while (infile) {
        std::getline(infile, line);
        if (!line.empty()) {
            int aw;
            std::stringstream sin(line);
            sin >> aw;
            awVec.push_back(aw);
        }
    }

    infile.close();
    return true;
}

// Write the predicted labels, one per line, to predictOutFile.
// Returns 0 on success, -1 when the output file cannot be opened
// (the original kept writing into the failed stream in that case).
int LR::storePredict(vector<int>& predict)
{
    ofstream fout(predictOutFile.c_str());
    if (!fout.is_open()) {
        cout << "打开预测结果文件失败" << endl;
        return -1;
    }
    for (size_t i = 0; i < predict.size(); i++) {
        // '\n' instead of endl: same output bytes, no flush per line.
        fout << predict[i] << '\n';
    }
    fout.close();
    return 0;
}

// Entry point: train on the training file, persist the model, predict
// the test file, and (when TEST is defined) compare the predictions
// against the answer file to report accuracy.
int main(int argc, char* argv[])
{
    vector<int> answerVec;
    vector<int> predictVec;
    int correctCount;
    double accurate;
    string trainFile = "data/train_data.txt"; // training-data file
    string testFile = "data/test_data.txt"; // test-data file
    string predictFile = "projects/student/result.txt"; // prediction-output file

    string answerFile = "projects/student/answer.txt"; // ground-truth file

    LR logist(trainFile, testFile, predictFile);

    cout << "ready to train model" << endl;
    logist.train();

    cout << "training ends, ready to store the model" << endl;
    logist.storeModel();

#ifdef TEST
    cout << "ready to load answer data" << endl;
    loadAnswerData(answerFile, answerVec);
#endif

    cout << "let's have a prediction test" << endl;
    logist.predict();

#ifdef TEST
    loadAnswerData(predictFile, predictVec);
    cout << "test data set size is " << predictVec.size() << endl;
    correctCount = 0;
    for (size_t j = 0; j < predictVec.size(); j++) {
        if (j < answerVec.size()) {
            if (answerVec[j] == predictVec[j]) {
                correctCount++;
            }
        }
        else {
            cout << "answer size less than the real predicted value" << endl;
        }
    }

    // Guard the division: an empty answer file would otherwise yield
    // a 0/0 NaN accuracy.
    if (answerVec.empty()) {
        cout << "no answer data, cannot compute accuracy" << endl;
    }
    else {
        accurate = ((double)correctCount) / answerVec.size();
        cout << "the prediction accuracy is " << accurate << endl;
    }
#endif

    return 0;
}