import torch
import numpy as np
import torch.nn.functional as F
import torch.nn as nn
from sklearn import metrics
from sklearn.model_selection import KFold
from sklearn.metrics import precision_recall_fscore_support
from torch.utils.data import DataLoader
from utils import TrainDataSet

# Fix the random seeds so runs are reproducible
torch.manual_seed(10)
torch.cuda.manual_seed_all(10)
torch.backends.cudnn.deterministic = True  # make cuDNN deterministic so results repeat exactly


class Model(nn.Module):
    """Two-layer MLP classifier: 1467-dim features -> 150 hidden -> 2 logits.

    Submodule names (fc1/relu/dropout/fc2) are part of the checkpoint
    state-dict contract and must not be renamed.
    """

    def __init__(self):
        super(Model, self).__init__()
        self.fc1 = nn.Linear(1467, 150)
        self.relu = nn.ReLU()
        self.dropout = nn.Dropout(0.14)
        self.fc2 = nn.Linear(150, 2)

    def forward(self, x):
        """Return raw class logits of shape (batch, 2) for input x of shape (batch, 1467)."""
        hidden = self.dropout(self.relu(self.fc1(x)))
        return self.fc2(hidden)

class Cls():
    """Rumor classifier driver: loads pre-extracted feature/label tensors from
    disk and evaluates a small MLP with 5-fold cross-validation."""

    def __init__(self):
        # Feature / label tensors (populated by load_data).
        self.pair = None
        self.label = None
        # Path of the saved text_review feature tensor.
        self.pair_location = 'dataSet/saved_dict/save_text_review.pth'
        # Path of the saved label tensor.
        self.label_location = 'dataSet/saved_dict/rumor_result.pth'
        # Number of samples in `pair`.
        self.pair_len = 0
        # Loss is summed over the batch; per-sample values are obtained by
        # dividing by the actual batch size where needed.
        self.loss_fn = nn.CrossEntropyLoss(reduction='sum')
        # Hyper-parameters.
        self.learning_rate = 5e-4
        self.weight_decay = 0.1
        self.num_epoch = 20
        self.batch_size = 128

    def load_data(self):
        """Load feature and label tensors from disk into float32.

        map_location='cpu' lets tensors that were saved on a GPU load on a
        CPU-only machine; the whole pipeline here runs on CPU anyway.
        """
        self.pair = torch.load(self.pair_location, map_location='cpu').to(torch.float32)
        self.label = torch.load(self.label_location, map_location='cpu').to(torch.float32)
        self.pair_len = self.pair.size()[0]

    def divide_data(self):
        """Split the data 9:1 into train/test sets, in original order (no shuffle).

        Returns:
            (pair_train, label_train, pair_test, label_test)
        """
        split = int(0.9 * self.pair_len)
        pair_train = self.pair[:split]
        pair_test = self.pair[split:]
        label_train = self.label[:split]
        label_test = self.label[split:]
        return pair_train, label_train, pair_test, label_test

    '''5-fold cross-validation'''
    def build_kFold_dataSet(self):
        """Run 5-fold CV: train a fresh model per fold, then print the
        averaged accuracy, loss and per-class precision/recall/F1.

        NOTE(review): assumes labels are one-hot with column 0 = Real and
        column 1 = Fake (see the class-indexed prints below) — confirm
        against how rumor_result.pth is built.
        """
        kf = KFold(n_splits=5, shuffle=False)
        fold = 1
        # Running sums over folds, averaged at the end.
        test_average_acc = 0
        test_average_loss = 0
        test_all_pre = 0
        test_all_recall = 0
        test_all_f1 = 0
        for train_index, test_index in kf.split(self.pair):
            print('**********************' + 'Fold' + str(fold) + '***************************')
            pair_train = self.pair[train_index]
            label_train = self.label[train_index]
            pair_test = self.pair[test_index]
            label_test = self.label[test_index]
            # Wrap the fold's train/test splits into DataLoaders.
            trainDataset = TrainDataSet(pair_train, label_train)
            train_iter = DataLoader(trainDataset, self.batch_size, shuffle=True)
            testDataset = TrainDataSet(pair_test, label_test)
            test_iter = DataLoader(testDataset, batch_size=self.batch_size, shuffle=True)
            test_acc, test_loss, pre, recall, f1, sup = self.train(train_iter, test_iter)
            # Accumulate fold metrics (pre/recall/f1 are per-class arrays).
            test_average_acc += test_acc
            test_average_loss += test_loss
            test_all_pre += pre
            test_all_recall += recall
            test_all_f1 += f1
            fold += 1
        # Print the K-fold averages.
        print("5-Fold Test Acc:{0:>7.2%}".format(test_average_acc / 5))
        print("5-Fold Test Loss:{0:>5.2}".format(test_average_loss / 5))
        print('Fake: pre:{0:>6.2%},rec:{1:>6.2%},f1:{2:>6.2%}'.format((test_all_pre / 5)[1], (test_all_recall / 5)[1], (test_all_f1 / 5)[1]))
        print('Real: pre:{0:>6.2%},rec:{1:>6.2%},f1:{2:>6.2%}'.format((test_all_pre / 5)[0], (test_all_recall / 5)[0], (test_all_f1 / 5)[0]))
        print('Total: pre:{0:>6.2%},rec:{1:>6.2%},f1:{2:>6.2%}'.format(np.mean(test_all_pre / 5), np.mean(test_all_recall / 5), np.mean(test_all_f1 / 5)))

    '''5-fold training routine'''
    def train(self, train_iter, test_iter):
        """Train a fresh Model on train_iter, checkpointing the best batch,
        then evaluate the checkpoint on test_iter.

        Returns (test_acc, test_loss, precision, recall, f1, support).
        """
        # A brand-new model per fold so folds do not leak into each other.
        model = Model()
        optimizer = torch.optim.Adam(model.parameters(),
                                     lr=self.learning_rate,
                                     weight_decay=self.weight_decay)
        # Best per-sample loss observed so far (checkpoint criterion).
        bestLoss = float('inf')
        total_batch = 0  # number of batches processed so far
        for epoch in range(self.num_epoch):
            print('Epoch [{}/{}]'.format(epoch + 1, self.num_epoch))
            for i, (trains, labels) in enumerate(train_iter):
                # Forward pass.
                y_pred = model(trains)
                # Summed cross-entropy over the batch.
                loss = self.loss_fn(y_pred, labels)
                # Clear accumulated gradients before the backward pass.
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
                # Report and checkpoint every 25 batches.
                if total_batch % 25 == 0:
                    # BUG FIX: normalise by the actual batch size
                    # (labels.size(0)), not self.batch_size — the last batch
                    # of an epoch may be smaller, which both skewed the
                    # printed loss and made summed losses of different-sized
                    # batches incomparable for checkpoint selection.
                    sample_loss = loss.item() / labels.size(0)
                    # Labels are one-hot, so argmax recovers the class index.
                    true = torch.max(labels.data, 1)[1].cpu()
                    predic = torch.max(y_pred.data, 1)[1].cpu()
                    train_acc = metrics.accuracy_score(true, predic)
                    if sample_loss < bestLoss:
                        bestLoss = sample_loss
                        # Persist the best model seen so far.
                        torch.save(model.state_dict(), 'dataSet/saved_dict/MLP.ckpt')
                        improve = '*'
                    else:
                        improve = ''
                    msg = 'Iter: {0:>6},  Train Loss: {1:>5.2},  Train Acc: {2:>7.2%} {3}'
                    print(msg.format(total_batch, sample_loss, train_acc, improve))
                total_batch += 1
        test_acc, test_loss, pre, recall, f1, sup = self.test(model, test_iter)
        return test_acc, test_loss, pre, recall, f1, sup


    def test(self, model, test_iter):
        """Load the best checkpoint into `model` and evaluate it on test_iter.

        Returns (test_acc, test_loss, precision, recall, f1, support).
        """
        model.load_state_dict(torch.load('dataSet/saved_dict/MLP.ckpt'))
        model.eval()
        test_acc, test_loss, pre, recall, f1, sup, test_report = self.evaluate(model, test_iter, test=True)
        msg = 'Test Loss: {0:>5.2},  Test Acc: {1:>6.2%}'
        print(msg.format(test_loss, test_acc))
        print("Precision, Recall and F1-Score...")
        print(test_report)
        return test_acc, test_loss, pre, recall, f1, sup


    def evaluate(self, model, data_iter, test=False):
        """Evaluate `model` on data_iter.

        Returns (acc, mean_batch_loss) — plus (pre, recall, f1, support,
        report) when test=True. NOTE(review): F.cross_entropy here uses the
        default mean reduction while training uses a sum — the two loss
        scales are intentionally not comparable; confirm if that matters.
        """
        model.eval()
        loss_total = 0.0
        predict_all = np.array([], dtype=int)
        labels_all = np.array([], dtype=int)
        with torch.no_grad():
            for texts, labels in data_iter:
                # logits: (batch_size, class_num)
                outputs = model(texts)
                loss = F.cross_entropy(outputs, labels)
                # BUG FIX: accumulate the Python float, not the tensor —
                # otherwise test_loss is returned as a 0-dim tensor.
                loss_total += loss.item()
                # One-hot labels -> class indices.
                labels = torch.max(labels.data, 1)[1].cpu().numpy()
                predic = torch.max(outputs.data, 1)[1].cpu().numpy()
                labels_all = np.append(labels_all, labels)
                predict_all = np.append(predict_all, predic)

        acc = metrics.accuracy_score(labels_all, predict_all)
        if test:
            pre, recall, f1, sup = precision_recall_fscore_support(labels_all, predict_all)
            report = metrics.classification_report(labels_all, predict_all, target_names=['Real', 'Fake'], digits=4)
            return acc, loss_total / len(data_iter), pre, recall, f1, sup, report
        return acc, loss_total / len(data_iter)



if __name__ == '__main__':
    # Full pipeline: load the saved feature/label tensors from disk,
    # then run 5-fold cross-validation training and print the averages.
    classifier = Cls()
    classifier.load_data()
    classifier.build_kFold_dataSet()