import argparse
import sys
import torch
import pickle
from PyQt5.QtWidgets import QApplication

import data_preprocess
import main
import model
from pathlib import Path
from matplotlib import pyplot as plt
from calculate import cal_anomaly_score
from calculate import cal_metrics

# Create the console window that streams progress messages to the user.
app = QApplication(sys.argv)
win1 = main.SecondForm()  # form class provided by the local `main` module
win1.show()


def show_console(text):
    """
    Append *text* to the console window and keep the view scrolled to the end.

    :param text: message line to display in the GUI console
    :return: None
    """
    console = win1.textEdit
    console.append(text)
    console.moveCursor(console.textCursor().End)
    QApplication.processEvents()  # let the GUI repaint immediately


# Define and parse the detection command-line options.
def _str2bool(value):
    """Parse a boolean CLI flag.

    argparse's ``type=bool`` treats every non-empty string — including
    ``"False"`` — as True, so ``--save_fig False`` silently enabled figures.
    Parse the string explicitly instead.
    """
    if isinstance(value, bool):
        return value
    return value.strip().lower() in ('true', '1', 'yes', 'y')


parser = argparse.ArgumentParser()
parser.add_argument('--data', type=str, default='ecg', help='数据集种类')
parser.add_argument('--filename', type=str, default='chfdb_chf01_275.pkl', help='数据集文件名')
parser.add_argument('--save_fig', type=_str2bool, default=True, help='是否生成图像')
args_ = parser.parse_args()

# Load the model checkpoint and restore the arguments it was trained with.
print('-' * 88)
show_console('-' * 60)
print('=> Loading arguments of the model at checkpoint...')
show_console('=> Loading arguments of the model at checkpoint...')
# NOTE(review): torch.load unpickles arbitrary objects — only load trusted checkpoints.
checkpoint = torch.load(str(Path('save', args_.data, 'model_best', args_.filename).with_suffix('.pth')))
args = checkpoint['args']  # training-time arguments saved inside the checkpoint
print('   Load successfully!')
show_console('   Load successfully!')

# Fix the random seeds so every run produces the same random numbers.
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)

# Load the dataset and build single-sample test batches.
all_dataset = data_preprocess.PreprocessData(args.data, args.filename, augment=False)
test_dataset = all_dataset.batch(args, all_dataset.testData, batch_size=1)  # test set

# Build the predictor network and load the pre-trained parameters.
feature_dim = all_dataset.trainData.size(-1)  # dimensionality of the input features
# NOTE(review): this rebinding shadows the imported `model` module; later code relies on it.
model = model.Predictor(args.model, feature_dim, args.input_size, args.units_num, feature_dim,
                        args.layers_num).to(args.device)
model.load_state_dict(checkpoint['state_dict'])  # load pre-trained weights and biases


def output_metric_fig():
    """
    Save the per-threshold performance metrics of the current channel as a PNG.

    Reads the module-level ``accuracy``/``precision``/``recall``/``f1_score``
    tensors and ``channel_idx`` that the main detection loop sets.
    :return: None
    """
    print('=> Saving the metrics as pictures...')
    show_console('=> Saving the metrics as pictures...')

    fig_dir = Path('result', args.data, args.filename).with_suffix('').joinpath('figures')
    fig_dir.mkdir(parents=True, exist_ok=True)

    plt.xlabel('Threshold')
    plt.ylabel('Value')
    # One curve per metric, all sharing the same threshold axis.
    for series, label in ((accuracy, 'accuracy'), (precision, 'precision'),
                          (recall, 'recall'), (f1_score, 'f1-score')):
        plt.plot(series.cpu().numpy(), label=label)
    plt.legend()
    plt.savefig(str(fig_dir.joinpath('metricFig_channel' + str(channel_idx)).with_suffix('.png')))
    plt.close()

    print('   Save successfully!')
    show_console('   Save successfully!')


def output_anomaly_fig():
    """
    Save the anomaly-detection result of the current channel as a PNG.

    Plots the de-normalized target series in black and overlays every detected
    anomalous segment in red. Reads the module-level ``target``,
    ``anomaly_arr``, ``test_dataset`` and ``channel_idx`` that the main
    detection loop sets.
    :return: None
    """
    print('=> Saving the anomaly results as pictures...')
    show_console('=> Saving the anomaly results as pictures...')

    # Build the path the same way as output_metric_fig/save_result: the old
    # `args.filename[:-4]` slice silently produced a wrong directory for any
    # extension that is not exactly 4 characters. Also create the directory
    # here so this function works even when called on its own.
    fig_dir = Path('result', args.data, args.filename).with_suffix('').joinpath('figures')
    fig_dir.mkdir(parents=True, exist_ok=True)

    fig, ax1 = plt.subplots(figsize=(15, 5))
    ax1.set_xlabel('Index', fontsize=15)
    ax1.set_ylabel('Variable' + str(channel_idx+1), fontsize=15)

    # The full series under inspection.
    ax1.plot(target, label='Target', color='black', marker='.', linestyle='-', markersize=1, linewidth=0.5)

    # Highlight each contiguous anomalous segment in red.
    for segment in anomaly_arr:
        xs = list(segment)
        ys = [target[idx] for idx in xs]
        ax1.plot(xs, ys, color='red', marker='.', linestyle='-', markersize=2, linewidth=2)

    ax1.legend(loc='upper left')

    plt.tight_layout()  # auto-adjust subplot padding
    plt.xlim([0, len(test_dataset)])  # clamp the x axis to the series length
    plt.savefig(str(fig_dir.joinpath('anomalyFig_channel' + str(channel_idx)).with_suffix('.png')))
    plt.close(fig)  # close this figure explicitly so repeated calls don't accumulate

    print('   Save successfully!')
    show_console('   Save successfully!')


# Persist all per-channel results to pickle files.
def save_result():
    """
    Dump the accumulated per-channel result lists into pickle files under
    ``result/<data>/<filename-without-suffix>/``.
    :return: None
    """
    print('=> Saving the results as pickle files...')
    show_console('=> Saving the results as pickle files...')

    save_dir = Path('result', args.data, args.filename).with_suffix('')
    save_dir.mkdir(parents=True, exist_ok=True)

    # Output file name -> object to serialize. Open each file with `with` so
    # handles are closed deterministically — the previous
    # `pickle.dump(obj, open(...))` pattern leaked file descriptors.
    outputs = {
        'anomaly_score.pkl': anomaly_scores,
        'precision.pkl': precisions,
        'recall.pkl': recalls,
        'f1_score.pkl': f1_scores,
        'target.pkl': targets,
        'mean_predictions.pkl': mean_predictions,
        'one_step_predictions.pkl': one_step_predictions,
        'multi_step_predictions.pkl': multi_step_predictions,
    }
    for name, obj in outputs.items():
        with open(str(save_dir.joinpath(name)), 'wb') as f:
            pickle.dump(obj, f)

    print('   Save successfully!')
    show_console('   Save successfully!')
    print('-' * 88)
    show_console('-' * 60)


# Per-channel accumulators filled by the detection loop below.
anomaly_scores = []  # anomaly score tensor per channel
accuracies = []  # accuracy per channel
precisions = []  # precision per channel
recalls = []  # recall per channel
f1_scores = []  # f1-score per channel
anomalies = []  # detected anomaly segments per channel
targets = []  # de-normalized ground-truth values per channel
mean_predictions = []  # mean of the predictions per channel
one_step_predictions = []  # one-step predictions per channel
multi_step_predictions = []  # multi-step predictions per channel

try:
    # Run detection independently for every input channel.
    for channel_idx in range(feature_dim):
        # Load this channel's error mean and covariance estimated on the training set.
        print('=> Loading the pre_calculated mean and covariance...')
        show_console('=> Loading the pre_calculated mean and covariance...')
        mean, covariance = checkpoint['means'][channel_idx], checkpoint['covariances'][channel_idx]

        # Score the test set against the training-set mean and covariance.
        print('=> Calculating anomaly scores...')
        show_console('=> Calculating anomaly scores...')
        anomaly_score, predictions, errors = cal_anomaly_score(args, model, test_dataset, mean, covariance,
                                                               channel_idx=channel_idx)

        # Evaluate detection quality; cal_metrics sweeps thresholds and
        # returns t_i, the index of the best-performing threshold.
        print('=> Calculating accuracy, precision, recall and f1-score...')
        show_console('=> Calculating accuracy, precision, recall and f1-score...')
        accuracy, precision, recall, f1_score, t_i, anomaly_arr = cal_metrics(args, anomaly_score,
                                                                              all_dataset.testLabel.to(args.device),
                                                                              steps=len(test_dataset))
        print('data: ', args.data, '    filename: ', args.filename)
        show_console('data: ' + args.data + '    filename: ' + args.filename)
        print('accuracy: ', accuracy[t_i].item(), '     precision: ', precision[t_i].item(),
              '    recall: ', recall[t_i].item(), '    f1_score: ', f1_score.max().item())
        show_console('accuracy: ' + str(accuracy[t_i].item()) + '    precision: ' + str(precision[t_i].item()) +
                     '    recall: ' + str(recall[t_i].item()) + '    f1_score: ' + str(f1_score.max().item()))

        # Undo the normalization so all series are on the original scale.
        target = data_preprocess.restore(test_dataset.cpu()[:, 0, channel_idx],
                                         all_dataset.mean[channel_idx],
                                         all_dataset.std[channel_idx]).numpy()  # de-normalized ground truth
        mean_prediction = data_preprocess.restore(predictions.mean(dim=1).cpu(),
                                                  all_dataset.mean[channel_idx],
                                                  all_dataset.std[channel_idx]).numpy()  # de-normalized mean prediction
        one_step_prediction = data_preprocess.restore(predictions[:, -1].cpu(),
                                                      all_dataset.mean[channel_idx],
                                                      all_dataset.std[channel_idx]).numpy()  # de-normalized one-step prediction
        multi_step_prediction = data_preprocess.restore(predictions[:, 0].cpu(),
                                                        all_dataset.mean[channel_idx],
                                                        all_dataset.std[channel_idx]).numpy()  # de-normalized multi-step prediction
        mean_errors = (errors.abs().mean(dim=1).cpu() * all_dataset.std[channel_idx]).numpy()
        anomaly_score = anomaly_score.cpu()

        # Accumulate this channel's results for save_result().
        anomaly_scores.append(anomaly_score)
        accuracies.append(accuracy)
        precisions.append(precision)
        recalls.append(recall)
        f1_scores.append(f1_score)
        anomalies.append(anomaly_arr)
        targets.append(target)
        mean_predictions.append(mean_prediction)
        one_step_predictions.append(one_step_prediction)
        multi_step_predictions.append(multi_step_prediction)

        # Optionally render the metrics and detection results as PNGs.
        if args_.save_fig:
            output_metric_fig()
            output_anomaly_fig()

# Ctrl+C aborts the loop early; results collected so far are still saved.
except KeyboardInterrupt:
    print('-' * 88)
    show_console('-' * 60)
    print('=> Quit test early')
    show_console('=> Quit test early')

# Persist whatever results were collected to pickle files.
save_result()

# Hand control to the Qt event loop and exit with its return code.
sys.exit(app.exec_())
