import os
import sys
import json
import pickle   # NOTE(review): pickle appears unused in this file — confirm before removing

HOME = os.path.split(os.path.abspath(__file__))[0]      # absolute directory containing this file
sys.path.append(HOME + '/lib')                          # make the local 'lib' subdirectory importable

import tensorflow as tf
from tensorflow.keras.callbacks import EarlyStopping    # early-stopping callback to curb overfitting

from utils import get_best_f1, get_data_dim, get_data_name
from data_preprocessor import DataPreprocessor
from mstvae_model import AverageLossCallback, MSTVAEModel
from optimizer import create_optimizer

from tensorflow.keras.models import model_from_json     # rebuild a Keras model from its JSON description

def load_train_config(filepath):
    """Load a JSON training-configuration file.

    Args:
        filepath: Path to the JSON configuration file.

    Returns:
        dict: The parsed configuration.

    Raises:
        Exception: If the file cannot be read or is not valid JSON.
            The underlying error is chained as the cause so the full
            traceback is preserved.
    """
    try:
        with open(filepath, 'r') as f:
            return json.load(f)
    except (OSError, json.JSONDecodeError) as e:
        # Chain the original error instead of discarding its traceback.
        raise Exception('Failed to load config from file {}\n{}'.format(filepath, e)) from e

def run_experiment():
    """Train, save, reload and evaluate an MST-VAE anomaly detector.

    Pipeline: configuration -> data preprocessing -> model build/train
    -> save & reload round trip -> threshold search and evaluation.
    """

    #------------------------------- Configuration -------------------------------

    use_config_from_file = True        # override the defaults below from train_config.json
    config = {
        'dataset': 'machine-1-1',
        'z_dim': 6,
        'x_dim': 38,
        'hidden_size': 128,
        'logstd_min': -5,
        'logstd_max': 2,
        'adjusted_channels': 64,
        'short_scale_layers': 2,
        'short_scale_filters': 64,
        'long_scale_layers': 1,
        'long_scale_filters': 64,
        'final_channels': 128,
        'pool_size': 6,
        'strides': 6,
        'l2_reg': 0.001,
        'window_size': 30,
        'n_samples': 100,
        'learning_rate': 0.001,
        'lr_anneal_factor': 0.5,
        'lr_anneal_epoch_freq': 30,
        'n_mc_chain': 10,
        'mcmc_iter': 10,
        'num_epochs': 10,
        'validation_split': 0.3,
        'train_batch_size': 100,
        'test_batch_size': 50
    }

    # Merge the per-dataset parameters from train_config.json over the defaults.
    if use_config_from_file:
        dataname = get_data_name(config['dataset'])
        cfg_train = load_train_config('train_config.json')
        config.update(cfg_train[dataname])
        print(json.dumps(config, indent=4))

    #------------------------------- Data preprocessing -------------------------------

    datapath = 'data/processed/' + config['dataset']        # where the processed dataset lives
    config['x_dim'] = get_data_dim(config['dataset'])       # input dimensionality comes from the dataset itself

    # The window size determines how sliding windows are cut from the series.
    data_preprocessor = DataPreprocessor(config['window_size'])
    # Load training series, test series and per-timestamp test labels.
    train_data, test_data, test_label = data_preprocessor.load_data(datapath)
    # Fit the scaler on the training data only, then apply it to both splits.
    scaled_train_data = data_preprocessor.transform(train_data, build_scaler=True)
    scaled_test_data = data_preprocessor.transform(test_data)
    # Carve a validation split off the scaled training data.
    scaled_train_data, scaled_val_data = data_preprocessor.train_val_split(
        scaled_train_data, validation_split=config['validation_split'])
    # Build batched sliding-window datasets; only the training set is shuffled.
    sliding_train_data, num_train = data_preprocessor.generate_sliding_data(
        scaled_train_data, batch_size=config['train_batch_size'], shuffle=True)
    sliding_val_data, num_val = data_preprocessor.generate_sliding_data(
        scaled_val_data, batch_size=config['train_batch_size'], shuffle=False)
    sliding_test_data, num_test = data_preprocessor.generate_sliding_data(
        scaled_test_data, batch_size=config['test_batch_size'], shuffle=False)

    #------------------------------- Model -------------------------------

    detector = MSTVAEModel(config)      # build the MST-VAE from the merged configuration
    # The optimizer's schedule depends on the number of training samples.
    optimizer = create_optimizer(num_X_train=num_train,
                                 batch_size=config['train_batch_size'],
                                 epochs=config['num_epochs'])
    detector.compile(optimizer=optimizer)

    # ------------------------------- Training -------------------------------

    # Stop when val_loss has not improved for 5 consecutive epochs, and
    # roll back to the best weights observed so far.
    early_stopping = EarlyStopping(monitor='val_loss',
                                   patience=5,
                                   restore_best_weights=True)
    detector.fit(x=sliding_train_data,
                 validation_data=sliding_val_data,
                 epochs=config['num_epochs'],
                 callbacks=[AverageLossCallback(), early_stopping])

    # ------------------------------- Save / reload -------------------------------

    # Fix: ensure the output directory exists; otherwise open() below raises
    # FileNotFoundError on a fresh checkout.
    os.makedirs('saved_model', exist_ok=True)
    # Round-trip through json.loads/json.dump purely to pretty-print the
    # architecture file with indentation.
    model_json = json.loads(detector.to_json())
    with open('saved_model/mstvae.json', 'w') as f:
        json.dump(model_json, f, indent=4)
    detector.save_weights('saved_model/mstvae.h5')

    # Reload architecture + weights to verify the save/load round trip.
    with open('saved_model/mstvae.json', 'r') as f:
        model_json = f.read()
    detector = model_from_json(model_json, custom_objects={'MSTVAEModel': MSTVAEModel})
    detector.load_weights('saved_model/mstvae.h5')

    #------------------------------- Evaluation -------------------------------

    # Reconstruction values and per-window anomaly scores on the test set.
    recons_values, anomaly_scores = detector.calculate_anomaly_scores(
        sliding_test_data, get_last_obser=True)
    # Each score corresponds to the last observation of a window, so drop the
    # first window_size - 1 labels to align labels with scores.
    test_labels = test_label[config['window_size'] - 1:]
    # Threshold search for the best F1 over the anomaly-score range.
    results = get_best_f1(anomaly_scores, test_labels)
    # NOTE(review): the index -> metric mapping below mirrors the original
    # code (3 -> TN, 4 -> TP); confirm against get_best_f1's return order.
    best_f1 = results[0][0]
    precision = results[0][1]
    recall = results[0][2]
    TN = results[0][3]
    TP = results[0][4]
    FP = results[0][5]
    FN = results[0][6]
    threshold = results[1]
    # TODO: also persist the evaluation results to a CSV file (not implemented).

    # Print the evaluation summary.
    print('Evaluation results: \nBest F1: {}\nPrecision: {}\nRecall: {}\nTP: {}\nTN: {}\nFP: {}\nFN: {}\n'
          'threshold: {}'.format(best_f1, precision, recall, TP, TN, FP, FN, threshold))

    print(config['dataset'])

if __name__ == '__main__':
    # Report GPU availability. tf.config.list_physical_devices is the TF2
    # replacement for the deprecated tf.test.is_gpu_available(); the file
    # already uses TF2-style tensorflow.keras imports.
    if tf.config.list_physical_devices('GPU'):
        print("GPU is available")
    else:
        print("GPU is not available")

    # Run the full training/evaluation experiment.
    run_experiment()