import torch
import yaml
import os
from utils import ReadTiff
from datetime import datetime
from models import *
from torch.utils.tensorboard import SummaryWriter
from torchvision import transforms
from torch.utils.data import Dataset, DataLoader
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator
import matplotlib.pyplot as plt

# Global directory layout --------------------------------------
repo_dir = "/root/mpm"
configs_dir = os.path.join(repo_dir, "configs")
experiments_dir = "/root/autodl-fs/experiments"
os.chdir(repo_dir)

# Every file in the configs directory whose name starts with
# "config" drives one experiment run in the loop below.
config_files = [
    entry for entry in os.listdir(configs_dir) if entry.startswith('config')
]
print(config_files)
# ---------------------------------------------------------
# ---------------------------------------------------------



for config_filename in config_files:

    # Load the YAML config for this run -----------------------------------
    with open(os.path.join(configs_dir, config_filename), 'r', encoding='utf-8') as file:
        config = yaml.safe_load(file)
    # ---------------------------------------------------------------------

    # Per-experiment directory layout -------------------------------------
    #
    # MPM
    # ├── train.py / models.py / utils / configs (config_1, config_2, ...)
    # └── experiments
    #     └── <model name>   (e.g. resnet50)
    #         ├── exp_<timestamp 1>
    #         └── exp_<timestamp 2>

    # Timestamped experiment name keeps concurrent/repeated runs from
    # colliding on the same directory.
    current_time = datetime.now()
    formatted_time = current_time.strftime("%Y-%m-%d-%H-%M-%S")
    exp_name = "exp_" + formatted_time
    print(exp_name)
    os.chdir(experiments_dir)

    # Create the per-model directory if needed. exist_ok=True avoids the
    # check-then-create race of the previous os.path.exists() guard.
    os.makedirs(config['model']['name'], exist_ok=True)

    # Work from inside experiments/<model>/<exp_name> for the rest of this
    # iteration; tb_logs receives the TensorBoard event files.
    os.chdir(config['model']['name'])
    os.mkdir(exp_name)
    os.chdir(exp_name)
    os.mkdir("tb_logs")
    # os.mkdir("model")

    print(os.getcwd())
    print()
    # ---------------------------------------------------------------------

    # Device --------------------------------------------------------------
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print("设备:")
    print(device)
    print()
    # ---------------------------------------------------------------------



    # Preprocessing pipeline ----------------------------------------------
    # Only the ndarray -> tensor conversion is active; the commented-out
    # augmentations (resize / flip / normalize) are kept for reference.
    myTransforms = transforms.Compose([
        ReadTiff.NdarrayToTensor(),
        # transforms.Resize((224, 224)),
        # transforms.RandomHorizontalFlip(p=0.5),
        # transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
    ])
    print("数据预处理:")
    print(myTransforms)
    print()
    # ---------------------------------------------------------------------

    # Dataset -------------------------------------------------------------

    # Directory name -> integer class label.
    class_definition = {"no": 0, "yes": 1}


    class MyData(Dataset):
        """One class folder of TIFF images yielding (image, label) samples.

        Images live under root_dir/label_dir; the folder name itself
        ("no"/"yes") is mapped to an integer label via the module-level
        class_definition dict.
        """

        def __init__(self, root_dir, label_dir, transform=None):
            self.root_dir = root_dir
            self.label_dir = label_dir
            self.path = os.path.join(root_dir, label_dir)
            self.img_path = os.listdir(self.path)  # file names only
            self.transform = transform

        def __getitem__(self, index):
            # Resolve the absolute path of the index-th image file.
            file_name = self.img_path[index]
            full_path = os.path.join(self.root_dir, self.label_dir, file_name)
            image = ReadTiff.readTif_to_Ndarray(full_path)  # raw ndarray
            if self.transform is not None:
                image = self.transform(image)
            # The label is fixed by the folder this dataset reads from.
            return image, class_definition[self.label_dir]

        def __len__(self):
            return len(self.img_path)


    # Build one dataset per class folder and concatenate them.
    no_dataset = MyData(config['data_path'], "no", transform=myTransforms)
    yes_dataset = MyData(config['data_path'], "yes", transform=myTransforms)
    all_dataset = no_dataset + yes_dataset

    # 80/20 train/test split. The test size is computed as the remainder
    # so the two lengths always sum to len(all_dataset); the previous
    # int(n*0.8) + int(n*0.2) could fall one sample short of n and make
    # random_split raise a ValueError.
    n_train = int(len(all_dataset) * 0.8)
    n_test = len(all_dataset) - n_train
    train_dataset, test_dataset = torch.utils.data.random_split(all_dataset, [n_train, n_test])

    train_dataloader = DataLoader(dataset=train_dataset, batch_size=config['train_batch_size'], shuffle=True)
    test_dataloader = DataLoader(dataset=test_dataset, batch_size=config['test_batch_size'], shuffle=True)

    print("数据集:")
    print(f"训练集长度:{len(train_dataset)}")
    print(f"测试集长度:{len(test_dataset)}")
    print()

    # ---------------------------------------------------------------------

    # Sanity check: shape of one transformed sample.
    img, label = yes_dataset[0]
    print(img.shape)

    # Model ---------------------------------------------------------------
    # Backbone is selected by name from the config; MyModel comes from
    # models.py (star import at the top of the file).
    Model = MyModel(config['model']['name'], pretrained=config['model']['pretrained'])
    Model = Model.to(device)
    print("模型:")
    print(Model)
    print()

    # Loss function -------------------------------------------------------
    # Map names to classes and instantiate only the one requested, instead
    # of eagerly constructing every loss module. An unknown name still
    # raises KeyError, exactly as before.
    loss_funcs = {'CrossEntropyLoss': nn.CrossEntropyLoss, 'MSELoss': nn.MSELoss, 'L1Loss': nn.L1Loss}
    loss_func = loss_funcs[config['loss_function']]()
    loss_func = loss_func.to(device)
    print("损失函数:")
    print(loss_func)
    print()

    # Optimizer -----------------------------------------------------------
    if config['optimizer'] == 'SGD':
        optimizer = torch.optim.SGD(Model.parameters(), lr=config['learning_rate'], momentum=config['momentum'])
    elif config['optimizer'] == 'Adam':
        optimizer = torch.optim.Adam(Model.parameters(), lr=config['learning_rate'])
    else:
        print("优化器不存在")
        exit()

    print("优化器:")
    print(optimizer)
    print()

    # TensorBoard logging (event files land in <exp dir>/tb_logs).
    writer = SummaryWriter("./tb_logs")

    # Training / validation loop ------------------------------------------
    for _epoch in range(config['epoch']):
        print("第{}轮训练开始:----------------------".format(_epoch + 1))

        # Put the model back into training mode each epoch; the eval()
        # call at the end of the previous epoch would otherwise leave
        # dropout/batch-norm frozen for every epoch after the first.
        Model.train()

        training_loss = 0.0
        for _step, input_data in enumerate(train_dataloader):
            # Move the batch to the selected device (GPU if available).
            image, label = input_data[0].to(device), input_data[1].to(device)
            # Call the module, not .forward(), so registered hooks run.
            predict_label = Model(image)

            loss = loss_func(predict_label, label)
            writer.add_scalar('training loss', loss, global_step=_epoch * len(train_dataloader) + _step)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            # Report the mean loss of every 10 steps.
            training_loss = training_loss + loss.item()
            if _step % 10 == 0:
                print('[iteration - %3d] training loss: %.3f' % (_epoch * len(train_dataloader) + _step, training_loss / 10))
                training_loss = 0.0
                print()

        # Validation ----------------------------------------------------
        print("第{}轮验证开始:--------------------------".format(_epoch + 1))

        correct = 0
        total = 0

        all_labels = []           # ground-truth labels over the test set
        all_predicted_probs = []  # P(class == 1) for each test sample

        Model.eval()
        with torch.no_grad():
            for images, labels in test_dataloader:
                images = images.to(device)
                labels = labels.to(device)
                outputs = Model(images)
                numbers, predicted = torch.max(outputs.data, 1)
                total += labels.size(0)
                print(f"labels:{labels}")
                print(f"predicted:{predicted}")
                correct += (predicted == labels).sum().item()

                # Keep true labels and positive-class probabilities for the
                # success-rate curve drawn after the last epoch.
                all_labels.extend(labels.cpu().numpy())
                all_predicted_probs.extend(torch.softmax(outputs, dim=1)[:, 1].cpu().numpy())

            print('Testing Accuracy : %.3f %%' % (100 * correct / total))
            writer.add_scalar('test_Accuracy', 100 * correct / total, global_step=_epoch)

        # After the final epoch: prediction-success-rate curve ------------
        if _epoch == config['epoch'] - 1:
            import numpy as np

            all_labels_np = np.array(all_labels, dtype=np.int64)
            # Work on a real ndarray; comparing a plain Python list against
            # a threshold only worked through numpy's reflected operators.
            predicted_probs_np = np.asarray(all_predicted_probs)

            total_positive = np.sum(all_labels_np)  # number of positive samples
            total_test = len(all_labels_np)         # test-set size

            # Sweep 100 probability thresholds over [0, 1].
            thresholds = np.linspace(0, 1, 100)
            x_values = []
            y_values = []

            for threshold in thresholds:
                # Binarize the predicted probabilities at this threshold.
                predicted_labels = (predicted_probs_np >= threshold).astype(int)

                # x: fraction of the test set predicted positive.
                x = np.sum(predicted_labels) / total_test

                # y: recall — fraction of true positives recovered.
                true_positive = np.sum((all_labels_np == 1) & (predicted_labels == 1))
                y = true_positive / total_positive if total_positive > 0 else 0.0

                x_values.append(x)
                y_values.append(y)

            # Plot the prediction-success-rate curve.
            plt.figure()
            plt.plot(x_values, y_values, color='darkorange', lw=2)
            plt.xlabel('percentage of prospective')
            plt.ylabel('success rate')
            plt.title('prediction_success_rate_curve')
            plt.savefig('prediction_success_rate_curve.png')

    # ---------------------------------------------------------------------

    # Model saving is disabled for now; the weights are not needed yet.
    # torch.save(Model, './model/model.pkl')
    # print("模型已保存")
    # ---------------------------------------------------------------------

    # Export curves from the TensorBoard event file -----------------------
    # Close the writer FIRST so every pending scalar is flushed to disk;
    # reading the event file before closing could silently miss the most
    # recent steps in the plots below.
    writer.close()

    # The event file sits inside ./tb_logs (cwd is still the exp directory).
    event_file = os.path.join('./tb_logs', os.listdir('./tb_logs')[0])

    # Read the scalars back out of the event file.
    event_acc = EventAccumulator(event_file)
    event_acc.Reload()

    # Training-loss curve.
    scalars = event_acc.Scalars('training loss')
    plt.figure()
    plt.plot([s.step for s in scalars], [s.value for s in scalars], label='Loss')
    plt.xlabel('Step')
    plt.ylabel('Loss')
    plt.title('training Loss')
    plt.legend()
    plt.savefig('training_loss.png')

    # Test-accuracy curve (previously mislabeled as "Loss").
    scalars_2 = event_acc.Scalars('test_Accuracy')
    plt.figure()
    plt.plot([s.step for s in scalars_2], [s.value for s in scalars_2], label='Accuracy')
    plt.xlabel('Step')
    plt.ylabel('Accuracy')
    plt.title('test_Accuracy')
    plt.legend()
    plt.savefig('test_Accuracy.png')

    # Release all open figures so memory does not grow across config runs.
    plt.close('all')
    # ---------------------------------------------------------------------

    # Snapshot the config that produced this experiment -------------------
    with open('config.yaml', 'w', encoding='utf-8') as file:
        yaml.dump(config, file, default_flow_style=False, allow_unicode=True)
    # ---------------------------------------------------------------------

    # Back to the repo root for the next config ---------------------------
    os.chdir(repo_dir)
    # ---------------------------------------------------------------------



