from Trainer.trainer import Trainer
from Dataloaders.DataGroup.datagroup import DataGroup
from Dataloaders.txt_dataloader import TxtDataLoader
from tqdm import tqdm
import os
import torch
import numpy as np
import time
from utils import setup_seed
from torch.utils.tensorboard import SummaryWriter

class DDLATrainer(Trainer):
    """DDLA-style trainer: trains one independent model per key guess
    (0..255), logs per-key losses to TensorBoard, checkpoints every epoch,
    and finally reports the key whose model reached the lowest loss.

    Expected ``config`` keys (as read below): ``project_name`` and a
    ``trainer`` sub-dict with ``seed``, ``device``, ``epochs`` and
    ``num_sub_heads``.
    """

    def __init__(self, config):
        self.config = config
        self.train_cfg = config["trainer"]
        setup_seed(self.train_cfg["seed"])
        self.device = self.train_cfg["device"]
        self.train_time = 0
        # Time spent grouping the data; stays 0 when groups already exist.
        # BUGFIX: must be initialised BEFORE the grouping step — the original
        # reset it to 0 *after* measuring, so the measurement was always lost.
        self.group_time = 0
        self.set_loss()
        self.set_net()
        self.load_weight()
        self.txt_loader = TxtDataLoader(config)
        self.data_group = DataGroup(config)

        # One TensorBoard writer per key guess, plus one global writer
        # (used below for the model graph).
        self.writer_list = []
        for i in range(256):
            self.writer_list.append(SummaryWriter(f"./project_datasave/{self.config['project_name']}/log/{i}"))
        self.writer = SummaryWriter(f"./project_datasave/{self.config['project_name']}/log/")

        if not self.data_group.check_group():
            print("进行遍历猜测密钥&&数据分组操作:")
            group_start_time = time.time()
            self.data_group.generate_txt_group()
            self.group_time = time.time() - group_start_time
        else:
            print("数据分组已经存在，无需再次生成")

        # Log the model graph once, using a sample batch from key guess 0.
        input_test_data = next(iter(self.txt_loader.get_train_loader(0)))[0]
        self.writer.add_graph(self.model, input_test_data.to(self.device))

    def train(self):
        """Train a fresh model for every one of the 256 key guesses.

        For each key guess ``k`` the data loader re-labels the traces under
        the hypothesis key == k, the network is re-initialised, and the
        per-epoch mean loss is recorded; all histories are finally passed to
        :meth:`save_loss`.
        """
        epochs = self.train_cfg["epochs"]
        loss_all_model = []  # loss_all_model[k][epoch] = mean loss for key k
        train_start_time = time.time()
        for k in range(256):
            print(f"运行猜测密钥:{k}")
            # Loader yielding batches labelled under key hypothesis k.
            self.train_loader = self.txt_loader.get_train_loader(k)
            # Re-initialise the network so each key guess starts from scratch.
            self.set_net()
            self.model.train()
            loss_save_list = []

            for epoch in range(epochs):
                running_loss = 0.0
                print(f"epoch: {epoch}")
                for batch_idx, data in enumerate(tqdm(self.train_loader), 0):
                    # Last element of the batch tuple is the label tensor;
                    # everything before it is an input tensor.
                    labels = data[-1].to(self.device)
                    inputs = [data[j].to(self.device) for j in range(len(data) - 1)]
                    self.optimizer.zero_grad()
                    loss = 0.0
                    for inp in inputs:  # renamed from `input` (shadowed builtin)
                        outputs = self.model(inp)
                        if isinstance(outputs, list):
                            # Multi-head model: sum the loss over sub-heads.
                            # BUGFIX: the inner index previously reused `i`,
                            # clobbering the enumerate() batch index above.
                            for head in range(self.train_cfg["num_sub_heads"]):
                                loss += self.loss(outputs[head], labels)
                        else:
                            loss += self.loss(outputs, labels)
                    loss.backward()
                    self.optimizer.step()
                    running_loss += loss.item()
                epoch_loss = running_loss / len(self.train_loader)
                self.writer_list[k].add_scalar("loss", epoch_loss, epoch)
                print(f"Loss: {epoch_loss}")
                loss_save_list.append(epoch_loss)
                self.save_model(epoch, k)
            # Cumulative wall-clock time since training of key 0 started.
            self.train_time = time.time() - train_start_time
            loss_all_model.append(loss_save_list)

        # Persist the loss history and report the best (lowest-loss) model.
        self.save_loss(loss_all_model)

    def save_model(self, epoch, key):
        """Checkpoint the current model weights for key guess ``key``.

        Weights are written to
        ``./project_datasave/<project>/model/<key>/every_epochs/model_<epoch>.pth``.
        """
        model_dir = os.path.join(
            "./project_datasave", self.config["project_name"],
            "model", str(key), "every_epochs",
        )
        # exist_ok avoids the exists-check/makedirs race of the original.
        os.makedirs(model_dir, exist_ok=True)
        torch.save(self.model.state_dict(), os.path.join(model_dir, f"model_{epoch}.pth"))

    def save_loss(self, loss_all_model):
        """Write every key's loss history to ``log/loss.txt``, print the key
        whose model reached the lowest loss, and save a combined loss-curve
        figure to ``figure/loss.png``.

        :param loss_all_model: list (one entry per key guess) of per-epoch
            mean-loss lists, as built by :meth:`train`.
        """
        print(loss_all_model)

        min_model_loss = float("inf")  # was a magic 99999 sentinel
        min_model_index = 0
        with open("./project_datasave/" + self.config["project_name"] + "/log/loss.txt", "w") as f:
            for i, loss_save_list in enumerate(loss_all_model):
                for loss_epoch in loss_save_list:
                    f.write(str(loss_epoch) + " ")
                # Guard against an empty history; `>=` keeps the LAST key on
                # ties (original behaviour preserved).
                if loss_save_list and min_model_loss >= min(loss_save_list):
                    min_model_loss = min(loss_save_list)
                    min_model_index = i
                f.write("\n")
        print(f"最小损失模型:{min_model_index}, 损失值:{min_model_loss}")

        # Plot all loss curves on one figure. matplotlib is imported lazily
        # (as in the original) so training does not require a display stack.
        import matplotlib.pyplot as plt
        fig, ax = plt.subplots()
        for loss_save_list in loss_all_model:
            ax.plot(range(len(loss_save_list)), loss_save_list)
        plt.xlabel("epoch")
        plt.ylabel("loss")
        figure_dir = os.path.join("./project_datasave", self.config["project_name"], "figure")
        # BUGFIX: the figure directory was never created, so savefig failed
        # on a fresh project.
        os.makedirs(figure_dir, exist_ok=True)
        plt.savefig(os.path.join(figure_dir, "loss.png"))
        plt.close(fig)  # release the figure instead of leaking it
