import sys
import torch
from tqdm import tqdm
from torch.utils.data import DataLoader


class Trainer:
    """Bundle a model, optimizer and loss into train / validate / predict loops.

    Checkpoints of the best model (by validation loss) are written to
    ``pths/<dataname>.pth``.
    """

    def __init__(self, dataname, model, optimizer, criterion, device):
        """
        Args:
            dataname: dataset identifier; used to name the checkpoint file.
            model: torch module to train; moved to `device` here.
            optimizer: optimizer already bound to `model`'s parameters.
            criterion: loss function ``(outputs, targets) -> scalar tensor``.
            device: torch device for the model and all batches.
        """
        self.dataname = dataname
        self.model = model.to(device)
        self.criterion = criterion
        self.optimizer = optimizer
        self.device = device

    def train(self,
              trainloader: DataLoader,
              valloader: DataLoader,
              target_mean,
              target_std,
              base_mean,
              base_std,
              epochs: int,
              valepoch: int = 1,
              patience: int = 3
              ):
        '''
        Core training loop with periodic validation and early stopping.

        Args:
            trainloader (DataLoader): training batches of (x, graph, targets, base)
            valloader (DataLoader): validation batches in the same format
            target_mean: target de-normalization mean (forwarded to `val`)
            target_std: target de-normalization std (forwarded to `val`)
            base_mean: base-signal de-normalization mean (forwarded to `val`)
            base_std: base-signal de-normalization std (forwarded to `val`)
            epochs (int): number of training epochs
            valepoch (int): validate every `valepoch` epochs
            patience (int): early-stopping patience (non-improving validations)
        '''
        notgood = 0
        bestloss = sys.float_info.max
        # Tolerance band for early stopping: a validation loss that is worse
        # than the best but within `eta` of it counts neither as an
        # improvement nor as a strike against patience.
        eta = 0.001
        for epoch in range(epochs):
            self.model.train()
            total_loss = 0.0
            for x, graph, targets, base in tqdm(trainloader, desc=f"training epoch {epoch}"):
                # Move the batch to the training device (`base` is unused here).
                x = x.to(self.device)
                graph = graph.to(self.device)
                targets = targets.to(self.device)
                self.optimizer.zero_grad()
                outputs = self.model(x, graph)
                loss = self.criterion(outputs, targets)
                loss.backward()
                self.optimizer.step()
                total_loss += loss.item()
            print(f"Epoch {epoch+1}, Training Loss: {total_loss / len(trainloader)}")
            if epoch % valepoch == 0:
                # Shrink the tolerance band every 5th epoch so later epochs
                # must improve by a tighter margin. NOTE(review): this decay
                # only runs on validation epochs, so with valepoch > 1 some
                # decays are skipped — confirm that is intended.
                if epoch % 5 == 4:
                    eta *= 0.7
                valloss = self.val(valloader, target_mean, target_std, base_mean, base_std)
                print(f"valloss is {valloss}")
                if valloss < bestloss:
                    notgood = 0
                    print(f"best loss from {bestloss} to {valloss}")
                    bestloss = valloss
                    # Saves the full module object (not just a state_dict), so
                    # loading requires the original class to be importable.
                    torch.save(self.model, f"pths/{self.dataname}.pth")
                elif abs(valloss - bestloss) > eta:
                    notgood += 1
                    print(f"early stopping {notgood}/{patience}")
                if notgood >= patience:
                    break

    def val(self, valloader, target_mean, target_std, base_mean, base_std):
        '''
        Return the mean criterion loss over `valloader`.

        The ``*_mean`` / ``*_std`` arguments are accepted for interface
        symmetry with `train` / `predict` but are unused: the loss is
        computed in normalized space, exactly as during training.
        '''
        total_loss = 0.0
        self.model.eval()
        # Fix: run inference under no_grad so validation does not build
        # autograd graphs (the original leaked memory/time doing so).
        with torch.no_grad():
            for x, graph, targets, base in tqdm(valloader, desc="valing"):
                # Move the batch to the device (`base` is unused here).
                x = x.to(self.device)
                graph = graph.to(self.device)
                targets = targets.to(self.device)
                outputs = self.model(x, graph)
                loss = self.criterion(outputs, targets)
                total_loss += loss.item()
        return total_loss / len(valloader)

    def predict(self, dataloader, target_mean, target_std, base_mean, base_std, use_base=True):
        '''
        Run inference over `dataloader`, de-normalize, and collect results.

        Every 100th batch, the adjacency matrices returned by the model are
        dumped to text logs and heatmap PNGs via `_dump_adjacencies`.

        Args:
            dataloader: batches of (x, graph, targets, base)
            target_mean: de-normalization mean for targets/outputs
            target_std: de-normalization std for targets/outputs
            base_mean: de-normalization mean for the base signal
            base_std: de-normalization std for the base signal
            use_base (bool): if True, add the de-normalized base back to both
                predictions and targets; otherwise add 0.

        Returns:
            (res, target): lists of de-normalized predictions and ground truth.
        '''
        res = []
        target = []
        self.model.eval()
        idx = 0
        # Fix: inference under no_grad (the original built autograd graphs
        # for every prediction batch needlessly).
        with torch.no_grad():
            for x, graph, targets, base in dataloader:
                # Move the batch to the device.
                x = x.to(self.device)
                graph = graph.to(self.device)
                targets = targets.to(self.device)
                if use_base:
                    base = base.to(self.device)
                    # De-normalize the base signal before adding it back.
                    base = base * base_std + base_mean
                else:
                    base = 0
                # Inference mode returns predictions `ty`, attention `ta`, and
                # four adjacency matrices (presumably global/attention/dynamic/
                # static — TODO confirm against the model definition).
                ty, ta, adj_g, adj_a, adj_d, adj_s = self.model(x, graph, is_training=False)
                if idx % 100 == 0:
                    self._dump_adjacencies(idx, adj_g, adj_a, adj_d, adj_s)
                idx += 1
                # De-normalize outputs and targets and add the base back.
                outputs = ty * target_std + target_mean + base
                targets = (targets * target_std + target_mean + base)
                res += outputs.tolist()
                target += targets.tolist()
        return res, target

    def _dump_adjacencies(self, idx, adj_g, adj_a, adj_d, adj_s):
        '''Dump each adjacency matrix of batch `idx` to a .log file and a heatmap .png.'''
        # Imported lazily: plotting dependencies are only needed when the
        # periodic debug dump actually runs.
        import matplotlib.pyplot as plt
        import seaborn as sns

        def draw_adj(adj_temp, filename):
            # Heatmap of the first sample's adjacency matrix.
            test_a = adj_temp[0].detach().cpu().numpy()
            sns.heatmap(test_a, cmap="coolwarm", annot=False, fmt=".2f",
                        xticklabels=range(1, test_a.shape[1] + 1),
                        yticklabels=range(1, test_a.shape[0] + 1))
            plt.xlabel("Node Index")
            plt.ylabel("Node Index")
            plt.title("Dynamic graph")
            plt.savefig(filename)
            plt.close()

        def print_adj(adj_temp, filename):
            # Plain-text dump of the first sample's adjacency matrix.
            test_a = adj_temp[0].detach().cpu().numpy()
            with open(filename, 'w') as f:
                for i in range(len(test_a)):
                    for j in range(len(test_a[i])):
                        f.write(str(test_a[i][j]) + ' ')
                    f.write('\n')

        for tag, adj_m in (('g', adj_g), ('a', adj_a), ('d', adj_d), ('s', adj_s)):
            print_adj(adj_m, f'adj_{tag}_{idx}.log')
            draw_adj(adj_m, f'adj_{tag}_{idx}.png')

        # Reproduce the normalization presumably applied to adj_g inside the
        # model (row-normalize, then add self-loops) and dump each
        # intermediate for side-by-side inspection.
        adj = adj_g
        div = torch.unsqueeze(adj.sum(-1), -1) + 1e-9
        print(div.shape)
        test_d = div[0].detach().cpu().numpy()
        with open(f'adj_div_{idx}.log', 'w') as f:
            for i in range(len(test_d)):
                f.write(str(test_d[i]) + ' ')

        adj = adj / (torch.unsqueeze(adj.sum(-1), -1) + 1e-9)
        test_t = adj[0].detach().cpu().numpy()
        with open(f'adj_t_{idx}.log', 'w') as f:
            for i in range(len(test_t)):
                for j in range(len(test_t[i])):
                    f.write(str(test_t[i][j]) + ' ')
                f.write('\n')

        adj = adj + torch.eye(adj.shape[-1]).to(adj.device)
        test_e = adj[0].detach().cpu().numpy()
        with open(f'adj_e_{idx}.log', 'w') as f:
            for i in range(len(test_e)):
                for j in range(len(test_e[i])):
                    f.write(str(test_e[i][j]) + ' ')
                f.write('\n')

        draw_adj(adj, f'adj_t_{idx}.png')
    