import os
import pdb
import time

import numpy as np
import pandas as pd
import torch

import logger
import utils as u

class Trainer():
    """Drives training/validation/testing of a temporal GCN plus a classifier.

    Collaborators (all project types):
      splitter   - provides the train/dev/test splits and the tasker.
      gcn        - graph network producing node embeddings per time step.
      classifier - maps gathered node embeddings to class scores.
      comp_loss  - callable computing the loss from predictions and labels.
    """
    def __init__(self,args, splitter, gcn, classifier, comp_loss, dataset, num_classes):
        self.args = args
        self.splitter = splitter
        self.tasker = splitter.tasker
        self.gcn = gcn
        self.classifier = classifier
        self.comp_loss = comp_loss

        self.num_nodes = dataset.num_nodes
        self.data = dataset
        self.num_classes = num_classes

        # Per-run logger; log_epoch_done() also yields the evaluation measure.
        self.logger = logger.Logger(args, self.num_classes)

        # Must run after self.gcn / self.classifier are assigned above.
        self.init_optimizers(args)

        if self.tasker.is_static:
            # Static tasks reuse one fixed adjacency/feature history for
            # every sample (see prepare_static_sample).
            adj_matrix = u.sparse_prepare_tensor(self.tasker.adj_matrix, torch_size = [self.num_nodes], ignore_batch_dim = False)
            self.hist_adj_list = [adj_matrix]
            self.hist_ndFeats_list = [self.tasker.nodes_feats.float()]

    def init_optimizers(self,args):
        params = self.gcn.parameters()
        # 在trainer.py中修改优化器初始化
        #params = []
        #pdb.set_trace()
        #for p in params_org:
        #    #if isinstance(p, nn.ParameterList):
        #    if hasattr(p, '__iter__'):
        #        params.extend(list(p))
        #    else:
        #        params.append(p)
        self.gcn_opt = torch.optim.Adam(params, lr = args.learning_rate)
        params = self.classifier.parameters()
        self.classifier_opt = torch.optim.Adam(params, lr = args.learning_rate)
        self.gcn_opt.zero_grad()
        self.classifier_opt.zero_grad()

    def save_checkpoint(self, state, filename='checkpoint.pth.tar'):
        torch.save(state, filename)

    def load_checkpoint(self, filename, model):
        if os.path.isfile(filename):
            print("=> loading checkpoint '{}'".format(filename))
            checkpoint = torch.load(filename)
            epoch = checkpoint['epoch']
            self.gcn.load_state_dict(checkpoint['gcn_dict'])
            self.classifier.load_state_dict(checkpoint['classifier_dict'])
            self.gcn_opt.load_state_dict(checkpoint['gcn_optimizer'])
            self.classifier_opt.load_state_dict(checkpoint['classifier_optimizer'])
            self.logger.log_str("=> loaded checkpoint '{}' (epoch {})".format(filename, checkpoint['epoch']))
            return epoch
        else:
            self.logger.log_str("=> no checkpoint found at '{}'".format(filename))
            return 0

    def train(self):
        """Main training loop.

        Runs one TRAIN pass per epoch, then (when a dev split exists) a VALID
        pass used for early stopping, and a TEST pass whenever the current
        epoch matches the best validation measure so far.

        Fix: removed the dead, hard-coded ``log_file`` path and the stale
        commented-out embedding-dump code that referenced it.
        """
        self.tr_step = 0
        best_eval_valid = 0
        eval_valid = 0
        epochs_without_impr = 0

        for e in range(self.args.num_epochs):
            eval_train, nodes_embs = self.run_epoch(self.splitter.train, e, 'TRAIN', grad = True)
            if len(self.splitter.dev) > 0:
                eval_valid, _ = self.run_epoch(self.splitter.dev, e, 'VALID', grad = False)
                if eval_valid > best_eval_valid:
                    best_eval_valid = eval_valid
                    epochs_without_impr = 0
                    print ('### w'+str(self.args.rank)+') ep '+str(e)+' - Best valid measure:'+str(eval_valid))
                else:
                    epochs_without_impr += 1
                    if epochs_without_impr > self.args.early_stop_patience:
                        print ('### w'+str(self.args.rank)+') ep '+str(e)+' - Early stop.')
                        break

            # Evaluate on test only for epochs that achieved the best
            # validation measure so far.
            if len(self.splitter.test) > 0 and eval_valid == best_eval_valid:
                eval_test, _ = self.run_epoch(self.splitter.test, e, 'TEST', grad = False)

    def run_epoch(self, split, epoch, set_name, grad):
        """Run one pass over ``split``.

        Args:
            split: iterable of raw samples for this split.
            epoch: epoch index (for logging only).
            set_name: 'TRAIN', 'VALID' or 'TEST'; TEST logs every minibatch.
            grad: when True, backpropagate and step the optimizers.

        Returns:
            (eval_measure, nodes_embs) where nodes_embs come from the LAST
            minibatch. Fix: nodes_embs is now initialized to None so an empty
            split no longer raises UnboundLocalError at the return.
        """
        # Log every minibatch on TEST, effectively once per epoch otherwise.
        log_interval = 1 if set_name == 'TEST' else 999
        self.logger.log_epoch_start(epoch, len(split), set_name, minibatch_log_interval=log_interval)

        nodes_embs = None  # guard for an empty split
        torch.set_grad_enabled(grad)
        for s in split:
            if self.tasker.is_static:
                s = self.prepare_static_sample(s)
            else:
                s = self.prepare_sample(s)

            # label_sp is indexable by prediction-step integers, each entry a
            # dict with 'idx' and 'vals' (see prepare_sample/ignore_batch_dim).
            idxs = []
            vals = []
            for j in range(len(s.label_sp)):
                idxs.append(s.label_sp[j]['idx'].long())
                vals.append(s.label_sp[j]['vals'])

            predictions, nodes_embs = self.predict(s.hist_adj_lists,
                                                   s.hist_ndFeats_lists,
                                                   idxs,
                                                   s.node_mask_lists)
            loss = self.comp_loss(predictions, vals)
            if set_name in ['TEST', 'VALID'] and self.args.task == 'link_pred':
                self.logger.log_minibatch(predictions, vals, loss.detach(), adj = idxs)
            else:
                self.logger.log_minibatch(predictions, vals, loss.detach())
            if grad:
                self.optim_step(loss)

        # Re-enable autograd for whatever runs after an eval pass.
        torch.set_grad_enabled(True)
        eval_measure = self.logger.log_epoch_done()

        return eval_measure, nodes_embs

    def predict(self,hist_adj_list,hist_ndFeats_list,node_indices,mask_list):
        nodes_embs = self.gcn(hist_adj_list,
                              hist_ndFeats_list,
                              mask_list)
        #print("nodes_embs shape")
        #print(nodes_embs.shape)

        gather_predictions = []

        #node_indices = node_indices.long()    #node_indices -- num_predict * target_nodes_num(目标节点个数不一致)
        for i in range(len(node_indices)):
            predict_batch_size = 100000
            gather_prediction=[]
            nodes_emb = nodes_embs[i]
            node_indice = node_indices[i]
            for i in range(1 +(node_indice.size(1)//predict_batch_size)):
                cls_input = self.gather_node_embs(nodes_emb, node_indice[:, i*predict_batch_size:(i+1)*predict_batch_size])
                predictions = self.classifier(cls_input)
                # print(predictions.shape)
                gather_prediction.append(predictions)
            gather_prediction=torch.cat(gather_prediction, dim=0)
            gather_predictions.append(gather_prediction)
        return gather_predictions, nodes_embs

    def gather_node_embs(self,nodes_embs,node_indices):
        cls_input = []
        ## pdb.set_trace()
        for node_set in node_indices:
            cls_input = nodes_embs[node_set]
        return cls_input

    def optim_step(self,loss):
        self.tr_step += 1
        loss.backward()

        if self.tr_step % self.args.steps_accum_gradients == 0:
            self.gcn_opt.step()
            self.classifier_opt.step()

            self.gcn_opt.zero_grad()
            self.classifier_opt.zero_grad()

    def prepare_sample2(self, sample):
        """Debug variant of sample preparation for FLAT (non-nested) samples.

        Moves adjacency, feature and mask tensors onto the configured device
        and prints mask/label diagnostics along the way. Appears unused by
        the visible code path (run_epoch calls prepare_sample or
        prepare_static_sample).
        """
        sample = u.Namespace(sample)
        for i, adj in enumerate(sample.hist_adj_list):
            adj = u.sparse_prepare_tensor(adj, torch_size=[self.num_nodes])
            sample.hist_adj_list[i] = adj.to(self.args.device)

            nodes = self.tasker.prepare_node_feats(sample.hist_ndFeats_list[i])

            sample.hist_ndFeats_list[i] = nodes.to(self.args.device)
            node_mask = sample.node_mask_list[i]
            sample.node_mask_list[i] = node_mask.to(
                self.args.device).t()  # transposed to have same dimensions as scorer

            # Print mask info for timestep i (first 10 nodes only).
            print(f"\n--- Mask for timestep {i} ---")
            print("Mask tensor:", node_mask[:10])  # mask values for the first 10 nodes
            print("Active nodes ratio: {}/{} = {:.2f}%".format(
                node_mask.sum().item(),
                node_mask.shape[0],
                node_mask.sum().item() / node_mask.shape[0] * 100
            ))

        # NOTE(review): ignore_batch_dim's default num_predict=5 indexes
        # label_sp[0..4]; confirm the label structure passed here supports
        # integer indexing, otherwise this call raises.
        label_sp = self.ignore_batch_dim(sample.label_sp)

        if self.args.task in ["link_pred", "edge_cls"]:
            label_sp['idx'] = label_sp['idx'].to(self.args.device).t()
        else:
            label_sp['idx'] = label_sp['idx'].to(self.args.device)

        label_sp['vals'] = label_sp['vals'].type(torch.long).to(self.args.device)
        sample.label_sp = label_sp

        # Print detailed label information.
        print("\n=== Label Details ===")
        print(f"Task type: {self.args.task}")
        print("Label indices (first 10):\n", label_sp['idx'][:, :10].cpu().numpy())  # assumes a 2D link-pred-style idx layout — TODO confirm
        print("Label values distribution:")
        print(pd.Series(label_sp['vals'].cpu().numpy()).value_counts().to_string())

        # Original shape printout (pre-existing debug code).
        print("Adj matrix shape:", sample.hist_adj_list[0].shape)
        print("Node features shape:", sample.hist_ndFeats_list[0].shape)
        print("Mask shape:", sample.node_mask_list[0].shape)
        print("Label indices shape:", sample.label_sp['idx'].shape)

        return sample
        
    def prepare_sample(self, sample, num_predict=5):
        """Move a nested (multi-prediction-step) sample onto the device.

        Fix: normalized the inconsistent (9-space) indentation of the original
        body; logic is unchanged.

        Args:
            sample: raw sample dict with nested hist_adj_lists,
                hist_ndFeats_lists, node_mask_lists and label_sp.
            num_predict: number of prediction steps in label_sp.

        Returns:
            A Namespace with all tensors on self.args.device; per-step node
            features are the static features concatenated with the prepared
            dynamic features.
        """
        sample = u.Namespace(sample)

        # hist_adj_lists is a nested list: [prediction step][time step].
        for i, adj_sublist in enumerate(sample.hist_adj_lists):
            for j, adj in enumerate(adj_sublist):
                adj = u.sparse_prepare_tensor(adj, torch_size=[self.num_nodes])
                sample.hist_adj_lists[i][j] = adj.to(self.args.device)

        # hist_ndFeats_lists mirrors that nesting; merge static + dynamic feats.
        for i, feats_sublist in enumerate(sample.hist_ndFeats_lists):
            for j, feat in enumerate(feats_sublist):
                nodes = self.tasker.prepare_node_feats(feat)
                merged_feats = torch.cat([self.data.nodes_static_feats, nodes], dim=1)
                sample.hist_ndFeats_lists[i][j] = merged_feats.to(self.args.device)

        # node_mask_lists: transposed to match the scorer's layout.
        for i, mask_sublist in enumerate(sample.node_mask_lists):
            for j, node_mask in enumerate(mask_sublist):
                sample.node_mask_lists[i][j] = node_mask.to(self.args.device).t()

        label_sp = self.ignore_batch_dim(sample.label_sp, num_predict)

        if self.args.task in ["link_pred", "edge_cls"]:
            label_sp['idx'] = label_sp['idx'].to(self.args.device).t()
        else:
            for i in range(num_predict):
                label_sp[i]['idx'] = label_sp[i]['idx'].to(self.args.device)
        for i in range(num_predict):
            label_sp[i]['vals'] = label_sp[i]['vals'].type(torch.long).to(self.args.device)
        sample.label_sp = label_sp
        return sample


    def prepare_sample_r(self, sample):
        """Move a FLAT (non-nested) sample's tensors onto the configured
        device, merging static node features with each step's dynamic
        features, and strip the batch dimension from the labels."""
        sample = u.Namespace(sample)
        for step, raw_adj in enumerate(sample.hist_adj_list):
            prepared = u.sparse_prepare_tensor(raw_adj, torch_size = [self.num_nodes])
            sample.hist_adj_list[step] = prepared.to(self.args.device)

            step_feats = self.tasker.prepare_node_feats(sample.hist_ndFeats_list[step])
            step_feats = torch.cat([self.data.nodes_static_feats, step_feats], dim=1)
            sample.hist_ndFeats_list[step] = step_feats.to(self.args.device)

            # Transposed to have the same dimensions as the scorer.
            mask = sample.node_mask_list[step]
            sample.node_mask_list[step] = mask.to(self.args.device).t()

        label_sp = self.ignore_batch_dim(sample.label_sp)

        if self.args.task in ["link_pred", "edge_cls"]:
            # The .t() makes embeddings row vectors so pairs can be
            # concatenated downstream.
            label_sp['idx'] = label_sp['idx'].to(self.args.device).t()
        else:
            label_sp['idx'] = label_sp['idx'].to(self.args.device)

        label_sp['vals'] = label_sp['vals'].type(torch.long).to(self.args.device)
        sample.label_sp = label_sp

        return sample

    def prepare_static_sample(self, sample):
        """Wrap a static-task sample: attach the fixed adjacency/feature
        history precomputed in __init__ and package idx/label into a
        label_sp dict."""
        sample = u.Namespace(sample)
        sample.hist_adj_list = self.hist_adj_list
        sample.hist_ndFeats_list = self.hist_ndFeats_list
        sample.label_sp = {'idx': [sample.idx], 'vals': sample.label}
        return sample

    def ignore_batch_dim(self,adj,num_predict=5):
        """Strip the leading (size-1) batch dimension from a label structure.

        For link_pred/edge_cls, drops the batch dim of adj['idx']. The loop
        drops the batch dim of each prediction step's 'vals', so adj is
        expected to be indexable by the integers 0..num_predict-1.

        NOTE(review): for link_pred/edge_cls the integer-indexing loop below
        still runs; confirm that label structure supports adj[i] for those
        tasks, otherwise this raises (e.g. KeyError).
        """
        if self.args.task in ["link_pred", "edge_cls"]:
            adj['idx'] = adj['idx'][0]
        for i in range(num_predict):
            adj[i]['vals'] = adj[i]['vals'][0]
        return adj

    def save_node_embs_csv2(self, nodes_embs, indexes, file_name):
        # 强制将节点嵌入转移到CPU并转换类型
        nodes_embs = nodes_embs.cpu().double()  # <-- 关键修复点1

        csv_node_embs = []
        max_id = len(self.tasker.data.contID_to_origID) - 1

        for node_id in indexes:
            node_id_int = node_id.item()
            if node_id_int > max_id:
                print(f"跳过无效节点 ID: {node_id_int}（最大有效 ID 为 {max_id}）")
                continue

            try:
                # 创建CPU端的orig_ID张量（默认就在CPU）
                orig_ID = torch.DoubleTensor([self.tasker.data.contID_to_origID[node_id_int]])  # <-- 关键修复点2

                # 获取已转换到CPU的节点嵌入
                node_emb = nodes_embs[node_id]

                # 确保两个张量都在CPU后进行拼接
                combined = torch.cat((orig_ID, node_emb))  # 不再需要显式转换类型

                csv_node_embs.append(combined.detach().numpy())

            except KeyError:
                print(f"KeyError: {node_id_int} 不存在于映射中")

        # 保存时使用更高效的方式
        if csv_node_embs:  # 避免空数据保存
            pd.DataFrame(
                np.vstack(csv_node_embs),  # 比np.array更高效
                dtype=np.float64
            ).to_csv(
                file_name,
                header=None,
                index=None,
                compression="gzip"
            )
        else:
            print("警告：未保存任何有效嵌入数据")

    def save_node_embs_csv(self, nodes_embs, indexes, file_name):
        csv_node_embs = []
        for node_id in indexes:
            orig_ID = torch.DoubleTensor([self.tasker.data.contID_to_origID[node_id]])

            csv_node_embs.append(torch.cat((orig_ID,nodes_embs[node_id].double())).detach().numpy())

        pd.DataFrame(np.array(csv_node_embs)).to_csv(file_name, header=None, index=None, compression='gzip')
        #print ('Node embs saved in',file_name)
