from dataclasses import dataclass
from datetime import datetime
from enum import unique
import json
from typing import Any, Dict, Optional, Tuple, List, Iterable, Union
from common_util.vocab_embeding import Vocabulary
from dataset_adapter.language_python import VarmisuseTask
from models.origin.tensor_gcn import Tensor_GCN
from tasks import VarmisuseForRepairLayer, VarnamingOutputLayer
from models.model_metrics import cal_metrics,pretty_print_epoch_task_metrics,cal_early_stopping_metric
from models.model_builder import name_to_model
from dataset_adapter import DataFold,name_to_dataloader
import torch.nn as nn
import time
import os
import torch
import joblib
from common_util import log
import tasks.varmisuse_model as VarmisuseModel
from .config import Py150kvarmisuseTaskConfig

# Characters treated as separators when normalizing model-name strings
# ("_", "-", space, ",", "/", "\").
# NOTE(review): "singal" looks like a typo for "signal"; name kept as-is
# in case it is referenced from outside this file.
concat_singal = "_- ,/\\"

def name_to_output_model(name: str, args):
    """Return the output-layer model matching ``name``.

    Args:
        name (str): Output model name (lower-case recommended).  Separator
            characters (``_``, ``-``, space, ``,``, ``/``, ``\\``) are
            stripped before matching, so e.g. "var_misuse" matches.
        args: Config providing ``out_features``, ``device``,
            ``max_variable_candidates`` and the dict-file paths.

    Returns:
        A ``VarmisuseForRepairLayer`` or ``VarnamingOutputLayer`` instance.

    Raises:
        ValueError: if ``name`` does not match any known output model.
    """
    # Default to empty dicts so a missing/unreadable dictionary file no
    # longer leaves `value_dict` undefined: the original bare
    # `except: pass` caused a NameError later in the varnaming branch.
    value_dict: Dict[str, Any] = {}
    type_dict: Dict[str, Any] = {}
    try:
        with open(args.value_dict_dir, 'r') as f:
            value_dict = json.load(f)
        with open(args.type_dict_dir, 'r') as f:
            type_dict = json.load(f)
    except (OSError, json.JSONDecodeError):
        pass  # best effort: the varmisuse branch needs neither dict

    name = name.lower()
    # Fix: strip each separator character individually.  The original
    # `name.replace(concat_singal, "")` removed only the literal 7-char
    # substring "_- ,/\", so separators were effectively never stripped.
    name = name.translate({ord(ch): None for ch in "_- ,/\\"})
    if name in ["vm", "varmisuse", "variablemisuse"]:
        # NOTE(review): max_node_per_graph is fed max_variable_candidates —
        # confirm this mapping is intended.
        return VarmisuseForRepairLayer(out_features=args.out_features,
                                       max_node_per_graph=args.max_variable_candidates,
                                       criterion=nn.CrossEntropyLoss(),
                                       metrics=cal_metrics,
                                       device=args.device)
    elif name in ["cc", "varnaming", "codecompletion"]:
        # TODO: to be extended.
        return VarnamingOutputLayer(out_features=args.out_features,
                                    classifier_nums=len(value_dict) + 1,
                                    criterion=nn.CrossEntropyLoss(),
                                    metrics=cal_metrics,
                                    device=args.device)
    else:
        # Typo fixed: "Unkown" -> "Unknown".
        raise ValueError("Unknown output model name '%s'" % name)
    
    

def build_model(upstream_model_name, downstream_model_name, args, **kwargs):
    """Instantiate the backbone (upstream) and output (downstream) models.

    Args:
        upstream_model_name: backbone name, resolved via ``name_to_model``.
        downstream_model_name: output-layer name, resolved via
            ``name_to_output_model``.
        args: shared configuration object passed to both factories.
        **kwargs: accepted for interface compatibility; currently unused.

    Returns:
        Tuple of (upstream_model, downstream_model).
    """
    backbone = name_to_model(upstream_model_name, args)
    output_layer = name_to_output_model(downstream_model_name, args)
    return backbone, output_layer

def load_vocab_dict(args: Py150kvarmisuseTaskConfig):
    """Load and return the ``Vocabulary`` stored at ``args.vocab_dict_path``."""
    vocab_path = args.vocab_dict_path
    return Vocabulary(vocab_path)


def run_epoch(model,
        output_model,
        optimizer,
        epoch_name: str,
        data: Iterable[Any],
        data_fold: DataFold,
        scheduler=None,
        quiet: Optional[bool] = False,
    ):
    """Run one full pass (training or validation) over ``data``.

    Args:
        model: backbone model; called as ``model(**batch_data)``.
        output_model: output layer returning ``(logits, loss, metrics)``.
        optimizer: stepped after each training batch.
        epoch_name (str): human-readable label for progress output.
        data (Iterable[Any]): dataloader exposing ``batch_generator()``.
        data_fold (DataFold): TRAIN runs backprop; VALIDATION runs under
            ``torch.no_grad()`` in eval mode.
        scheduler: optional LR scheduler, stepped once per training batch.
        quiet (Optional[bool], optional): when True, print no progress.
            Defaults to False.

    Returns:
        Tuple of (mean batch loss, per-batch metric results, processed
        graphs, processed batches, graphs/sec, nodes/sec, processed graphs,
        processed nodes).
        NOTE(review): the graph/node counters are never incremented by the
        visible code (the increments are commented out), so those entries
        are always 0 — confirm whether they should be restored.
    """
    # Per-epoch bookkeeping.
    start_time = time.time()
    processed_graphs, processed_nodes, processed_batch = 0, 0, 0
    epoch_loss = 0.0
    task_metric_results = []

    for batch_data in data.batch_generator():
        #processed_graphs += batch_data["num_graphs"]
        #processed_nodes += batch_data["num_nodes"]
        processed_batch += 1

        if data_fold == DataFold.TRAIN:
            optimizer.zero_grad()
            model.train()
            output_model.train()
            output = model(**batch_data)
            logits, loss, metrics = output_model(output, **batch_data)
            epoch_loss += loss.item()
            task_metric_results.append(metrics)

            loss.backward()
            optimizer.step()
            if scheduler:
                # Step the LR schedule once per batch when configured.
                scheduler.step()

        if data_fold == DataFold.VALIDATION:
            # Fix: switch to eval mode so dropout/batch-norm layers behave
            # deterministically during validation (previously the models
            # were left in train mode).  no_grad() additionally disables
            # autograd bookkeeping; eval() and no_grad() are complementary,
            # not interchangeable.
            model.eval()
            output_model.eval()
            with torch.no_grad():
                output = model(**batch_data)
                logits, loss, metrics = output_model(output, **batch_data)
                epoch_loss += loss.item()
                task_metric_results.append(metrics)

        if not quiet:
            # end="\r" returns the cursor to the line start so the next
            # progress line overwrites this one.
            print("Runing %s, batch %i. Loss so far: %.4f" %
                  (epoch_name, processed_batch,
                   epoch_loss / processed_batch),
                  end="\r")

    epoch_time = time.time() - start_time
    # Guard against an empty dataloader / zero elapsed time.
    per_graph_loss = epoch_loss / max(processed_batch, 1)
    graphs_per_sec = processed_graphs / max(epoch_time, 1e-9)
    nodes_per_sec = processed_nodes / max(epoch_time, 1e-9)

    return per_graph_loss, task_metric_results, processed_graphs, processed_batch, graphs_per_sec, nodes_per_sec, processed_graphs, processed_nodes

def train(data, model, output_model, args, quiet=False):
    """Train the model, checkpointing whenever the validation metric improves.

    Args:
        data: dataloader driving both the TRAIN and VALIDATION passes.
        model: backbone model.
        output_model: task-specific output layer.
        args: configuration (lr, cur_epoch, max_epochs, result_dir, ...).
            ``args.cur_epoch`` is advanced in place on each new best epoch
            so a restored run resumes after the checkpoint.
        quiet (bool, optional): suppress per-batch progress output.
            Defaults to False.

    Returns:
        Tuple of (best early-stopping metric, epoch where it occurred,
        its pretty-printed description).  The original tracked these values
        but discarded them; returning them is backward compatible since
        previous callers ignored the implicit ``None`` result.
    """
    def default_dir():
        # Timestamped checkpoint directory, e.g. <result_dir>/2024_5_17_12_30.
        now = datetime.now()
        return os.path.join(args.result_dir, f'{now.year}_{now.month}_{now.day}_{now.hour}_{now.minute}')

    target_dir = default_dir()
    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)

    # Log the full configuration for reproducibility.
    log(json.dumps(vars(args), indent=4))

    # Best validation state so far (lower early-stopping metric is better).
    (best_valid_metric, best_val_metric_epoch,
     best_val_metric_descr) = (float("+inf"), 0, "")
    for epoch in range(args.cur_epoch, args.max_epochs + 1):
        log("== Epoch %i" % epoch)
        # --train
        train_loss, train_task_metrics, train_num_graphs, train_num_batchs, train_graphs_p_s, train_nodes_p_s, train_graphs, train_nodes = run_epoch(
            model,
            output_model,
            optimizer,
            "epoch %i (training)" % epoch,
            data,
            DataFold.TRAIN,
            quiet=quiet)

        if not quiet:
            # "\r\x1b[K": return to the line start and erase the progress line.
            print("\r\x1b[K", end='')
        log(
            " Train: loss: %.5f || %s || graphs/sec: %.2f | nodes/sec: %.0f | graphs: %.0f | nodes: %.0f | lr: %0.6f"
            %
            (train_loss,
             pretty_print_epoch_task_metrics(
                 train_task_metrics, train_num_graphs, train_num_batchs),
             train_graphs_p_s, train_nodes_p_s, train_graphs, train_nodes, optimizer.state_dict()["param_groups"][0]['lr']))

        # --validate
        valid_loss, valid_task_metrics, valid_num_graphs, valid_num_batchs, valid_graphs_p_s, valid_nodes_p_s, test_graphs, test_nodes = run_epoch(
            model,
            output_model,
            optimizer,
            "epoch %i (validation)" % epoch,
            data,
            DataFold.VALIDATION,
            quiet=quiet)

        early_stopping_metric = cal_early_stopping_metric(
            valid_task_metrics)
        valid_metric_descr = pretty_print_epoch_task_metrics(
            valid_task_metrics, valid_num_graphs, valid_num_batchs)
        if not quiet:
            print("\r\x1b[K", end='')
        log(
            " valid: loss: %.5f || %s || graphs/sec: %.2f | nodes/sec: %.0f | graphs: %.0f | nodes: %.0f | lr: %0.6f"
            % (valid_loss, valid_metric_descr, valid_graphs_p_s,
               valid_nodes_p_s, test_graphs, test_nodes, optimizer.state_dict()["param_groups"][0]['lr']))

        if early_stopping_metric < best_valid_metric:
            # New best epoch: advance the resume epoch and checkpoint
            # models + optimizer + config to target_dir.
            args.cur_epoch = epoch + 1
            save_model(model, output_model, optimizer, args, target_dir)
            log(
                "  (Best epoch so far, target metric decreased to %.5f from %.5f. Saving to '%s')"
                % (early_stopping_metric, best_valid_metric,
                   target_dir))
            best_valid_metric = early_stopping_metric
            best_val_metric_epoch = epoch
            best_val_metric_descr = valid_metric_descr

    return best_valid_metric, best_val_metric_epoch, best_val_metric_descr
                
def save_model(model, output_model, optimizer, args, target_dir):
    """Persist models, optimizer and config to ``target_dir``.

    Writes ``model.joblib``, ``output_model.joblib``, ``optimizer.pt`` and
    ``params.json`` (the latter is ``vars(args)`` pretty-printed).
    """
    # exist_ok avoids the check-then-create race of the original
    # os.path.exists() guard.
    os.makedirs(target_dir, exist_ok=True)
    joblib.dump(model, os.path.join(target_dir, "model.joblib"))
    joblib.dump(output_model, os.path.join(target_dir, "output_model.joblib"))
    # NOTE(review): this pickles the whole optimizer object; saving
    # optimizer.state_dict() would be the more portable convention.
    torch.save(optimizer, os.path.join(target_dir, "optimizer.pt"))
    with open(os.path.join(target_dir, "params.json"), "w") as f:
        json.dump(vars(args), f, indent=4)
        

def build_varmisuse_model(args: Py150kvarmisuseTaskConfig):
    """Build dataloader and models from ``args``, then start training."""
    vocabulary = load_vocab_dict(args)
    train_data = name_to_dataloader(args.dataset_name, args.train_data_dir,
                                    DataFold.TRAIN, vocabulary, args, 0)
    backbone, output_layer = build_model(args.backbone_model,
                                         args.output_model, args)
    train(train_data, backbone, output_layer, args)
    
def run_for_varmiuse_model():
    """Entry point: train the varmisuse model with the default task config."""
    config = Py150kvarmisuseTaskConfig()
    build_varmisuse_model(config)


def test():
    """Smoke test: run the varmisuse model forward on two batches."""
    def default_test(args):
        vocab_dict = load_vocab_dict(args)
        data = name_to_dataloader(args.dataset_name, args.train_data_dir,
                                  DataFold.TRAIN, vocab_dict, args, 0)

        # Fix: ``VarmisuseModel`` is the *module* (imported via
        # ``import tasks.varmisuse_model as VarmisuseModel``); calling it
        # directly raised TypeError.  Instantiate the class inside it,
        # matching the usage in test_train().
        model = VarmisuseModel.VarmisuseModel(args)
        model.to(args.device)
        count = 0

        for batch_data in data.batch_generator():
            bug_location_predict, repair_predict, loss, metrics = model(**batch_data)

            # Only two batches are needed for a forward-pass smoke check.
            count += 1
            if count > 1:
                break

    args = Py150kvarmisuseTaskConfig()
    default_test(args)
    
def test_train(args: "Optional[Py150kvarmisuseTaskConfig]" = None):
    """Minimal hand-rolled training loop for VarmisuseModel (train + validate).

    Args:
        args: task configuration; a fresh ``Py150kvarmisuseTaskConfig`` is
            created when omitted.  Fix: the original default was
            ``Py150kvarmisuseTaskConfig()``, which is evaluated once at
            function-definition time and shared across every call — the
            classic mutable-default-argument pitfall.
    """
    if args is None:
        args = Py150kvarmisuseTaskConfig()

    model = VarmisuseModel.VarmisuseModel(args)
    vocab_dict = load_vocab_dict(args)
    data = name_to_dataloader(args.dataset_name, args.train_data_dir,
                              DataFold.TRAIN, vocab_dict, args, 0)
    # NOTE(review): the validation loader is built with DataFold.TRAIN even
    # though it reads args.validate_data_dir — confirm whether
    # DataFold.VALIDATION was intended here.
    vali_data = name_to_dataloader(args.dataset_name, args.validate_data_dir,
                                   DataFold.TRAIN, vocab_dict, args, 0)
    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)

    model.to(args.device)

    for epoch in range(args.cur_epoch, args.max_epochs + 1):
        print("== Epoch %i\n" % epoch)

        # Per-epoch accumulators for losses and the three precision metrics.
        train_loss = 0.
        vali_loss = 0.
        train_loc_precison_no_bug = 0
        train_loc_precison_bug = 0
        train_repair_precison = 0
        vali_loc_precison_no_bug = 0
        vali_loc_precison_bug = 0
        vali_repair_precison = 0

        # --train
        model.train()  # fix: restore train mode after last epoch's eval()
        for i, batch in enumerate(data.batch_generator()):
            bug_location_predict, repair_predict, loss, metrics = model(**batch)

            # metrics layout (inferred from the accumulator names below):
            # [loc_prec_no_bug, loc_prec_bug, repair_prec, loss] — confirm.
            train_loss += metrics[3]

            loss.backward()

            train_loc_precison_no_bug += metrics[0]
            train_loc_precison_bug += metrics[1]
            train_repair_precison += metrics[2]

            optimizer.step()
            optimizer.zero_grad()

            print(
                f"\rTrain: loss: {train_loss/(i+1)}, prec: {train_loc_precison_no_bug/(i+1)} / {train_loc_precison_bug/(i+1)} / {train_repair_precison/(i+1)}"
                ,end=" "*30)
        # --validate
        print()
        model.eval()  # fix: deterministic dropout/batch-norm for validation
        with torch.no_grad():
            total = 0
            for i, batch in enumerate(vali_data.batch_generator()):
                bug_location_predict, repair_predict, loss, metrics = model(**batch)

                vali_loss += metrics[3]
                vali_loc_precison_no_bug += metrics[0]
                vali_loc_precison_bug += metrics[1]
                vali_repair_precison += metrics[2]
                total += 1
            # Guard against an empty validation set (was ZeroDivisionError).
            total = max(total, 1)
            log(
            f"valid: loss: {vali_loss/total} valid precison:  {vali_loc_precison_no_bug/(total)}/{vali_loc_precison_bug/total}/{vali_repair_precison/total}")