import os
import torch
import torch.nn as nn
import math
import argparse
from transformers import (
    RobertaTokenizer,
    RobertaConfig,
    RobertaModel
)

import fitlog
from fastNLP import Trainer, Callback, Metric
from fastNLP.core.callbacks import LoadBestModelCallback, CheckpointCallback, FitlogCallback, EarlyStopCallback, LRSchedCallback, TorchWarmupCallback

from v2.training_process.data import CodeSearchNetPYGDataset
from datasets import load_metric
from itertools import chain

from torch.utils.data import RandomSampler, DataLoader
from v2.models import GraphormerModel, Seq2Seq


class BLEUScore(Metric):
    """Corpus-level sacreBLEU metric accumulated over evaluation batches.

    Decoded predictions and reference strings are buffered per worker in
    ``update``; ``get_metric`` gathers the buffers from every distributed
    worker and scores them with the ``sacrebleu`` metric.
    """

    def __init__(self):
        super().__init__()
        # Per-worker buffers of decoded predictions / reference texts.
        self.preds = []
        self.labels = []
        self.bleu = load_metric('sacrebleu')

    def update(self, preds, target):
        """Buffer one batch of predictions and their reference targets."""
        self.preds.extend(preds)
        self.labels.extend(target)

    def get_metric(self) -> dict:
        """Gather buffers across workers and compute the sacreBLEU scores."""
        gathered = self.all_gather_object([self.preds, self.labels])
        all_preds = chain.from_iterable(g[0] for g in gathered)
        all_refs = chain.from_iterable(g[1] for g in gathered)
        return self.bleu.compute(predictions=all_preds, references=all_refs)

    def reset(self):
        """Clear the local buffers between evaluation runs."""
        self.preds = []
        self.labels = []


class EvaluateCallback(Callback):
    """fastNLP callback that runs one final evaluation when training ends."""

    def on_train_end(self, trainer):
        # Score the final model state (training may stop between the
        # periodic ``evaluate_every`` checkpoints).
        trainer.run_evaluate()


class Sample:
    """Lightweight container for one code-summarization example.

    Attributes:
        idx: index of the example within its dataset split.
        source: source-code token string for the text encoder.
        ast_des: AST description (serialized tree) for the graph encoder.
        target: reference natural-language summary.
    """

    def __init__(self, idx, source, ast_des, target):
        self.idx = idx
        self.source = source
        self.ast_des = ast_des
        self.target = target

    def __repr__(self):
        # Keep the repr short: source/target can be very long strings.
        return f"Sample(idx={self.idx!r})"


def get_model_and_tokenizer(checkpoint=None):
    """Build the Seq2Seq model: CodeBERT encoder + Graphormer graph encoder
    + Transformer decoder.

    NOTE(review): this function reads the module-level globals ``args``,
    ``save_dir``, ``beam_size`` and ``max_target_length`` that are bound in
    the ``__main__`` block, so it must only be called after ``parse_args()``
    and the unpacking assignment have run. It also mutates ``args``
    (``args.num_atoms``) as a side effect.

    Args:
        checkpoint: optional checkpoint file name under ``save_dir``. When
            given, the full model state dict is loaded from it; when None,
            the graph encoder's atom embeddings are initialized from
            CodeBERT's input embeddings instead.

    Returns:
        A ``(model, tokenizer, ast_tokenizer)`` tuple.
    """
    tokenizer = RobertaTokenizer.from_pretrained('microsoft/codebert-base')
    ast_tokenizer = RobertaTokenizer.from_pretrained('microsoft/codebert-base')
    # javalang AST node-type names, registered as special tokens so that the
    # AST tokenizer keeps each node type as a single, dedicated token id.
    javalang_special_tokens = ['CompilationUnit', 'Import', 'Documented', 'Declaration', 'TypeDeclaration', 'PackageDeclaration', 'ClassDeclaration', 'EnumDeclaration', 'InterfaceDeclaration', 'AnnotationDeclaration', 'Type', 'BasicType', 'ReferenceType', 'TypeArgument', 'TypeParameter', 'Annotation', 'ElementValuePair', 'ElementArrayValue', 'Member', 'MethodDeclaration', 'FieldDeclaration', 'ConstructorDeclaration', 'ConstantDeclaration', 'ArrayInitializer', 'VariableDeclaration', 'LocalVariableDeclaration', 'VariableDeclarator', 'FormalParameter', 'InferredFormalParameter', 'Statement', 'IfStatement', 'WhileStatement', 'DoStatement', 'ForStatement', 'AssertStatement', 'BreakStatement', 'ContinueStatement', 'ReturnStatement', 'ThrowStatement', 'SynchronizedStatement', 'TryStatement', 'SwitchStatement', 'BlockStatement', 'StatementExpression', 'TryResource', 'CatchClause', 'CatchClauseParameter', 'SwitchStatementCase', 'ForControl', 'EnhancedForControl', 'Expression', 'Assignment', 'TernaryExpression', 'BinaryOperation', 'Cast', 'MethodReference', 'LambdaExpression', 'Primary', 'Literal', 'This', 'MemberReference', 'Invocation', 'ExplicitConstructorInvocation', 'SuperConstructorInvocation', 'MethodInvocation', 'SuperMethodInvocation', 'SuperMemberReference', 'ArraySelector', 'ClassReference', 'VoidClassReference', 'Creator', 'ArrayCreator', 'ClassCreator', 'InnerClassCreator', 'EnumBody', 'EnumConstantDeclaration', 'AnnotationMethod', 'Modifier']
    special_tokens_dict = {'additional_special_tokens': javalang_special_tokens}
    num_added_toks = ast_tokenizer.add_special_tokens(special_tokens_dict)

    roberta = RobertaModel.from_pretrained('microsoft/codebert-base')
    roberta_config = RobertaConfig.from_pretrained('microsoft/codebert-base')

    # Graph node vocabulary = CodeBERT vocab + the AST special tokens
    # (+1 presumably for a padding/unknown node id — TODO confirm against
    # GraphormerModel's atom encoder).
    args.num_atoms = tokenizer.vocab_size + num_added_toks + 1
    graph_encoder = GraphormerModel.build_model(args)
    if checkpoint is None:
        # Warm-start the graph node embeddings with CodeBERT's word
        # embeddings for the ids they share; the AST special-token rows keep
        # their random init.
        graph_encoder.encoder.graph_encoder.graph_node_feature.atom_encoder.weight.data[:tokenizer.vocab_size, :] = roberta.get_input_embeddings().weight.data

    decoder_layer = nn.TransformerDecoderLayer(d_model=roberta_config.hidden_size, nhead=roberta_config.num_attention_heads)
    decoder = nn.TransformerDecoder(decoder_layer, num_layers=6)
    # CLS / SEP double as the generation start / end symbols.
    model = Seq2Seq(encoder=roberta, decoder=decoder, gnn_encoder=graph_encoder, tokenizer=tokenizer, config=roberta_config,
                    graph_embedding=args.num_classes, beam_size=beam_size, dropout=0.2, max_length=max_target_length,
                    sos_id=tokenizer.cls_token_id, eos_id=tokenizer.sep_token_id)
    if checkpoint is not None:
        model.load_state_dict(torch.load(f"{save_dir}/{checkpoint}"), strict=True)

    return model, tokenizer, ast_tokenizer


def setup_seed(seed):
    """Seed all relevant RNGs for reproducible training runs.

    Seeds PyTorch (CPU and every visible CUDA device) as well as Python's
    and NumPy's generators — a DDP launch spawns one process per GPU, so
    ``manual_seed_all`` covers the multi-GPU case too.

    Note: full CUDA determinism would additionally require
    ``torch.backends.cudnn.deterministic = True``, which is deliberately
    left off because it can slow training noticeably.

    Args:
        seed: non-negative integer seed (NumPy requires it < 2**32).
    """
    import random

    import numpy as np

    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    np.random.seed(seed)
    random.seed(seed)


def parse_args(argv=None):
    """Build and parse the command-line arguments for graphormer training.

    Fixes the classic argparse pitfall: ``type=bool`` converts any
    non-empty string to True (``bool('False') is True``), so flags like
    ``-enb false`` were silently ignored. Boolean options now go through an
    explicit string parser while keeping their original defaults.

    Args:
        argv: optional list of argument strings; when None, ``sys.argv`` is
            used. Unknown arguments (e.g. flags injected by
            ``torch.distributed.launch``) are tolerated and ignored.

    Returns:
        argparse.Namespace with all training / model hyper-parameters.
    """

    def str2bool(value):
        # Explicit boolean parsing: accept common true/false spellings and
        # reject anything else instead of treating it as truthy.
        if isinstance(value, bool):
            return value
        lowered = value.lower()
        if lowered in ('yes', 'true', 't', 'y', '1'):
            return True
        if lowered in ('no', 'false', 'f', 'n', '0'):
            return False
        raise argparse.ArgumentTypeError(f'boolean value expected, got {value!r}')

    ap = argparse.ArgumentParser("arguments for graphormer training")
    ap.add_argument('-b', '--batch_size', type=int, default=4, help='batch size')
    ap.add_argument('-as', '--accumulation_steps', type=int, default=4, help='gradient accumulation steps')
    ap.add_argument('-es', '--evaluation_steps', type=int, default=8192, help='evaluation batch steps')
    ap.add_argument('-beam', '--beam_size', type=int, default=5, help='beam size')
    ap.add_argument('-ep', '--epoch_num', type=int, default=5, help='epoch num')
    ap.add_argument('-lr', '--learning_rate', type=float, default=5e-5, help='learning rate')
    ap.add_argument('-glr', '--graph_learning_rate', type=float, default=5e-5, help='graph learning rate')
    ap.add_argument('-wd', '--weight_decay', type=float, default=0.0, help='weight decay')
    ap.add_argument('-ae', '--adam_epsilon', type=float, default=1e-8, help='adam epsilon')
    ap.add_argument('--max_grad_norm', type=float, default=1.,
                    help='maximum gradient norm')
    ap.add_argument('-wp', '--warmup_percent', type=float, default=0.2,
                    help='how many percentage of steps are used for warmup')
    ap.add_argument('-sdo', '--seq2seq_dropout', type=float, default=0.1,
                    help='pgnn dropout probability')

    ap.add_argument('-nc', '--num_classes', type=int, default=768,
                    help='number of classes or regression targets')
    ap.add_argument('-na', '--num_atoms', type=int, default=512,
                    help='node types per graph')  # code_bert vocab size
    ap.add_argument('--max_nodes', type=int, default=512,
                    help='max nodes per graph')
    ap.add_argument('-ne', '--num_edges', type=int, default=6,
                    help='number of edge types in the graph')
    ap.add_argument('-nid', '--num_in_degree', type=int, default=512,
                    help='number of in degree types in the graph')
    ap.add_argument('-nod', '--num_out_degree', type=int, default=512,
                    help='number of out degree types in the graph')
    ap.add_argument('-ns', '--num_spatial', type=int, default=512,
                    help='number of spatial types in the graph')
    ap.add_argument('-ned', '--num_edge_dis', type=int, default=128,
                    help='number of edge dis types in the graph')
    ap.add_argument('--multi_hop_max_dist', type=int, default=20,
                    help='max distance of multi-hop edges')

    ap.add_argument('-spm', '--spatial_pos_max', type=int, default=20,
                    help='max distance of spatial position')
    ap.add_argument('-et', '--edge_type', type=str, default='multi_hop',
                    help='edge type in the graph')
    ap.add_argument('-s', '--seed', type=int, default=42,
                    help='common.seed')
    ap.add_argument('-tes', '--train_epoch_shuffle', type=str2bool, default=False,
                    help='whether to shuffle the dataset at each epoch')
    ap.add_argument('-udd', '--user_data_dir', type=str, default='',
                    help='path to the module of user-defined dataset')
    ap.add_argument('-sd', '--save_dir', type=str, default='../../../checkpoints',
                    help='path to save the checkpoints of the model')

    ap.add_argument('--max_target_length', type=int, default=256,
                    help='inference length')

    # graphormer
    ap.add_argument('-eed', '--encoder_embed_dim', type=int, default=768,
                    help='encoder embedding dimension')
    ap.add_argument('-el', '--encoder_layers', type=int, default=12,
                    help='encoder layer num')
    ap.add_argument('-eah', '--encoder_attention_heads', type=int, default=32,
                    help='encoder attention heads')
    ap.add_argument('-efe', '--encoder_ffn_embed_dim', type=int, default=768,
                    help='encoder feed-forward network embedding dimension')
    ap.add_argument('-do', '--dropout', type=float, default=0.0,
                    help='dropout')
    ap.add_argument('-atdo', '--attention_dropout', type=float, default=0.1,
                    help='attention dropout')
    ap.add_argument('-acdo', '--act_dropout', type=float, default=0.1,
                    help='act dropout')

    ap.add_argument('-afn', '--activation_fn', type=str, default="gelu",
                    help='activation function')
    ap.add_argument('-enb', '--encoder_normalize_before', type=str2bool, default=True,
                    help='encoder normalize before')
    ap.add_argument('-agi', '--apply_graphormer_init', type=str2bool, default=True,
                    help='apply graphormer init')
    ap.add_argument('-seioe', '--share_encoder_input_output_embed', type=str2bool, default=False,
                    help='share input output embed')
    ap.add_argument('-ntpe', '--no_token_positional_embeddings', type=str2bool, default=False,
                    help='no token positional embeddings')
    ap.add_argument('-pln', '--pre_layernorm', type=str2bool, default=False,
                    help='pre layernorm')

    args, _ = ap.parse_known_args(argv)
    return args


if __name__ == '__main__':

    # PYTHONPATH=../../../ CUDA_VISIBLE_DEVICES=0,1,2,3 python  -m torch.distributed.launch --nproc_per_node=4 ./train.py
    args = parse_args()
    # Unpack hyper-parameters into module-level names. Several of these
    # (beam_size, max_target_length, save_dir) are read as globals inside
    # get_model_and_tokenizer, so this assignment must run before it is
    # called. NOTE(review): ``.strip(os.sep)`` removes separator characters
    # from BOTH ends of save_dir — harmless for the relative default, but
    # it would mangle an absolute path like '/ckpt' into 'ckpt'; verify.
    batch_size, accumulation_steps, evaluation_steps, beam_size, epoch_num, lr, graph_lr, weight_decay, adam_epsilon, max_grad_norm, warmup_percent, dropout, max_nodes, multi_hop_max_dist, spatial_pos_max, max_target_length, save_dir, seed = args.batch_size, args.accumulation_steps, args.evaluation_steps, args.beam_size, args.epoch_num, args.learning_rate, args.graph_learning_rate, args.weight_decay, args.adam_epsilon, args.max_grad_norm, args.warmup_percent, args.seq2seq_dropout, args.max_nodes, args.multi_hop_max_dist, args.spatial_pos_max, args.max_target_length, args.save_dir.strip(os.sep), args.seed

    setup_seed(seed)

    # data_size values appear to be hard-coded split sizes for the prepared
    # CodeSearchNet graph dataset — TODO confirm they match the data on disk.
    train_dataset = CodeSearchNetPYGDataset(data_size=164814, max_node=max_nodes, multi_hop_max_dist=multi_hop_max_dist, spatial_pos_max=spatial_pos_max, num_classes=-1, dataset="train")
    valid_dataset = CodeSearchNetPYGDataset(data_size=5179, max_node=max_nodes, multi_hop_max_dist=multi_hop_max_dist, spatial_pos_max=spatial_pos_max, num_classes=-1, dataset="valid")

    # Evaluate on a random 1000-example subsample (with replacement) to keep
    # the periodic validation pass cheap.
    valid_sampler = RandomSampler(valid_dataset, num_samples=1000, replacement=True)

    train_dataloader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True, collate_fn=train_dataset.collate)
    valid_dataloader = DataLoader(valid_dataset, batch_size=batch_size, sampler=valid_sampler, collate_fn=valid_dataset.collate)

    # NOTE(review): len(train_dataloader) is already the number of batches,
    # so dividing by batch_size again looks wrong (accumulation_steps would
    # be the expected divisor for optimizer steps) — verify the intent.
    total_steps = math.ceil(epoch_num*len(train_dataloader)*1./batch_size)
    # NOTE(review): warmup_steps is computed but never passed to
    # TorchWarmupCallback below — the callback runs with its defaults.
    warmup_steps = int(total_steps*warmup_percent)

    model, tokenizer, ast_tokenizer = get_model_and_tokenizer()

    # Standard BERT-style split: no weight decay on biases and LayerNorm
    # weights; the graph encoder gets its own learning rate (graph_lr).
    no_decay = ['bias', 'LayerNorm.weight']
    all_param_optimizer = list(model.named_parameters())
    gnn_param_optimizer = list(model.gnn_encoder.named_parameters())
    other_param_optimizer = []

    for op in all_param_optimizer:
        # print(op[0])
        if 'gnn_encoder' not in op[0]:
            other_param_optimizer.append(op)

    optimizer_grouped_parameters = [
        {'params': [p for n, p in other_param_optimizer if not any(nd in n for nd in no_decay)],
         'weight_decay': weight_decay, 'lr': lr},
        {'params': [p for n, p in other_param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0,
         'lr': lr},

        {'params': [p for n, p in gnn_param_optimizer if not any(nd in n for nd in no_decay)],
         'weight_decay': weight_decay, 'lr': graph_lr},
        {'params': [p for n, p in gnn_param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0,
         'lr': graph_lr}
    ]

    optimizer = torch.optim.AdamW(optimizer_grouped_parameters, lr=lr, eps=adam_epsilon)
    # NOTE(review): with step_size=total_steps the StepLR decays only once,
    # at the very end of the planned schedule — confirm that is intended.
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=total_steps, gamma=0.9)

    callbacks = [
        LoadBestModelCallback(save_folder=save_dir),
        CheckpointCallback(folder=save_dir, topk=3, monitor="bleu"),
        FitlogCallback(log_loss_every=64),
        TorchWarmupCallback(),
        LRSchedCallback(scheduler),
        EarlyStopCallback(),
        EvaluateCallback()
    ]

    # Maps batch keys to the model's forward/generate argument names.
    input_mapping = {
        "batch": "batch_data",
        "source_ids": "source_ids",
        "source_mask": "source_mask",
        "target_ids": "target_ids",
        "target_mask": "target_mask",
        "target": "target"
    }

    train_output_mapping = {
        "loss": "loss"
    }

    # Feeds BLEUScore.update(preds, target) during evaluation.
    evaluate_output_mapping = {
        "preds": "preds",
        "target": "target"
    }

    fitlog.set_log_dir("../../../logs/")
    fitlog.add_hyper(args)

    # NOTE(review): n_epochs=100000 with overfit_batches=100 trains on a
    # 100-batch subset indefinitely (until EarlyStopCallback fires) — this
    # looks like a debugging configuration left enabled; epoch_num from the
    # CLI is not passed here. device=-1 defers device selection to the
    # distributed launcher. Verify before a full training run.
    trainer = Trainer(
        model,
        train_dataloader,
        optimizer,
        driver="torch",
        device=-1,
        n_epochs=100000,
        evaluate_dataloaders=valid_dataloader,
        train_fn="forward",
        evaluate_fn="generate",
        callbacks=callbacks,
        metrics={"bleu": BLEUScore()},
        evaluate_every=evaluation_steps,
        input_mapping=input_mapping,
        train_output_mapping=train_output_mapping,
        evaluate_output_mapping=evaluate_output_mapping,
        accumulation_steps=accumulation_steps,
        fp16=True,
        monitor="score#bleu",
        larger_better=True,
        n_batches=-1,
        overfit_batches=100,
        torch_kwargs={'ddp_kwargs': {'find_unused_parameters': True}}
    )

    trainer.run()