import os
import sys
import time
import logging
import torch
import torch.nn as nn
from tqdm import tqdm
import math
import json
import argparse
import copy
from transformers import (
    RobertaTokenizer,
    RobertaConfig,
    RobertaModel,
    get_constant_schedule,
    get_constant_schedule_with_warmup,
    get_cosine_schedule_with_warmup,
    get_linear_schedule_with_warmup,
    get_cosine_with_hard_restarts_schedule_with_warmup
)

from v1.training_process.data import CodeSearchNetPYGDataset

from v1.training_process.metrics import bleu

from torch.utils.data import RandomSampler, DataLoader
from v1.models import GraphormerModel, graphormer_base_architecture, Seq2Seq


class LoggingHandler(logging.Handler):
    """Logging handler that routes records through ``tqdm.write`` so log
    lines do not corrupt an active tqdm progress bar."""

    def __init__(self, level=logging.NOTSET):
        super().__init__(level)

    def emit(self, record):
        """Format *record* and print it via tqdm; errors are delegated to
        ``handleError`` instead of crashing the logging pipeline."""
        try:
            msg = self.format(record)
            tqdm.write(msg)
            self.flush()
        except (KeyboardInterrupt, SystemExit):
            raise
        # Bug fix: the original bare `except:` also swallowed BaseExceptions
        # such as GeneratorExit. Catch only Exception, matching the stdlib
        # logging.Handler.emit convention.
        except Exception:
            self.handleError(record)


class Sample(object):
    """One training example: a source-code snippet, its AST description,
    and the target natural-language summary."""

    def __init__(self, idx, source, ast_des, target):
        # Attribute names are read directly by consumers; keep them as-is.
        self.idx, self.source, self.ast_des, self.target = idx, source, ast_des, target


def get_scheduler(optimizer, scheduler: str, warmup_steps: int, t_total: int):
    """
    Returns the correct learning rate scheduler
    """
    scheduler = scheduler.lower()
    if scheduler == 'constantlr':
        return get_constant_schedule(optimizer)
    elif scheduler == 'warmupconstant':
        return get_constant_schedule_with_warmup(optimizer, num_warmup_steps=warmup_steps)
    elif scheduler == 'warmuplinear':
        return get_linear_schedule_with_warmup(optimizer, num_warmup_steps=warmup_steps, num_training_steps=t_total)
    elif scheduler == 'warmupcosine':
        return get_cosine_schedule_with_warmup(optimizer, num_warmup_steps=warmup_steps, num_training_steps=t_total)
    elif scheduler == 'warmupcosinewithhardrestarts':
        return get_cosine_with_hard_restarts_schedule_with_warmup(optimizer, num_warmup_steps=warmup_steps, num_training_steps=t_total)
    else:
        raise ValueError("Unknown scheduler {}".format(scheduler))


def train(model, optimizer, scheduler, train_data, dev_data, accumulation_steps, best_loss, best_bleu, save_dir, valid_dir):
    """Run one training epoch with gradient accumulation and periodic
    dev-set evaluation; returns the best model state_dict found, or None.

    Checkpoints are written under `save_dir`; `valid_dir` is the reference
    file consumed by evaluate_bleu. Relies on the module-level `device`.

    NOTE(review): `best_loss` and `best_bleu` are plain locals here, so
    improvements found in one epoch are NOT visible to the caller's copies
    on the next call — confirm whether they should be returned.
    """
    tr_loss, acc_loss, acc_num = 0, 0, 0

    best_model_weights = None

    # NOTE(review): `eval` shadows the builtin and is never set to True, so
    # the for/else tail below never runs (dead code) — confirm intent.
    eval = False

    model.train()  # model was in eval mode in evaluate(); re-activate the train mode
    batch_num = len(train_data)
    with tqdm(enumerate(train_data), total=batch_num, position=0, file=sys.stdout, desc="training", ascii=True) as iterator:
        for step, batch in iterator:
            # The collate function may emit None for unusable batches.
            if batch is None:
                continue
            optimizer.zero_grad()  # clear gradients first
            torch.cuda.empty_cache()  # releases all unoccupied cached memory

            loss, _, _,  = model(batch, source_ids=batch['source_ids'], source_mask=batch['source_mask'],
                                 target_ids=batch['target_ids'], target_mask=batch['target_mask'])

            acc_num += 1
            # Scale the loss so accumulated gradients average over the
            # accumulation window.
            loss = loss / accumulation_steps
            acc_loss += loss.item()
            loss.backward()

            # Apply the optimizer only every `accumulation_steps` micro-batches.
            if (step + 1) % accumulation_steps == 0:
                optimizer.step()
                # update parameters
                optimizer.zero_grad()
                scheduler.step()
                tr_loss += acc_loss
                train_loss = round(tr_loss * accumulation_steps / acc_num, 4)
                # bar.set_description('loss {}'.format(train_loss))
                iterator.set_postfix_str("batch loss= {}, train loss= {} ".format(acc_loss, train_loss))
                acc_loss = 0

            # Every 4096 steps: BLEU evaluation; checkpoint if it improves.
            if (step + 1) % 4096 == 0:
                tr_loss, acc_num = 0, 0
                eval_bleu = evaluate_bleu(model, dev_data, save_dir, valid_dir)
                logging.info('==> step {} dev bleu: {}'.format(step, eval_bleu))
                if eval_bleu > best_bleu:
                    best_bleu = eval_bleu
                    # Snapshot on CPU, then move the model back to `device`.
                    best_model_weights = copy.deepcopy(model.cpu().state_dict())
                    torch.save(best_model_weights, f'{save_dir}/model_bleu_{step}_{eval_bleu}.pt')
                    model.to(device)
                eval = False
            # Every 1024 steps (when not a 4096 boundary): loss evaluation.
            elif (step + 1) % 1024 == 0:
                tr_loss, acc_num = 0, 0
                eval_loss = evaluate_loss(model, dev_data, mute=True)
                logging.info('==> step {} dev loss: {}'.format(step, eval_loss))
                if eval_loss < best_loss:
                    best_loss = eval_loss
                    best_model_weights = copy.deepcopy(model.cpu().state_dict())
                    torch.save(best_model_weights, f'{save_dir}/model_loss_{step}_{eval_loss}.pt')
                    model.to(device)
        # for/else: runs once after the loop completes without `break`.
        else:
            if eval:
                eval_bleu = evaluate_bleu(model, dev_data, save_dir, valid_dir)
                logging.info('==> step {} dev bleu: {}'.format(len(train_data)-1, eval_bleu))
                last_weight = copy.deepcopy(model.cpu().state_dict())
                if eval_bleu > best_bleu:
                    best_model_weights = last_weight
                torch.save(last_weight, f'{save_dir}/model_last_{eval_bleu}.pt')
                model.to(device)
    return best_model_weights


def evaluate_loss(model, test_data, mute=False):
    """Compute the token-averaged loss of *model* over *test_data*.

    Puts the model in eval mode for the pass and restores train mode
    before returning. When *mute* is False, the running loss total is
    printed after every batch.
    """
    model.eval()
    total_loss = 0
    total_tokens = 0
    with torch.no_grad():
        for _, batch in tqdm(enumerate(test_data), desc='evaluating_loss'):
            _, loss, num = model(batch_data=batch, source_ids=batch['source_ids'],
                                 source_mask=batch['source_mask'],
                                 target_ids=batch['target_ids'],
                                 target_mask=batch['target_mask'])
            total_loss += loss.sum().item()
            total_tokens += num.sum().item()
            if not mute:
                print('==>loss<==', total_loss)
    model.train()
    return total_loss / total_tokens


def evaluate_bleu(model, valid_data, save_dir, valid_dir):
    """Decode *valid_data*, write predictions and references under
    *save_dir* ("valid.output" / "valid.gold"), and return the smoothed
    corpus BLEU rounded to 2 decimals.

    Relies on the module-level `tokenizer` for decoding; references are
    loaded via `read_samples(valid_dir)` and scored with the `bleu` module.
    """
    model.eval()
    p = []
    with torch.no_grad():
        for step, batch in tqdm(enumerate(valid_data), desc='evaluating_bleu'):
            # The collate function may emit None for unusable batches.
            if batch is None:
                continue
            preds = model(batch_data=batch, source_ids=batch['source_ids'], source_mask=batch['source_mask'])
            for pred in preds:
                t = pred[0].cpu().numpy()
                t = list(t)
                # Token id 0 is treated as end-of-sequence/padding: truncate there.
                if 0 in t:
                    t = t[:t.index(0)]
                text = tokenizer.decode(t, clean_up_tokenization_spaces=False)
                p.append(text)
    predictions = []
    with open(os.path.join(save_dir, "valid.output"), 'w', encoding='utf-8') as f, open(os.path.join(save_dir, "valid.gold"), 'w', encoding='utf-8') as f1:
        for ref, gold in zip(p, read_samples(valid_dir)):
            predictions.append(str(gold.idx) + '\t' + ref)
            f.write(str(gold.idx) + '\t' + ref + '\n')
            f1.write(str(gold.idx) + '\t' + gold.target + '\n')

    (goldMap, predictionMap) = bleu.computeMaps(predictions, os.path.join(save_dir, "valid.gold"))
    valid_bleu = round(bleu.bleuFromMaps(goldMap, predictionMap)[0], 2)
    # Bug fix: unlike evaluate_loss(), this function left the model in eval
    # mode, so dropout stayed disabled for the rest of the epoch when called
    # from the middle of train(). Restore train mode before returning.
    model.train()
    return valid_bleu


def read_samples(filename):
    """Parse a JSON-lines dataset file into a list of Sample objects.

    Each line must contain at least `code`, `docstring_tokens` and
    `ast_des`; the docstring tokens are joined and whitespace-normalized
    into the target summary.
    """
    samples = []
    with open(filename, encoding='utf-8') as f:
        for idx, record in enumerate(f):
            js = json.loads(record.strip())
            # NOTE(review): this key is assigned but never read below; the
            # Sample always uses the enumerate index — confirm intent.
            if 'idx' not in js:
                js['idx'] = idx

            summary = ' '.join(js['docstring_tokens']).replace('\n', '')
            summary = ' '.join(summary.strip().split())
            samples.append(
                Sample(
                    idx=idx,
                    source=js['code'],
                    ast_des=js['ast_des'],
                    target=summary,
                )
            )
    return samples


def _str2bool(value):
    """Parse a command-line boolean.

    argparse's `type=bool` is a classic trap: `bool("False")` is True,
    since any non-empty string is truthy. Accept the usual spellings of
    true/false instead.
    """
    if isinstance(value, bool):
        return value
    lowered = value.lower()
    if lowered in ('true', '1', 'yes', 'y'):
        return True
    if lowered in ('false', '0', 'no', 'n'):
        return False
    raise argparse.ArgumentTypeError('expected a boolean value, got {!r}'.format(value))


def parse_args(argv=None):
    """Build and parse the command-line arguments for graphormer training.

    Args:
        argv: optional list of argument strings; defaults to sys.argv[1:].
              (Passing an explicit list keeps the parser testable.)

    Returns:
        argparse.Namespace holding every training hyper-parameter.
    """
    ap = argparse.ArgumentParser("arguments for graphormer training")
    ap.add_argument('-b', '--batch_size', type=int, default=4, help='batch size')
    ap.add_argument('-as', '--accumulation_steps', type=int, default=8, help='gradient accumulation steps')
    ap.add_argument('-beam', '--beam_size', type=int, default=5, help='beam size')
    ap.add_argument('-ep', '--epoch_num', type=int, default=5, help='epoch num')
    ap.add_argument('-lr', '--learning_rate', type=float, default=5e-5, help='learning rate')
    ap.add_argument('-glr', '--graph_learning_rate', type=float, default=5e-5, help='graph learning rate')
    # Bug fix: the help strings for -wd/-ae/-sdo/-na/-spm were copy-paste
    # leftovers describing unrelated options.
    ap.add_argument('-wd', '--weight_decay', type=float, default=0.0, help='weight decay')
    ap.add_argument('-ae', '--adam_epsilon', type=float, default=1e-8, help='epsilon for the Adam optimizer')
    ap.add_argument('-ss', '--scheduler_setting', type=str, default='WarmupLinear',
                    choices=['WarmupLinear', 'ConstantLR', 'WarmupConstant', 'WarmupCosine', 'WarmupCosineWithHardRestarts'])
    ap.add_argument('-mg', '--max_grad_norm', type=float, default=1.,
                    help='maximum gradient norm')
    ap.add_argument('-wp', '--warmup_percent', type=float, default=0.2,
                    help='how many percentage of steps are used for warmup')
    ap.add_argument('-sdo', '--seq2seq_dropout', type=float, default=0.1,
                    help='seq2seq dropout probability')

    ap.add_argument('-nc', '--num_classes', type=int, default=768,
                    help='number of classes or regression targets')
    ap.add_argument('-na', '--num_atoms', type=int, default=512,
                    help='number of node (token) types; overwritten at startup from the tokenizer vocab size')
    ap.add_argument('-mn', '--max_nodes', type=int, default=512,
                    help='max nodes per graph')
    ap.add_argument('-ne', '--num_edges', type=int, default=6,
                    help='number of edge types in the graph')
    ap.add_argument('-nid', '--num_in_degree', type=int, default=512,
                    help='number of in degree types in the graph')
    ap.add_argument('-nod', '--num_out_degree', type=int, default=512,
                    help='number of out degree types in the graph')
    ap.add_argument('-ns', '--num_spatial', type=int, default=512,
                    help='number of spatial types in the graph')
    ap.add_argument('-ned', '--num_edge_dis', type=int, default=128,
                    help='number of edge dis types in the graph')
    ap.add_argument('-mhmd', '--multi_hop_max_dist', type=int, default=20,
                    help='max distance of multi-hop edges')

    ap.add_argument('-spm', '--spatial_pos_max', type=int, default=20,
                    help='maximum spatial position distance')
    ap.add_argument('-et', '--edge_type', type=str, default='multi_hop',
                    help='edge type in the graph')
    ap.add_argument('-s', '--seed', type=int, default=42,
                    help='common.seed')
    # Bug fix: `type=bool` treated every non-empty string (even "False")
    # as True; parse booleans explicitly instead.
    ap.add_argument('-tes', '--train_epoch_shuffle', type=_str2bool, default=False,
                    help='whether to shuffle the dataset at each epoch')
    ap.add_argument('-udd', '--user_data_dir', type=str, default='',
                    help='path to the module of user-defined dataset')
    ap.add_argument('-sd', '--save_dir', type=str, default='../../../checkpoints',
                    help='path to save the checkpoints of the model')

    ap.add_argument('-mtl', '--max_target_length', type=int, default=256,
                    help='inference length')

    return ap.parse_args(argv)


def setup_seed(seed):
    """Seed PyTorch's CPU and current-GPU RNGs for reproducibility.

    Python's `random`, numpy, and cudnn determinism flags are
    intentionally left unconfigured here.
    """
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)


if __name__ == '__main__':

    args = parse_args()
    # Unpack every hyper-parameter from the parsed namespace in one shot.
    # NOTE(review): `args.save_dir.strip(os.sep)` strips separators from BOTH
    # ends of the path; `rstrip(os.sep)` was probably intended — confirm.
    batch_size, accumulation_steps, beam_size, epoch_num, lr, graph_lr, weight_decay, adam_epsilon, scheduler_setting, max_grad_norm, warmup_percent, dropout, max_nodes, multi_hop_max_dist, spatial_pos_max, max_target_length, save_dir, seed = args.batch_size, args.accumulation_steps, args.beam_size, args.epoch_num, args.learning_rate, args.graph_learning_rate, args.weight_decay, args.adam_epsilon, args.scheduler_setting, args.max_grad_norm, args.warmup_percent, args.seq2seq_dropout, args.max_nodes, args.multi_hop_max_dist, args.spatial_pos_max, args.max_target_length, args.save_dir.strip(os.sep), args.seed
    # NOTE(review): GPU index 4 is hard-coded; confirm it exists on the target host.
    device = torch.device('cuda:4' if torch.cuda.is_available() else 'cpu')

    #  Just some code to print debug information to stdout
    logging.basicConfig(format='%(asctime)s - %(message)s',
                        datefmt='%Y-%m-%d %H:%M:%S',
                        level=logging.INFO,
                        handlers=[LoggingHandler()])
    #  /print debug information to stdout

    setup_seed(seed)

    # Dataset sizes are hard-coded for this CodeSearchNet split.
    train_dataset = CodeSearchNetPYGDataset(data_size=164814, max_node=max_nodes, multi_hop_max_dist=multi_hop_max_dist, spatial_pos_max=spatial_pos_max, num_classes=-1, dataset="train", device=device)
    valid_dataset = CodeSearchNetPYGDataset(data_size=5179, max_node=max_nodes, multi_hop_max_dist=multi_hop_max_dist, spatial_pos_max=spatial_pos_max, num_classes=-1, dataset="valid", device=device)

    # length = []
    # for i in range(len(train_dataset)):
    #     length.append(train_dataset[i]['label'].item())

    # train_sampler = CurriculumLearningSampler(train_dataset, thresholds=[64, 128, 256, 512], learning_pace=[1024, 8192, 32768], proportion=0.95)
    # train_sampler = GradualBucketedBatchSampler(train_dataset, length, batch_size=batch_size, num_batch_per_bucket=128)
    # Each validation pass scores a 1000-sample random subset (with replacement).
    valid_sampler = RandomSampler(valid_dataset, num_samples=1000, replacement=True)

    train_dataloader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True, collate_fn=train_dataset.collate)
    valid_dataloader = DataLoader(valid_dataset, batch_size=batch_size, sampler=valid_sampler, collate_fn=valid_dataset.collate)

    # NOTE(review): len(train_dataloader) already counts *batches*, so dividing
    # by batch_size again makes total_steps ~batch_size x too small — confirm.
    total_steps = math.ceil(epoch_num*len(train_dataloader)*1./batch_size)
    warmup_steps = int(total_steps*warmup_percent)

    # CodeBERT backbone: one checkpoint supplies the code tokenizer, the AST
    # tokenizer, the encoder weights, and the config.
    checkpoint = 'microsoft/codebert-base'
    tokenizer = RobertaTokenizer.from_pretrained(checkpoint)
    ast_tokenizer = RobertaTokenizer.from_pretrained(checkpoint)
    roberta = RobertaModel.from_pretrained(checkpoint)
    roberta_config = RobertaConfig.from_pretrained(checkpoint)
    # javalang AST node-type names, registered as special tokens so the AST
    # tokenizer keeps each node type as a single token.
    javalang_special_tokens = ['CompilationUnit', 'Import', 'Documented', 'Declaration', 'TypeDeclaration',
                               'PackageDeclaration',
                               'ClassDeclaration', 'EnumDeclaration', 'InterfaceDeclaration', 'AnnotationDeclaration',
                               'Type',
                               'BasicType', 'ReferenceType', 'TypeArgument', 'TypeParameter', 'Annotation',
                               'ElementValuePair',
                               'ElementArrayValue', 'Member', 'MethodDeclaration', 'FieldDeclaration',
                               'ConstructorDeclaration',
                               'ConstantDeclaration', 'ArrayInitializer', 'VariableDeclaration',
                               'LocalVariableDeclaration',
                               'VariableDeclarator', 'FormalParameter', 'InferredFormalParameter', 'Statement',
                               'IfStatement',
                               'WhileStatement', 'DoStatement', 'ForStatement', 'AssertStatement', 'BreakStatement',
                               'ContinueStatement',
                               'ReturnStatement', 'ThrowStatement', 'SynchronizedStatement', 'TryStatement',
                               'SwitchStatement',
                               'BlockStatement', 'StatementExpression', 'TryResource', 'CatchClause',
                               'CatchClauseParameter',
                               'SwitchStatementCase', 'ForControl', 'EnhancedForControl', 'Expression', 'Assignment',
                               'TernaryExpression',
                               'BinaryOperation', 'Cast', 'MethodReference', 'LambdaExpression', 'Primary', 'Literal',
                               'This',
                               'MemberReference', 'Invocation', 'ExplicitConstructorInvocation',
                               'SuperConstructorInvocation',
                               'MethodInvocation', 'SuperMethodInvocation', 'SuperMemberReference', 'ArraySelector',
                               'ClassReference',
                               'VoidClassReference', 'Creator', 'ArrayCreator', 'ClassCreator', 'InnerClassCreator',
                               'EnumBody',
                               'EnumConstantDeclaration', 'AnnotationMethod', 'Modifier']
    special_tokens_dict = {'additional_special_tokens': javalang_special_tokens}
    num_added_toks = ast_tokenizer.add_special_tokens(special_tokens_dict)

    # Graph node vocabulary = CodeBERT vocab + AST special tokens (+1 extra slot).
    args.num_atoms = tokenizer.vocab_size+num_added_toks+1
    graphormer_base_architecture(args)
    graph_encoder = GraphormerModel.build_model(args, None)
    # Warm-start the graph node embeddings from CodeBERT's input embeddings
    # for the ids shared with the CodeBERT vocabulary.
    graph_encoder.encoder.graph_encoder.graph_node_feature.atom_encoder.weight.data[:tokenizer.vocab_size, :] = roberta.get_input_embeddings().weight.data

    # Transformer decoder sized to match the RoBERTa hidden dimension.
    decoder_layer = nn.TransformerDecoderLayer(
        d_model=roberta_config.hidden_size, nhead=roberta_config.num_attention_heads)
    decoder = nn.TransformerDecoder(decoder_layer, num_layers=6)
    model = Seq2Seq(encoder=roberta, decoder=decoder, gnn_encoder=graph_encoder, config=roberta_config, beam_size=beam_size,
                    max_length=max_target_length, sos_id=tokenizer.cls_token_id, eos_id=tokenizer.sep_token_id).to(device)

    # Optimizer parameter groups: no weight decay for biases/LayerNorm weights,
    # and a separate learning rate for the GNN encoder parameters.
    no_decay = ['bias', 'LayerNorm.weight']
    all_param_optimizer = list(model.named_parameters())
    gnn_param_optimizer = list(model.gnn_encoder.named_parameters())
    other_param_optimizer = []

    for op in all_param_optimizer:
        # print(op[0])
        if 'gnn_encoder' not in op[0]:
            other_param_optimizer.append(op)

    optimizer_grouped_parameters = [
        {'params': [p for n, p in other_param_optimizer if not any(nd in n for nd in no_decay)],
         'weight_decay': weight_decay, 'lr': lr},
        {'params': [p for n, p in other_param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0,
         'lr': lr},

        {'params': [p for n, p in gnn_param_optimizer if not any(nd in n for nd in no_decay)],
         'weight_decay': weight_decay, 'lr': graph_lr},
        {'params': [p for n, p in gnn_param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0,
         'lr': graph_lr}
    ]

    optimizer = torch.optim.AdamW(optimizer_grouped_parameters, lr=lr, eps=adam_epsilon)
    scheduler = get_scheduler(optimizer, scheduler_setting, warmup_steps=warmup_steps, t_total=total_steps)

    best_loss = 1e6
    best_bleu = 0.0
    best_model_dic = None
    timestamp = int(time.time())
    # Checkpoints for this run go in a fresh timestamped subdirectory.
    # NOTE(review): os.mkdir raises if the directory already exists;
    # os.makedirs(..., exist_ok=True) would be safer — confirm intent.
    os.mkdir(f'{save_dir}/{timestamp}')
    for ep in range(args.epoch_num):
        logging.info('\n=====epoch {}/{}====='.format(ep, args.epoch_num))
        # NOTE(review): best_loss/best_bleu are passed by value and never
        # updated from train()'s results, so every epoch compares against
        # the initial values — confirm whether that is intended.
        model_dic = train(model, optimizer, scheduler, train_dataloader, valid_dataloader, accumulation_steps, best_loss, best_bleu, f'{save_dir}/{timestamp}', valid_dataset.ground_truth)
        if model_dic is not None:
            best_model_dic = model_dic
            model.load_model(best_model_dic)
    # assert best_model_dic is not None

    # for testing load the best model
    # model.load_model(best_model_dic)
    logging.info('\n=====Training finished.=====')
