import argparse
import json
import logging
import os
from dataclasses import dataclass

import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import (
    RobertaTokenizer,
    RobertaConfig,
    RobertaModel
)

from v1.models import GraphormerModel, graphormer_base_architecture, Seq2Seq
from v1.training_process.data import CodeSearchNetPYGDataset
from v1.training_process.metrics import bleu


class LoggingHandler(logging.Handler):
    """A logging.Handler that routes records through ``tqdm.write`` so
    log lines do not corrupt an active tqdm progress bar.
    """

    def emit(self, record):
        """Format *record* and print it via tqdm.write.

        Matches the modern stdlib StreamHandler pattern: catching
        ``Exception`` (not a bare ``except:``) lets KeyboardInterrupt and
        SystemExit propagate naturally — they subclass BaseException —
        so the original explicit re-raise branch is unnecessary, and we
        no longer accidentally swallow GeneratorExit.
        """
        try:
            msg = self.format(record)
            tqdm.write(msg)
            self.flush()
        except Exception:
            # Delegate to logging's standard error reporting instead of
            # crashing the program over a log line.
            self.handleError(record)


@dataclass
class Sample:
    """One evaluation example read from the JSONL test file.

    Attributes:
        idx: zero-based position of the example in the file.
        source: raw source-code string (``js['code']``).
        ast_des: AST description string (``js['ast_des']``).
        target: whitespace-normalized reference docstring.
    """
    idx: int
    source: str
    ast_des: str
    target: str


def evaluate(model, test_data, save_dir, test_dir):
    """Run beam-search inference over *test_data* and return the BLEU score.

    Writes two files into *save_dir*: ``test.output`` (id<TAB>prediction)
    and ``test.gold`` (id<TAB>reference), then scores them with the
    project's BLEU implementation.

    Args:
        model: Seq2Seq model whose forward returns beam predictions when
            called with source ids/mask only.
        test_data: DataLoader yielding batches (or None placeholders,
            which are skipped).
        save_dir: directory receiving the output/gold files.
        test_dir: path to the JSONL file of gold samples (read via
            ``read_samples``) — despite the name it is a file, not a dir.

    Returns:
        BLEU-4 score rounded to two decimals.
    """
    model.eval()
    hypotheses = []
    with torch.no_grad():
        for step, batch in tqdm(enumerate(test_data), desc='evaluating_bleu'):
            if batch is None:
                continue
            preds = model(batch_data=batch, source_ids=batch['source_ids'], source_mask=batch['source_mask'])
            for pred in preds:
                t = list(pred[0].cpu().numpy())
                # Truncate at the first 0 token (padding / end-of-sequence).
                if 0 in t:
                    t = t[:t.index(0)]
                # NOTE: `tokenizer` is a module-level global created in the
                # __main__ section; this function must run after it exists.
                text = tokenizer.decode(t, clean_up_tokenization_spaces=False)
                hypotheses.append(text)
    predictions = []
    with open(os.path.join(save_dir, "test.output"), 'w', encoding='utf-8') as f, open(
            os.path.join(save_dir, "test.gold"), 'w', encoding='utf-8') as f1:
        for hyp, gold in zip(hypotheses, read_samples(test_dir)):
            predictions.append(str(gold.idx) + '\t' + hyp)
            f.write(str(gold.idx) + '\t' + hyp + '\n')
            f1.write(str(gold.idx) + '\t' + gold.target + '\n')

    # BUG FIX: score against the gold file written above ("test.gold").
    # The original referenced "valid.gold", which this script never creates.
    (goldMap, predictionMap) = bleu.computeMaps(predictions, os.path.join(save_dir, "test.gold"))
    valid_bleu = round(bleu.bleuFromMaps(goldMap, predictionMap)[0], 2)
    return valid_bleu


def read_samples(filename):
    """Read evaluation examples from a JSONL file into Sample objects.

    Each line must be a JSON object with ``code``, ``docstring_tokens``
    and ``ast_des`` keys.  The target docstring is rebuilt from its
    tokens with newlines removed and whitespace collapsed to single
    spaces.  The Sample's ``idx`` is the zero-based line number.
    """
    samples = []
    with open(filename, encoding='utf-8') as handle:
        for line_no, raw in enumerate(handle):
            record = json.loads(raw.strip())
            # Kept from the original: backfill 'idx' on the parsed dict
            # (the Sample below uses the line number regardless).
            if 'idx' not in record:
                record['idx'] = line_no

            docstring = ' '.join(record['docstring_tokens']).replace('\n', '')
            docstring = ' '.join(docstring.strip().split())
            samples.append(
                Sample(
                    idx=line_no,
                    source=record['code'],
                    ast_des=record['ast_des'],
                    target=docstring,
                )
            )
    return samples


def parse_args(argv=None):
    """Parse command-line options for graphormer training/evaluation.

    Args:
        argv: optional list of argument strings; defaults to
            ``sys.argv[1:]`` (passing an explicit list keeps the function
            testable and importable).

    Returns:
        argparse.Namespace holding all options.
    """

    def _str2bool(value):
        # BUG FIX: argparse's ``type=bool`` is a trap — ``bool("False")``
        # is True because every non-empty string is truthy, so the flag
        # could never be set to False from the command line.  Parse the
        # common textual spellings instead.
        if isinstance(value, bool):
            return value
        if value.lower() in ('yes', 'true', 't', 'y', '1'):
            return True
        if value.lower() in ('no', 'false', 'f', 'n', '0'):
            return False
        raise argparse.ArgumentTypeError(f'boolean value expected, got {value!r}')

    ap = argparse.ArgumentParser("arguments for graphormer training")
    ap.add_argument('-b', '--batch_size', type=int, default=4, help='batch size')
    ap.add_argument('-as', '--accumulation_steps', type=int, default=8, help='gradient accumulation steps')
    ap.add_argument('-beam', '--beam_size', type=int, default=10, help='beam size')
    ap.add_argument('-ep', '--epoch_num', type=int, default=1, help='epoch num')
    ap.add_argument('-lr', '--learning_rate', type=float, default=5e-5, help='learning rate')
    # Help strings below were copy-paste errors ("learning rate") in the
    # original; corrected to describe the actual option.
    ap.add_argument('-glr', '--graph_learning_rate', type=float, default=5e-5, help='graph encoder learning rate')
    ap.add_argument('-wd', '--weight_decay', type=float, default=0.0, help='weight decay')
    ap.add_argument('-ae', '--adam_epsilon', type=float, default=1e-8, help='epsilon for the Adam optimizer')
    ap.add_argument('-ss', '--scheduler_setting', type=str, default='WarmupLinear',
                    choices=['WarmupLinear', 'ConstantLR', 'WarmupConstant', 'WarmupCosine', 'WarmupCosineWithHardRestarts'])
    ap.add_argument('-mg', '--max_grad_norm', type=float, default=1.,
                    help='maximum gradient norm')
    ap.add_argument('-wp', '--warmup_percent', type=float, default=0.2,
                    help='how many percentage of steps are used for warmup')

    ap.add_argument('-nc', '--num_classes', type=int, default=768,
                    help='number of classes or regression targets')

    ap.add_argument('-na', '--num_atoms', type=int, default=512,
                    help='number of atom (node token) types; overwritten '
                         'below with the CodeBERT vocab size')
    ap.add_argument('-mn', '--max_nodes', type=int, default=512,
                    help='max nodes per graph')
    ap.add_argument('-ne', '--num_edges', type=int, default=512,
                    help='number of edge types in the graph')
    ap.add_argument('-nid', '--num_in_degree', type=int, default=512,
                    help='number of in degree types in the graph')
    ap.add_argument('-nod', '--num_out_degree', type=int, default=512,
                    help='number of out degree types in the graph')
    ap.add_argument('-ns', '--num_spatial', type=int, default=512,
                    help='number of spatial types in the graph')
    ap.add_argument('-ned', '--num_edge_dis', type=int, default=128,
                    help='number of edge dis types in the graph')
    ap.add_argument('-mhmd', '--multi_hop_max_dist', type=int, default=5,
                    help='max distance of multi-hop edges')

    ap.add_argument('-spm', '--spatial_pos_max', type=int, default=1024,
                    help='maximum spatial position (clamp for the spatial encoding)')
    ap.add_argument('-et', '--edge_type', type=str, default='multi_hop',
                    help='edge type in the graph')
    ap.add_argument('-s', '--seed', type=int, default=42,
                    help='common.seed')
    ap.add_argument('-tes', '--train_epoch_shuffle', type=_str2bool, default=False,
                    help='whether to shuffle the dataset at each epoch')
    ap.add_argument('-udd', '--user_data_dir', type=str, default='',
                    help='path to the module of user-defined dataset')
    ap.add_argument('-sd', '--save_dir', type=str, default='../../checkpoints',
                    help='path to save the checkpoints of the model')

    ap.add_argument('-mtl', '--max_target_length', type=int, default=256,
                    help='inference length')

    args = ap.parse_args(argv)
    return args


def setup_seed(seed):
    """Seed torch's CPU and current-device CUDA RNGs for reproducibility.

    NOTE: this intentionally does NOT seed Python's `random`, NumPy, all
    CUDA devices, or enable deterministic cuDNN kernels, so runs are
    only partially reproducible.  (Dead commented-out code that did
    those things was removed.)
    """
    torch.manual_seed(seed)
    # Safe without a GPU: torch queues the call until CUDA initializes.
    torch.cuda.manual_seed(seed)


if __name__ == '__main__':

    args = parse_args()
    # Unpack the options used directly below.
    # NOTE(review): .strip(os.sep) removes LEADING separators too, so an
    # absolute save_dir like "/ckpts" would become "ckpts" — verify callers
    # only pass relative paths.
    batch_size, beam_size, max_nodes, multi_hop_max_dist, spatial_pos_max, max_target_length, save_dir, seed = args.batch_size, args.beam_size, args.max_nodes, args.multi_hop_max_dist, args.spatial_pos_max, args.max_target_length, args.save_dir.strip(os.sep), args.seed
    # NOTE(review): GPU index 2 is hard-coded; adjust for other machines.
    device = torch.device('cuda:2' if torch.cuda.is_available() else 'cpu')

    #  Just some code to print debug information to stdout
    logging.basicConfig(format='%(asctime)s - %(message)s',
                        datefmt='%Y-%m-%d %H:%M:%S',
                        level=logging.INFO,
                        handlers=[LoggingHandler()])
    #  /print debug information to stdout

    setup_seed(seed)

    # Build the test split; data_size=10952 is hard-coded to this dataset's
    # test-set size — presumably must match the file on disk (TODO confirm).
    test_dataset = CodeSearchNetPYGDataset(data_size=10952, max_node=max_nodes, multi_hop_max_dist=multi_hop_max_dist, spatial_pos_max=spatial_pos_max, num_classes=-1, dataset="test", device=device)

    # No shuffling: evaluate() zips predictions with read_samples() output
    # in file order, so the order must be preserved.
    test_dataloader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False, collate_fn=test_dataset.collate)

    checkpoint = 'microsoft/codebert-base'
    # `tokenizer` is read as a module-level global inside evaluate().
    tokenizer = RobertaTokenizer.from_pretrained(checkpoint)
    # Separate tokenizer copy that gets AST node-type special tokens added;
    # here only `num_added_toks` is used (to size the graph vocabulary).
    ast_tokenizer = RobertaTokenizer.from_pretrained(checkpoint)
    roberta = RobertaModel.from_pretrained(checkpoint)
    roberta_config = RobertaConfig.from_pretrained(checkpoint)
    # javalang AST node-type names registered as special tokens so AST
    # descriptions tokenize to single ids.
    javalang_special_tokens = ['CompilationUnit', 'Import', 'Documented', 'Declaration', 'TypeDeclaration',
                               'PackageDeclaration',
                               'ClassDeclaration', 'EnumDeclaration', 'InterfaceDeclaration', 'AnnotationDeclaration',
                               'Type',
                               'BasicType', 'ReferenceType', 'TypeArgument', 'TypeParameter', 'Annotation',
                               'ElementValuePair',
                               'ElementArrayValue', 'Member', 'MethodDeclaration', 'FieldDeclaration',
                               'ConstructorDeclaration',
                               'ConstantDeclaration', 'ArrayInitializer', 'VariableDeclaration',
                               'LocalVariableDeclaration',
                               'VariableDeclarator', 'FormalParameter', 'InferredFormalParameter', 'Statement',
                               'IfStatement',
                               'WhileStatement', 'DoStatement', 'ForStatement', 'AssertStatement', 'BreakStatement',
                               'ContinueStatement',
                               'ReturnStatement', 'ThrowStatement', 'SynchronizedStatement', 'TryStatement',
                               'SwitchStatement',
                               'BlockStatement', 'StatementExpression', 'TryResource', 'CatchClause',
                               'CatchClauseParameter',
                               'SwitchStatementCase', 'ForControl', 'EnhancedForControl', 'Expression', 'Assignment',
                               'TernaryExpression',
                               'BinaryOperation', 'Cast', 'MethodReference', 'LambdaExpression', 'Primary', 'Literal',
                               'This',
                               'MemberReference', 'Invocation', 'ExplicitConstructorInvocation',
                               'SuperConstructorInvocation',
                               'MethodInvocation', 'SuperMethodInvocation', 'SuperMemberReference', 'ArraySelector',
                               'ClassReference',
                               'VoidClassReference', 'Creator', 'ArrayCreator', 'ClassCreator', 'InnerClassCreator',
                               'EnumBody',
                               'EnumConstantDeclaration', 'AnnotationMethod', 'Modifier']
    special_tokens_dict = {'additional_special_tokens': javalang_special_tokens}
    num_added_toks = ast_tokenizer.add_special_tokens(special_tokens_dict)

    # Graph node vocabulary = CodeBERT vocab + AST special tokens + 1
    # (presumably a padding/offset slot — TODO confirm against the dataset).
    args.num_atoms = tokenizer.vocab_size + num_added_toks + 1
    # Fill remaining architecture defaults in-place, then build the encoder.
    graphormer_base_architecture(args)
    graph_encoder = GraphormerModel.build_model(args, None)

    # Transformer decoder sized to match the RoBERTa encoder's hidden dim.
    decoder_layer = nn.TransformerDecoderLayer(d_model=roberta_config.hidden_size, nhead=roberta_config.num_attention_heads)
    decoder = nn.TransformerDecoder(decoder_layer, num_layers=6)

    model = Seq2Seq(encoder=roberta, decoder=decoder, gnn_encoder=graph_encoder, config=roberta_config, beam_size=beam_size,
                    max_length=max_target_length, sos_id=tokenizer.cls_token_id, eos_id=tokenizer.sep_token_id).to(device)

    # NOTE(review): checkpoint directory and filename are hard-coded to one
    # specific training run; parameterize before reuse.
    test_dir = f"{save_dir}/1663248419"
    model.load_state_dict(torch.load(f"{test_dir}/model_18431_0.3553607285055857.pt"), strict=True)

    # NOTE(review): best_loss / best_model_dic are never used below —
    # leftovers from a training loop.
    best_loss = 1e6
    best_model_dic = None
    # Argument names are misleading here: evaluate's `save_dir` receives the
    # checkpoint directory (test_dir) and its `test_dir` receives the path
    # to the gold JSONL file (test_dataset.ground_truth).
    eval_bleu = evaluate(model, test_dataloader, test_dir, test_dataset.ground_truth)
    print("bleu score= {}".format(eval_bleu))
    logging.info('\n=====Evaluating finished.=====')
