import sys
import logging
import torch
import torch.nn as nn
from tqdm import tqdm
import math
import argparse
import copy
from transformers import (
    RobertaTokenizer,
    RobertaConfig,
    RobertaModel,
    get_constant_schedule,
    get_constant_schedule_with_warmup,
    get_cosine_schedule_with_warmup,
    get_linear_schedule_with_warmup,
    get_cosine_with_hard_restarts_schedule_with_warmup,
    AdamW
)

from v1.training_process.data import CodeSearchNetPYGDataset

from torch.utils.data import DataLoader
from v1.models import GraphormerModel, graphormer_base_architecture


class LoggingHandler(logging.Handler):
    """Logging handler that writes records through ``tqdm.write`` so that log
    output does not break an active tqdm progress bar."""

    def __init__(self, level=logging.NOTSET):
        super().__init__(level)

    def emit(self, record):
        """Format and emit a single log record via tqdm.

        Bug fix: the original used a bare ``except:``, which also swallows
        non-``Exception`` errors such as ``GeneratorExit``. Catching
        ``Exception`` keeps the "logging must never crash the program"
        contract while letting interpreter-level exits propagate.
        """
        try:
            msg = self.format(record)
            tqdm.write(msg)
            self.flush()
        except (KeyboardInterrupt, SystemExit):
            raise
        except Exception:
            self.handleError(record)


def get_scheduler(optimizer, scheduler: str, warmup_steps: int, t_total: int):
    """
    Returns the correct learning rate scheduler

    The scheduler name is matched case-insensitively; an unrecognized name
    raises ``ValueError``.
    """
    name = scheduler.lower()
    # Lazy builders: nothing is constructed until the matching entry is called.
    builders = {
        'constantlr':
            lambda: get_constant_schedule(optimizer),
        'warmupconstant':
            lambda: get_constant_schedule_with_warmup(optimizer, num_warmup_steps=warmup_steps),
        'warmuplinear':
            lambda: get_linear_schedule_with_warmup(optimizer, num_warmup_steps=warmup_steps, num_training_steps=t_total),
        'warmupcosine':
            lambda: get_cosine_schedule_with_warmup(optimizer, num_warmup_steps=warmup_steps, num_training_steps=t_total),
        'warmupcosinewithhardrestarts':
            lambda: get_cosine_with_hard_restarts_schedule_with_warmup(optimizer, num_warmup_steps=warmup_steps, num_training_steps=t_total),
    }
    if name not in builders:
        raise ValueError("Unknown scheduler {}".format(name))
    return builders[name]()


def train(model, optimizer, scheduler, train_data, dev_data, max_grad_norm, num_classes, best_acc):
    """Run one training epoch over ``train_data`` with periodic dev evaluation.

    Evaluates on ``dev_data`` roughly three times per epoch (only after step
    100) and once more at the end of the epoch; whenever dev accuracy beats
    ``best_acc``, the model's state_dict is deep-copied to CPU and saved to
    disk.

    Returns the best (CPU) state_dict found this epoch, or None if no
    evaluation improved on ``best_acc``.

    NOTE(review): ``best_acc`` is rebound locally only — the improved value is
    never returned to the caller, so each epoch compares against the caller's
    original value.
    NOTE(review): ``device`` is read from module scope (defined under
    ``__main__``), not passed as a parameter.
    """
    loss_fn = nn.CrossEntropyLoss()

    best_model_weights = None

    # Tracks whether any training step happened since the last mid-epoch
    # evaluation, to decide if a final end-of-epoch evaluation is needed.
    # NOTE(review): shadows the built-in ``eval``.
    eval = False

    batch_num = len(train_data)
    with tqdm(enumerate(train_data), total=batch_num, position=0, file=sys.stdout, desc="training", ascii=True) as iterator:
        for step, batch in iterator:
            if batch is None:
                continue
            model.train()  # model was in eval mode in evaluate(); re-activate the train mode
            optimizer.zero_grad()  # clear gradients first
            torch.cuda.empty_cache()  # releases all unoccupied cached memory

            logits = model(batch)
            if logits is None:
                continue
            true_labels = batch['y'].view(-1)
            # Drops position 0 of the sequence dimension before computing the
            # loss — presumably a special/CLS token; TODO confirm against the
            # model's output layout.
            loss = loss_fn(logits[:, 1:, :].reshape(-1, num_classes), true_labels)
            iterator.set_postfix_str("batch loss= {}".format(loss.item()))

            loss.backward()
            torch.nn.utils.clip_grad_norm_(model.parameters(), max_grad_norm)

            # update weights
            optimizer.step()

            # update training rate
            scheduler.step()

            eval = True

            # Mid-epoch evaluation: triggers about every batch_num // 3 steps,
            # but never before step 100.
            if step >= 100 and (step + 1) % (batch_num // 3 + 1) == 0:
                acc = evaluate(model, dev_data, mute=True)
                logging.info('==> step {} dev acc: {}'.format(step, acc))
                if acc > best_acc:
                    best_acc = acc
                    # model.cpu() moves the model off-device; state_dict is
                    # copied on CPU, then the model is moved back below.
                    best_model_weights = copy.deepcopy(model.cpu().state_dict())
                    torch.save(best_model_weights, f'model_{step}_{acc}.pt')
                    model.to(device)
                eval = False
        else:
            # for/else: this branch runs when the loop completes without a
            # ``break`` — always, here, since the loop body never breaks.
            # Performs a final evaluation if steps ran since the last one.
            if eval:
                acc = evaluate(model, dev_data, mute=True)
                logging.info('==> step {} dev acc: {}'.format(len(train_data)-1, acc))
                if acc > best_acc:
                    best_acc = acc
                    best_model_weights = copy.deepcopy(model.cpu().state_dict())
                    torch.save(best_model_weights, f'model_last_{acc}.pt')
                    model.to(device)
    return best_model_weights


def evaluate(model, test_data, mute=False):
    """Evaluate ``model`` on ``test_data`` and return an accuracy-like score.

    Runs in eval mode with gradients disabled; predictions are the argmax over
    the class dimension of ``model(batch)[:, 1:, :]`` (position 0 dropped, as
    in train()).

    NOTE(review): the correct-prediction count is divided by
    ``len(test_data)`` — the number of *batches*, not the number of samples —
    so the returned value is "correct predictions per batch", not a true
    accuracy in [0, 1]. Confirm this normalization is intended.
    NOTE(review): the printed label legend (entailment/neutral/contradiction)
    looks like a leftover from an NLI task, not code search.
    """
    model.eval()
    acc = 0
    with torch.no_grad():
        for step, batch in tqdm(enumerate(test_data), desc='evaluating'):
            # batch_shape = batch[0].shape
            # boundary_list = (batch[0] == 102).nonzero(as_tuple=True)[1].resize(batch_shape[0], 2).T.tolist()
            # lower = torch.arange(batch_shape[1])[None, :] <= torch.Tensor(boundary_list[1])[:, None]
            # upper = torch.arange(batch_shape[1])[None, :] > torch.Tensor(boundary_list[0])[:, None]
            # types = lower * upper
            probs = model(batch)
            pred = torch.argmax(probs[:, 1:, :], dim=-1)
            acc += torch.eq(pred, batch['y']).sum().float().item()
            if not mute:
                print('==>acc<==', acc)
                print('label meanings: 0: entailment, 1: neutral, 2: contradiction')
    acc /= len(test_data)
    return acc


def _str2bool(value):
    """Parse a boolean-ish command-line string.

    argparse's ``type=bool`` treats any non-empty string — including
    ``'False'`` — as True; this converter interprets the text explicitly.
    """
    if isinstance(value, bool):
        return value
    lowered = value.lower()
    if lowered in ('true', '1', 'yes', 'y'):
        return True
    if lowered in ('false', '0', 'no', 'n'):
        return False
    raise argparse.ArgumentTypeError("expected a boolean, got {!r}".format(value))


def parse_args(argv=None):
    """Build and parse command-line arguments for graphormer training.

    Args:
        argv: optional list of argument strings; when None (the default,
              preserving the original call signature) ``sys.argv[1:]`` is
              parsed.

    Returns:
        argparse.Namespace with all training hyper-parameters.
    """
    ap = argparse.ArgumentParser("arguments for graphormer training")
    ap.add_argument('-b', '--batch_size', type=int, default=16, help='batch size')
    ap.add_argument('-ep', '--epoch_num', type=int, default=1, help='epoch num')
    ap.add_argument('-ss', '--scheduler_setting', type=str, default='WarmupLinear',
                    choices=['WarmupLinear', 'ConstantLR', 'WarmupConstant', 'WarmupCosine', 'WarmupCosineWithHardRestarts'])
    ap.add_argument('-mg', '--max_grad_norm', type=float, default=1.,
                    help='maximum gradient norm')
    ap.add_argument('-wp', '--warmup_percent', type=float, default=0.2,
                    help='how many percentage of steps are used for warmup')

    ap.add_argument('-fm', '--flag_m', type=int, default=3,
                    help='number of iterations to optimize the perturbations with flag objectives')
    ap.add_argument('-fss', '--flag_step_size', type=float, default=1e-3,
                    help='learing rate of iterations to optimize the perturbations with flag objective')
    ap.add_argument('-fmag', '--flag_mag', type=float, default=1e-3,
                    help='magnitude bound for perturbations in flag objectives')
    ap.add_argument('-nc', '--num_classes', type=int, default=128,
                    help='number of classes or regression targets')

    ap.add_argument('-na', '--num_atoms', type=int, default=512,
                    help='max nodes per graph')  # CodeBERT vocab size (overwritten in __main__ from the tokenizer)
    ap.add_argument('-mn', '--max_nodes', type=int, default=512,
                    help='max nodes per graph')
    ap.add_argument('-ne', '--num_edges', type=int, default=512,
                    help='number of edge types in the graph')
    ap.add_argument('-nid', '--num_in_degree', type=int, default=512,
                    help='number of in degree types in the graph')
    ap.add_argument('-nod', '--num_out_degree', type=int, default=512,
                    help='number of out degree types in the graph')
    ap.add_argument('-ns', '--num_spatial', type=int, default=512,
                    help='number of spatial types in the graph')
    ap.add_argument('-ned', '--num_edge_dis', type=int, default=128,
                    help='number of edge dis types in the graph')
    ap.add_argument('-mhmd', '--multi_hop_max_dist', type=int, default=5,
                    help='max distance of multi-hop edges')

    # Bug fix: help text was a copy-paste of the multi-hop option's help.
    ap.add_argument('-spm', '--spatial_pos_max', type=int, default=1024,
                    help='upper bound for spatial position values in the graph')
    ap.add_argument('-et', '--edge_type', type=str, default='multi_hop',
                    help='edge type in the graph')
    ap.add_argument('-s', '--seed', type=int, default=1,
                    help='common.seed')
    # Bug fix: was ``type=bool``, which turned '-tes False' into True.
    ap.add_argument('-tes', '--train_epoch_shuffle', type=_str2bool, default=False,
                    help='whether to shuffle the dataset at each epoch')
    ap.add_argument('-udd', '--user_data_dir', type=str, default='',
                    help='path to the module of user-defined dataset')

    # Bug fix: help text was a copy-paste of the user_data_dir option's help.
    ap.add_argument('-mtl', '--max_target_length', type=str, default='',
                    help='maximum target sequence length')

    args = ap.parse_args(argv)
    return args


def setup_seed(seed):
    """Seed torch's CPU and CUDA random number generators.

    NOTE: numpy/random seeding and cudnn determinism are intentionally not
    configured here (the original left them disabled).
    """
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)


if __name__ == '__main__':

    # Parse hyper-parameters and unpack the ones used directly below.
    args = parse_args()
    batch_size, epoch_num, scheduler_setting, max_grad_norm, warmup_percent, max_nodes, multi_hop_max_dist, spatial_pos_max, max_target_length, num_classes, seed = args.batch_size, args.epoch_num, args.scheduler_setting, args.max_grad_norm, args.warmup_percent, args.max_nodes, args.multi_hop_max_dist, args.spatial_pos_max, args.max_target_length, args.num_classes, args.seed
    # NOTE(review): GPU index 1 is hard-coded; consider making it configurable.
    device = torch.device('cuda:1' if torch.cuda.is_available() else 'cpu')

    #  Just some code to print debug information to stdout
    logging.basicConfig(format='%(asctime)s - %(message)s',
                        datefmt='%Y-%m-%d %H:%M:%S',
                        level=logging.INFO,
                        handlers=[LoggingHandler()])
    #  /print debug information to stdout

    setup_seed(seed)

    # Dataset sizes are hard-coded to the CodeSearchNet train/valid splits.
    train_dataset = CodeSearchNetPYGDataset(data_size=164814, max_node=max_nodes, multi_hop_max_dist=multi_hop_max_dist, spatial_pos_max=spatial_pos_max, num_classes=num_classes, dataset="train", device=device)
    valid_dataset = CodeSearchNetPYGDataset(data_size=5179, max_node=max_nodes, multi_hop_max_dist=multi_hop_max_dist, spatial_pos_max=spatial_pos_max, num_classes=num_classes, dataset="valid", device=device)

    # subset = Subset(train_dataset, range(16521*8, 16524*8))
    # train_dataloader = DataLoader(subset, batch_size=batch_size, shuffle=False, collate_fn=train_dataset.collate)

    # NOTE(review): shuffle=False despite the --train_epoch_shuffle flag; the
    # shuffled variant is left commented out below.
    train_dataloader = DataLoader(train_dataset, batch_size=batch_size, shuffle=False, collate_fn=train_dataset.collate)
    # train_dataloader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True, collate_fn=train_dataset.collate)
    valid_dataloader = DataLoader(valid_dataset, batch_size=batch_size, shuffle=False, collate_fn=valid_dataset.collate)

    # NOTE(review): len(train_dataloader) is already the number of batches, so
    # dividing by batch_size again likely underestimates total_steps and makes
    # the LR schedule finish far too early — confirm the intent.
    total_steps = math.ceil(epoch_num*len(train_dataloader)*1./batch_size)
    warmup_steps = int(total_steps*warmup_percent)

    # Load CodeBERT tokenizers/model; a second tokenizer gets AST node-type
    # names added as special tokens.
    checkpoint = 'microsoft/codebert-base'
    tokenizer = RobertaTokenizer.from_pretrained(checkpoint)
    ast_tokenizer = RobertaTokenizer.from_pretrained(checkpoint)
    roberta = RobertaModel.from_pretrained(checkpoint)
    roberta_config = RobertaConfig.from_pretrained(checkpoint)
    # javalang AST node-type names, registered as special tokens so AST nodes
    # tokenize to single ids.
    javalang_special_tokens = ['CompilationUnit', 'Import', 'Documented', 'Declaration', 'TypeDeclaration',
                               'PackageDeclaration',
                               'ClassDeclaration', 'EnumDeclaration', 'InterfaceDeclaration', 'AnnotationDeclaration',
                               'Type',
                               'BasicType', 'ReferenceType', 'TypeArgument', 'TypeParameter', 'Annotation',
                               'ElementValuePair',
                               'ElementArrayValue', 'Member', 'MethodDeclaration', 'FieldDeclaration',
                               'ConstructorDeclaration',
                               'ConstantDeclaration', 'ArrayInitializer', 'VariableDeclaration',
                               'LocalVariableDeclaration',
                               'VariableDeclarator', 'FormalParameter', 'InferredFormalParameter', 'Statement',
                               'IfStatement',
                               'WhileStatement', 'DoStatement', 'ForStatement', 'AssertStatement', 'BreakStatement',
                               'ContinueStatement',
                               'ReturnStatement', 'ThrowStatement', 'SynchronizedStatement', 'TryStatement',
                               'SwitchStatement',
                               'BlockStatement', 'StatementExpression', 'TryResource', 'CatchClause',
                               'CatchClauseParameter',
                               'SwitchStatementCase', 'ForControl', 'EnhancedForControl', 'Expression', 'Assignment',
                               'TernaryExpression',
                               'BinaryOperation', 'Cast', 'MethodReference', 'LambdaExpression', 'Primary', 'Literal',
                               'This',
                               'MemberReference', 'Invocation', 'ExplicitConstructorInvocation',
                               'SuperConstructorInvocation',
                               'MethodInvocation', 'SuperMethodInvocation', 'SuperMemberReference', 'ArraySelector',
                               'ClassReference',
                               'VoidClassReference', 'Creator', 'ArrayCreator', 'ClassCreator', 'InnerClassCreator',
                               'EnumBody',
                               'EnumConstantDeclaration', 'AnnotationMethod', 'Modifier']
    special_tokens_dict = {'additional_special_tokens': javalang_special_tokens}
    num_added_toks = ast_tokenizer.add_special_tokens(special_tokens_dict)

    # Vocabulary size of the node-feature embedding = base vocab + AST tokens
    # + 1 (presumably padding; TODO confirm against the model's embedding).
    args.num_atoms = tokenizer.vocab_size+num_added_toks+1
    graphormer_base_architecture(args)
    model = GraphormerModel.build_model(args, None).to(device)

    optimizer = AdamW(model.parameters(), lr=2e-5, eps=1e-6, correct_bias=False)
    scheduler = get_scheduler(optimizer, scheduler_setting, warmup_steps=warmup_steps, t_total=total_steps)

    # NOTE(review): train() returns a state_dict (or None), not an accuracy,
    # so best_acc is never updated here — every epoch compares against -1.
    best_acc = -1.
    best_model_dic = None
    for ep in range(args.epoch_num):
        logging.info('\n=====epoch {}/{}====='.format(ep, args.epoch_num))
        model_dic = train(model, optimizer, scheduler, train_dataloader, valid_dataloader, max_grad_norm, num_classes, best_acc)
        if model_dic is not None:
            best_model_dic = model_dic
    # assert best_model_dic is not None

    # for testing load the best model
    # model.load_model(best_model_dic)
    logging.info('\n=====Training finished.=====')
