from train import *
from classifier import *
from ClassifierModel import *
from syntax.biaffine_parser import Biaffine_Parser
from data import Self_Corpus

class MultiLossCompute:
    """Joint parser + domain-classifier loss computation and one-device update.

    Depending on ``args.train_mode``:
      * ``'selftrain'``: only the dependency-parsing loss (arc + label).
      * otherwise: the domain-classification loss, plus the parsing loss
        for batches that carry gold heads (``torch.sum(heads) != 0``).
    """
    def __init__(self, dp_model, dp_optimizer, classifier, classifier_optimizer, args):
        self.dp_model = dp_model                        # parser; exposes arc_loss/lab_loss and hidden states .h
        self.classifier = classifier                    # domain classifier wrapper (forward + compute_loss)
        self.dp_optimizer = dp_optimizer
        self.classifier_optimizer = classifier_optimizer
        self.args = args

    def __call__(self, words, tags, heads, labels, domain_labels, lengths):
        """Run forward + backward + optimizer step(s) for one batch.

        Returns ``(S_arc, S_lab, loss_dict)``; the score tensors are ``None``
        for classifier-only batches (no gold heads). ``loss_dict`` holds the
        scalar total loss under key ``'loss'``.
        """
        # Forward pass (also populates self.dp_model.h used by the classifier).
        S_arc, S_lab = self.dp_model(words=words, tags=tags)

        # Compute loss.
        update_classifier = False
        if self.args.train_mode == 'selftrain':
            arc_loss = self.dp_model.arc_loss(S_arc, heads)
            lab_loss = self.dp_model.lab_loss(S_lab, heads, labels)
            loss = arc_loss + lab_loss
        else:
            # Domain classification on the parser's hidden states.
            self.classifier.forward(self.dp_model.h, lengths)
            domain_loss = self.classifier.compute_loss(domain_labels)
            update_classifier = True
            if torch.sum(heads) == 0:
                # Batch without gold trees: classifier loss only.
                S_arc = None
                S_lab = None
                loss = domain_loss
            else:
                arc_loss = self.dp_model.arc_loss(S_arc, heads)
                lab_loss = self.dp_model.lab_loss(S_lab, heads, labels)
                loss = domain_loss + arc_loss + lab_loss

        # Update parameters.  BUG FIX: classifier_optimizer was stored but
        # never zeroed/stepped, so the classifier's own parameters were never
        # updated; step it whenever the domain loss contributed gradients.
        self.dp_optimizer.zero_grad()
        if update_classifier and self.classifier_optimizer is not None:
            self.classifier_optimizer.zero_grad()
        loss.backward()
        self.dp_optimizer.step()
        if update_classifier and self.classifier_optimizer is not None:
            self.classifier_optimizer.step()

        loss_dict = dict(loss=loss.item())
        return S_arc, S_lab, loss_dict

def multi_train(args):
    """Train the dependency parser, optionally jointly with a domain classifier.

    ``args.train_mode`` selects the regime:
      * ``'multitrain'``  - parser batches zipped with domain batches.
      * ``'selftrain'``   - parser only; during epochs 2-10 the current model
                            re-parses ``args.predict_file`` and the resulting
                            self-training data is added.
      * ``'unitedtrain'`` - parser + domain batches, with the same
                            self-training augmentation during epochs 2-10.

    The best model (by validation LAS) is checkpointed to
    ``args.dp_checkpoints`` and ``args.clf_checkpoints``.
    """
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)

    args.cuda = torch.cuda.is_available()
    print('Using cuda: {}'.format(args.cuda))

    # Initialize the data, model, and optimizer.
    corpus = Corpus(data_path=args.data, vocab_path=args.vocab, char=args.use_chars)
    dp_train_iter, domain_train_iter, dev_iter, _, _, _ = corpus.train.torch_batches(args.batch_size)

    # Resume from checkpoints when available, otherwise build fresh models.
    if os.path.exists(args.dp_checkpoints):
        dp_model = torch.load(args.dp_checkpoints)
    else:
        dp_model = make_model(
                    args,
                    word_vocab_size=len(corpus.dictionary.w2i),
                    tag_vocab_size=len(corpus.dictionary.t2i),
                    num_labels=len(corpus.dictionary.l2i)
                )
    if os.path.exists(args.clf_checkpoints):
        classifier_model = torch.load(args.clf_checkpoints)
    else:
        classifier_model = ClassifierModel(args)
    classifier = DomainClassifier(classifier_model)
    print('Embedding parameters: {:,}'.format(dp_model.embedding.num_parameters))
    print('Encoder parameters: {:,}'.format(dp_model.encoder.num_parameters))
    print('Total model parameters: {:,}'.format(dp_model.num_parameters))
    if args.cuda:
        dp_model.cuda()
        classifier_model.cuda()

    if args.encoder == 'transformer':
        dp_optimizer = get_std_transformer_opt(args, dp_model)
        classifier_optimizer = get_std_transformer_opt(args, classifier_model)
    else:
        dp_optimizer = torch.optim.Adam(dp_model.parameters(), lr=args.lr)
        classifier_optimizer = torch.optim.Adam(classifier.model.parameters(), lr=args.lr)

    train_step = MultiLossCompute(dp_model, dp_optimizer, classifier, classifier_optimizer, args)
    if args.cuda:
        device_count = torch.cuda.device_count()
        print('Training on 1 device out of {} availlable.'.format(device_count))

    # Resume the best validation score if a previous run saved one.
    best_val_acc_filename = "{}/best_val_acc.npy".format(args.logdir)
    if os.path.exists(best_val_acc_filename):
        best_val_LAS = np.load(best_val_acc_filename)
    else:
        best_val_LAS = 0.

    timer = Timer()
    best_epoch = 0
    epoch = 0  # defined up-front so the final-save path works even if training is interrupted immediately
    print('Start of training..')
    try:
        for epoch in range(1, args.epochs+1):
            # Build this epoch's batch iterator according to the training mode.
            if args.train_mode == 'multitrain':
                train_iter = zip(dp_train_iter, domain_train_iter)
            elif args.train_mode == 'selftrain' and (epoch == 1 or epoch >= 11):
                train_iter = zip(dp_train_iter)
            elif args.train_mode == 'unitedtrain' and (epoch == 1 or epoch >= 11):
                train_iter = zip(dp_train_iter, domain_train_iter)
            elif (args.train_mode == 'selftrain' or args.train_mode == 'unitedtrain') and 1 < epoch < 11:
                # Self-training: re-parse the prediction file with the current
                # best checkpoint to produce extra training data.
                file = args.predict_file
                # Truncate the self-training output before regenerating it.
                # NOTE(review): hard-coded absolute path -- should come from args.
                open('E:/workspace/PatholNLP-graduation experiment/data/dp_train/self_train.conll','w').close()
                # BUG FIX: the parser (checkpoint load) was re-constructed for
                # every sentence; build it once per epoch.
                syntax = Biaffine_Parser(args.dp_checkpoints)
                with open(file, 'r', encoding='utf-8') as f:
                    for sentence in f:
                        # Normalize ASCII punctuation to full-width before parsing.
                        sentence = sentence.replace('(', '（').replace(')', '）').replace(',', '，')
                        seg_result = syntax.segment.cut(sentence)
                        syntax.parse_dependency(seg_result, sentence, 0)
                corpus = Self_Corpus(data_path=args.data, vocab_path=args.vocab, char=args.use_chars)
                self_train_iter, _ = corpus.self_train.torch_batches(args.batch_size)
                # BUG FIX: this compared against "self_train" while the mode
                # string everywhere else is 'selftrain', so the branch never
                # matched and selftrain epochs wrongly included domain batches.
                if args.train_mode == 'selftrain':
                    train_iter = zip(dp_train_iter, self_train_iter)
                else:
                    train_iter = zip(dp_train_iter, domain_train_iter, self_train_iter)
            run_multitorchtext_epoch(args, dp_model, classifier, train_iter, train_step, epoch)

            # Evaluate model on validation set.
            # TODO: replace this with a UAS and LAS eval instead of this proxy
            arc_val_acc, lab_val_acc, val_LAS = evaluate_torchtext(args, dp_model, dev_iter)
            LOSSES['val_acc'].append([arc_val_acc, lab_val_acc, val_LAS])

            # Save model if it is the best so far.
            if val_LAS > best_val_LAS:
                torch.save(dp_model, args.dp_checkpoints)
                torch.save(classifier.model, args.clf_checkpoints)
                best_val_LAS = val_LAS
                best_epoch = epoch
                np.save(best_val_acc_filename, best_val_LAS)

            write_losses(LOSSES['train_loss'], LOSSES['train_acc'], LOSSES['val_acc'], args.logdir)
            # End epoch with some useful info in the terminal.
            print('-' * 89)
            print(
                '| End of epoch {:3d}/{} | Time {:5.2f}s | Valid LAS {:3.2f}% |'
                ' Best accuracy {:3.2f}% (epoch {:3d}) |'.format(
                    epoch,
                    args.epochs,
                    timer.elapsed(),
                    100*val_LAS,
                    100*best_val_LAS,
                    best_epoch)
            )
            print('-' * 89)
    except KeyboardInterrupt:
        print()
        print('-' * 89)
        print('Exiting from training early')

    # One last evaluation/save in case the final (or interrupted) epoch improved.
    arc_val_acc, lab_val_acc, val_LAS = evaluate_torchtext(args, dp_model, dev_iter)
    if val_LAS > best_val_LAS:
        # BUG FIX: this saved to args.checkpoints while every other save/load
        # uses args.dp_checkpoints, so the final improvement went to the wrong file.
        torch.save(dp_model, args.dp_checkpoints)
        torch.save(classifier.model, args.clf_checkpoints)
        best_val_LAS = val_LAS
        best_epoch = epoch
        np.save(best_val_acc_filename, best_val_LAS)

    print('=' * 89)
    print('| End of training | Best validation LAS {:3.2f} (epoch {}) |'.format(
        100*best_val_LAS, best_epoch))
    print('=' * 89)

def multimodeleval(args):
    """Evaluate an ensemble of parser checkpoints on the dev set.

    For every batch, the arc/label score tensors of all models listed in
    ``args.dp_checkpointslist`` are averaged before computing arc accuracy,
    label accuracy and LAS. Prints the batch-averaged metrics; returns 0 on
    failure (missing checkpoint or empty dev set).
    """
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)

    args.cuda = torch.cuda.is_available()
    print('Using cuda: {}'.format(args.cuda))

    # Initialize the data.
    corpus = Corpus(data_path=args.data, vocab_path=args.vocab, char=args.use_chars)
    train_iter, dev_iter, _, _ = corpus.train.torch_batches(args.batch_size)

    # BUG FIX: every checkpoint was re-loaded from disk for every batch;
    # load each ensemble member once up front instead.
    dp_models = []
    for parer in args.dp_checkpointslist:
        if not os.path.exists(parer):
            print('de_model not exist')
            return 0
        dp_model = torch.load(parer)
        if args.cuda:
            dp_model.cuda()
        dp_model.eval()
        dp_models.append(dp_model)

    arc_acc, lab_acc, LAS = 0, 0, 0
    num_batches = 0
    for batch in dev_iter:
        words = batch.text[0]
        tags = batch.tag
        heads = batch.head
        labels = batch.label
        if args.cuda:
            words, tags, heads, labels = words.cuda(), tags.cuda(), heads.cuda(), labels.cuda()
        S_arc_list = []
        S_lab_list = []
        for dp_model in dp_models:
            S_arc, S_lab = dp_model(words=words, tags=tags)
            S_arc_list.append(S_arc)
            S_lab_list.append(S_lab)
        # Average the ensemble score tensors.
        S_arc = np.sum(S_arc_list, 0) / len(dp_models)
        S_lab = np.sum(S_lab_list, 0) / len(dp_models)
        arc_acc += arc_accuracy(S_arc, heads)
        lab_acc += lab_accuracy(S_lab, heads, labels)
        LAS += Cal_LAS(S_arc, S_lab, heads, labels)
        num_batches += 1

    # BUG FIX: metrics were divided by the last enumerate index (batches - 1),
    # over-stating the averages and crashing with ZeroDivisionError on a
    # single-batch dev set; divide by the actual batch count instead.
    if num_batches == 0:
        print('dev set is empty')
        return 0
    arc_acc /= num_batches
    lab_acc /= num_batches
    LAS /= num_batches

    print('arc_acc:', arc_acc)
    print('lab_acc:', lab_acc)
    print('LAS:', LAS)