import os
import logging

import torch
from torch import optim
from torch.utils.data.dataloader import DataLoader
import torch.utils.data.distributed as dist
# import horovod.torch as hvd
from seq2seq.trainer import SupervisedTrainer
# from seq2seq.models import EncoderRNN, DecoderRNN, TopKDecoder,Seq2seq
from seq2seq.models import Seq2SeqTransformer, GPT, GPTConfig, MoeTransformer, Transformer
from seq2seq.loss import Perplexity
from seq2seq.optim import Optimizer
from seq2seq.dataset import VocabField
from seq2seq.dataset.dialogDatasets import *
from seq2seq.evaluator import Predictor
from utils import *
from configParser import opt
from apex import amp

# Seed every CUDA device up front so runs are reproducible when a seed is set.
if opt.random_seed is not None: torch.cuda.manual_seed_all(opt.random_seed)

multi_gpu = False
# Device selection: 'cpu' or a single GPU index ('0', '1', ...) run on one
# device; any other value switches on Horovod-based multi-GPU training.
if opt.device == 'cpu' or opt.device.isdigit():
    device = torch.device(f"cuda:{opt.device}" if opt.device.isdigit() else 'cpu')
else:
    multi_gpu = True
    # NOTE(review): `hvd` is used here but `import horovod.torch as hvd` is
    # commented out at the top of the file — this branch raises NameError
    # unless that import is restored. Confirm horovod is meant to be optional.
    hvd.init()
    device = torch.device(f"cuda" if opt.device else 'cpu')
    # Each Horovod worker pins itself to its local GPU.
    torch.cuda.set_device(hvd.local_rank())
    # NOTE(review): self-assignment is a no-op; presumably this was meant to
    # scale the per-worker batch size by world size — confirm intent.
    opt.batch_size = opt.batch_size

LOG_FORMAT = '%(asctime)s %(levelname)-8s %(message)s'
# Training runs log to a file inside the model directory (appending when
# resuming, otherwise truncating); all other phases log to stderr only.
if opt.phase == 'train':
    logging.basicConfig(format=LOG_FORMAT, 
                        level=getattr(logging, opt.log_level.upper()),
                        filename=os.path.join(opt.model_dir, opt.log_file),
                        filemode='a' if opt.resume else 'w')
else:
    logging.basicConfig(format=LOG_FORMAT, 
                        level=getattr(logging, opt.log_level.upper()))
logger = logging.getLogger('train')
# Only the single process (or Horovod rank 0) emits the startup banner so the
# config is not duplicated once per worker.
if not multi_gpu or hvd.rank() == 0: 
    logger.info(f"Train Log")
    logger.info(opt)

def get_last_checkpoint(model_dir):
    """Return the most recent checkpoint name recorded in ``<model_dir>/checkpoints``.

    The ``checkpoints`` bookkeeping file lists checkpoint filenames one per
    line, most recent first; only the first line is read.

    Returns ``None`` when the file is missing or unreadable (i.e. no
    checkpoint has been saved yet).
    """
    checkpoints_fp = os.path.join(model_dir, "checkpoints")
    try:
        with open(checkpoints_fp, 'r') as f:
            checkpoint = f.readline().strip()
    # BUG FIX: a bare `except:` also swallowed KeyboardInterrupt/SystemExit;
    # only a missing/unreadable file should mean "no checkpoint".
    except OSError:
        return None
    return checkpoint

if __name__ == "__main__":
    # Prepare Datasets and Vocab: load both vocab files and wrap them in
    # VocabField objects; only the target side gets SOS/EOS markers since
    # only the decoder needs them.
    src_vocab_list = VocabField.load_vocab(opt.src_vocab_file)
    tgt_vocab_list = VocabField.load_vocab(opt.tgt_vocab_file)
    src_vocab = VocabField(src_vocab_list, vocab_size=opt.src_vocab_size)
    tgt_vocab = VocabField(tgt_vocab_list, vocab_size=opt.tgt_vocab_size, 
                            sos_token="<SOS>", eos_token="<EOS>")
    # Padding positions are masked out of the loss via this id.
    pad_id = tgt_vocab.word2idx[tgt_vocab.pad_token]

    # Prepare loss: uniform class weights (Perplexity ignores pad_id).
    # NOTE(review): the weight vector is sized by the *source* vocab while the
    # loss is computed over *target* tokens — this only works when the two
    # vocab sizes line up; confirm this is intentional.
    weight = torch.ones(len(src_vocab.vocab))
    loss = Perplexity(weight, pad_id)
    loss.to(device)
    # Initialize model: pick the architecture named by --model_name.
    model_name = opt.model_name
    # A single shared embedding table must be large enough for both vocabs.
    vocab_size = max(len(src_vocab.vocab), len(tgt_vocab.vocab))
    if model_name == 'Transformer':
        seq2seq = Transformer(vocab_size, opt.max_tgt_length, opt.embedding_size)
    elif model_name == 'MoeTransformer':
        seq2seq = MoeTransformer(vocab_size, opt.max_his_length, opt.expert_num, opt.choose_expert_num, opt.his_num, opt.adapter_size, opt.embedding_size, opt.head_num, opt.ffn_dim, opt.n_layer)
    elif model_name == 'GPT':
        # GPT hyper-parameters are currently fixed here rather than configurable.
        mconf = GPTConfig(opt.src_vocab_size, opt.max_src_length,
                  n_layer=8, n_head=8, n_embd=512)
        seq2seq = GPT(mconf)
    else:
        # BUG FIX: an unrecognized model name previously left `seq2seq`
        # undefined and crashed below with a confusing NameError; fail fast
        # with a clear message instead.
        raise ValueError(f"Unknown model_name: {model_name!r}")
    seq2seq.to(device)

    # Resume from the newest checkpoint recorded in the model dir unless an
    # explicit checkpoint path was supplied.
    if opt.resume and not opt.load_checkpoint:
        last_checkpoint = get_last_checkpoint(opt.model_dir)
        if last_checkpoint:
            opt.load_checkpoint = os.path.join(opt.model_dir, last_checkpoint)
            # Checkpoints are named "<step>.pt"; recover the step count.
            # BUG FIX: str.strip('.pt') removes the *characters* '.', 'p', 't'
            # from both ends (not the ".pt" suffix) — e.g. "pt7.pt" -> "7.";
            # use splitext/basename to drop the extension safely.
            opt.skip_steps = int(os.path.splitext(os.path.basename(last_checkpoint))[0])

    if opt.load_checkpoint:
        seq2seq.load_state_dict(torch.load(opt.load_checkpoint))
        # Same suffix-stripping fix as above.
        opt.skip_steps = int(os.path.splitext(os.path.basename(opt.load_checkpoint))[0])
        if not multi_gpu or hvd.rank() == 0: logger.info(f"\nLoad from {opt.load_checkpoint}\n")
    # else:
    #     for param in seq2seq.parameters():
    #         param.data.uniform_(-opt.init_weight, opt.init_weight)
    
    # if opt.beam_width > 1 and opt.phase == "infer":
    #     if not multi_gpu or hvd.rank() == 0: logger.info(f"Beam Width {opt.beam_width}")
    #     seq2seq.decoder = TopKDecoder(seq2seq.decoder, opt.beam_width)

    if opt.phase == "train":
        # Prepare Train Data: DialogDataset pairs post/response files (plus
        # optional history) and translates tokens to ids via trans_data.
        trans_data = TranslateData(pad_id)
        train_set = DialogDataset(opt.train_post_path,
                                  opt.train_resp_path,
                                  trans_data.translate_data,
                                  src_vocab,
                                  tgt_vocab,
                                  max_src_length=opt.max_src_length,
                                  max_tgt_length=opt.max_tgt_length,
                                  max_his_length=opt.max_his_length,
                                  his_data_fp=opt.train_his_path)
        # Under Horovod each worker sees a disjoint shard; the sampler does
        # the shuffling, so DataLoader shuffle must be off in that case.
        train_sampler = dist.DistributedSampler(train_set, num_replicas=hvd.size(), rank=hvd.rank()) \
                            if multi_gpu else None
        train = DataLoader(train_set, 
                           batch_size=opt.batch_size, 
                           shuffle=False if multi_gpu else True,
                           sampler=train_sampler,
                           drop_last=True,
                           collate_fn=trans_data.collate_fn)

        # Dev set mirrors the train set but is never shuffled.
        dev_set = DialogDataset(opt.dev_post_path,
                                opt.dev_resp_path,
                                trans_data.translate_data,
                                src_vocab,
                                tgt_vocab,
                                max_src_length=opt.max_src_length,
                                max_tgt_length=opt.max_tgt_length,
                                max_his_length=opt.max_his_length,
                                his_data_fp=opt.dev_his_path)
        dev_sampler = dist.DistributedSampler(dev_set, num_replicas=hvd.size(), rank=hvd.rank()) \
                            if multi_gpu else None
        dev = DataLoader(dev_set, 
                        batch_size=opt.batch_size, 
                        shuffle=False, 
                        sampler=dev_sampler, 
                        collate_fn=trans_data.collate_fn)

        # Prepare optimizer
        # optimizer = Optimizer(optim.Adam(seq2seq.parameters(), lr=opt.learning_rate), max_grad_norm=opt.clip_grad)
        optimizer = optim.Adam(seq2seq.parameters(), lr=opt.learning_rate)
        if multi_gpu: 
            # Order matters: wrap the optimizer first, then broadcast the
            # rank-0 optimizer state and model weights so all workers start
            # from identical parameters.
            optimizer = hvd.DistributedOptimizer(optimizer, named_parameters=seq2seq.named_parameters())
            hvd.broadcast_optimizer_state(optimizer, root_rank=0)
            hvd.broadcast_parameters(seq2seq.state_dict(), root_rank=0)

        if opt.use_apex:
            # Mixed precision via apex AMP at opt level O1 (patched casts).
            # NOTE(review): this branch skips the Optimizer wrapper below, so
            # gradient clipping (max_grad_norm) is NOT applied under apex —
            # confirm this is intentional.
            seq2seq, optimizer = amp.initialize(seq2seq, optimizer, opt_level='O1')

        else:
            # Project Optimizer wrapper adds gradient-norm clipping and an
            # optional LR scheduler driven by dev loss.
            optimizer = Optimizer(optimizer, max_grad_norm=opt.clip_grad)
            if opt.decay_factor:
                optimizer.set_scheduler(torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer.optimizer, 'min', factor=opt.decay_factor, patience=1))
            # optimizer.set_scheduler(WarmupLinearSchedule(optimizer, warmup_steps=args.warmup_steps, t_total=num_train_optimization_steps))

        # Prepare trainer and train
        t = SupervisedTrainer(loss=loss,
                              model_name=model_name, 
                              model_dir=opt.model_dir,
                              best_model_dir=opt.best_model_dir,
                              batch_size=opt.batch_size,
                              checkpoint_every=opt.checkpoint_every,
                              print_every=opt.print_every,
                              max_epochs=opt.max_epochs,
                              max_steps=opt.max_steps,
                              max_checkpoints_num=opt.max_checkpoints_num,
                              best_ppl=opt.best_ppl,
                              device=device,
                              multi_gpu=multi_gpu,
                              logger=logger)

        # start_step lets a resumed run skip already-completed steps.
        seq2seq = t.train(seq2seq, 
                          data=train,
                          start_step=opt.skip_steps, 
                          dev_data=dev,
                          optimizer=optimizer,
                          teacher_forcing_ratio=opt.teacher_forcing_ratio,
                          accumulation_steps=opt.accumulation_steps,
                          use_apex=opt.use_apex)

    elif opt.phase == "infer":
        # Predict
        predictor = Predictor(seq2seq, src_vocab.word2idx, tgt_vocab.idx2word, device)

        while True:
            seq_str = input("Type in a source sequence:")
            seq = list(seq_str.strip())
            ans = predictor.predict_n(seq, n=opt.beam_width) \
                if opt.beam_width > 1 else predictor.predict(seq)
            print(' '.join(ans))

    elif opt.phase == 'test':
        trans_data = TranslateData(pad_id)
        test_set = DialogDataset(opt.test_post_path,
                                  opt.test_resp_path,
                                  trans_data.translate_data,
                                  src_vocab,
                                  tgt_vocab,
                                  max_src_length=opt.max_src_length,
                                  max_tgt_length=opt.max_tgt_length,
                                  max_his_length=opt.max_his_length,
                                  his_data_fp=opt.test_his_path)
        train_sampler = dist.DistributedSampler(train_set, num_replicas=hvd.size(), rank=hvd.rank()) \
                            if multi_gpu else None
        test = DataLoader(test_set, 
                           batch_size=1, 
                           shuffle=False if multi_gpu else True,
                           sampler=train_sampler,
                           drop_last=True,
                           collate_fn=trans_data.collate_fn)
        fp_out = open('./experiment_legal_synthesizer/results.txt', 'w')
        with torch.no_grad():
            for batch in test:
                src_variables = batch['src'].to(device)
                tgt_variables = batch['tgt'].to(device)
                src_lens = batch['src_len'].view(-1).to(device)
                tgt_lens = batch['tgt_len'].view(-1).to(device)
                his_variable = None
                his_seg_variable = None
                if model_name == 'MoeTransformer':
                    his_variable = batch['his'].to(device)
                    his_seg_variable = batch['his_seg'].to(device)

                generated = []
                pre = []
                length = 0
                decoded_words = np.zeros((1, 50), dtype=np.int)
                repetition_penalty = 2.0
                temperature = 1.0
                sampled = True
                if model_name == 'MoeTransformer':
                    decoder_outputs = seq2seq(src_variables, his_variable, his_seg_variable)
                else:
                    decoder_outputs = seq2seq(src_variables)
                if sampled:
                    for di, decoder_di in enumerate(decoder_outputs):
                        decoder_di = F.softmax(decoder_di, dim=-1)
                        for _id in generated:
                            decoder_di[:, _id] /= repetition_penalty
                        decoder_di = decoder_di / temperature
                        # decoder_di[:,0] = -float('Inf')
                        filtered_logits = top_k_top_p_filtering(decoder_di[0], top_k=3, top_p=0)
                        next_token = torch.multinomial(F.softmax(filtered_logits, dim=-1), num_samples=1)
                        generated.append(next_token.item())
                        ni = next_token.squeeze().data.cpu().numpy() 
                        decoded_words[:,di]=ni
                        length += 1
                        if next_token.item() == 3:  # 遇到[SEP]则表明response生成结束
                            break
                    sample_words = decoded_words[0]
                    for idx in range(length):
                        word = sample_words[idx]
                        pre.append(tgt_vocab.idx2word[word])
                        if word == 3:
                            break
                    fp_out.write(' '.join(pre[:-1])+ '\n')
                    print(' '.join(pre))
        fp_out.close()

    # elif opt.phase == 'test':
    #     while True:
    #         tgt = ''
    #         seq_str = input("Type in a source sequence:")
    #         seq_str = seq_str.strip().split(' ')
    #         x = torch.LongTensor([src_vocab.word2idx[tok] for tok in seq_str]).view(1, -1).to(device)
    #         y = sample(seq2seq, x, 50, temperature=0.9, sample=True, top_k=5)[0]
    #         tgt = ' '.join([tgt_vocab.idx2word[int(i)] for i in y])
    #         print(tgt)
            
            


