# -*- coding: utf-8 -*-

import json
import logging
import os
import numpy as np
import torch
from functools import partial
from tqdm import tqdm
import matplotlib.pyplot as plt

from .utils import comply_rule, poem_format, ilogging

EVALPATH = './Eval/'

logger = logging.getLogger(__name__)
plogging = partial(ilogging, logger=logger)

"""
repport training results.
"""
def report_current_metric(EXPPATH, target_file_pre='cpg_eval', target_file_suffix='.json', model_type='LSTM', metric='perplexity'):
    """
    Collect per-epoch evaluation metrics from result files in EXPPATH.

    Result files are expected to be named like
    ``<target_file_pre>.<data_type>.<model_type>.<epoch>.json`` (5 dot-separated
    parts) and to contain a JSON object with ``metric`` as a key.

    @param EXPPATH: directory containing the per-epoch evaluation JSON files.
    @param target_file_pre: filename prefix filter.
    @param target_file_suffix: filename suffix filter.
    @param model_type: only files whose model field matches are collected.
    @param metric: key to read from each result JSON (rounded to 2 decimals).
    @return: dict mapping data_type -> list of (epoch_str, metric) pairs,
             sorted by epoch number.
    """
    data2metric = {}
    for fn in os.listdir(EXPPATH):
        fn_splits = fn.split('.')
        if not (fn.startswith(target_file_pre) and fn.endswith(target_file_suffix) and len(fn_splits) == 5):
            continue
        _, data_type, model_str, epoch, _ = fn_splits
        if model_str != model_type:
            continue
        # fix: use a context manager so the file handle is always closed
        with open(os.path.join(EXPPATH, fn), 'r') as f:
            res = json.load(f)
        data2metric.setdefault(data_type, []).append((epoch, round(res[metric], 2)))
    # sort each data split's entries by numeric epoch
    for data_type, metrics in data2metric.items():
        data2metric[data_type] = sorted(metrics, key=lambda x: int(x[0]))
    return data2metric

def report_cur_best_result(all_data_metric):
    """
    Select the best epoch on the dev split and report the test metric there.

    "Best" means the smallest dev metric value (lower-is-better, e.g.
    perplexity).

    @param all_data_metric: dict with 'dev' and 'test' keys, each a list of
        (epoch_str, metric) pairs.
    @return: (best_epoch, test_metric_at_that_epoch)
    """
    best_epoch = min(all_data_metric['dev'], key=lambda pair: pair[1])[0]
    test_lookup = {epoch: value for epoch, value in all_data_metric['test']}
    return best_epoch, test_lookup[best_epoch]

def aggregate_metric_epoch(output_path, model_type='LSTM', metric='perplexity', save=False, fig_path='./metric_epoch.png'):
    """
    Print the best dev-selected metric and optionally plot metric-vs-epoch curves.

    @param output_path: directory with per-epoch evaluation JSON files.
    @param model_type: model name filter passed to report_current_metric.
    @param metric: metric key to aggregate (lower is treated as better).
    @param save: when True, save a dev/test metric-vs-epoch plot.
    @param fig_path: where to save the plot (default kept for compatibility).
    @return: best epoch (as selected on the dev split).
    """
    data_metric = report_current_metric(output_path, model_type=model_type, metric=metric)
    best_epoch, best_metric = report_cur_best_result(data_metric)
    print(f"best {metric}: {best_metric}, epoch: {best_epoch}")
    if save:
        dev_metric = [p[1] for p in data_metric['dev']]
        test_metric = [p[1] for p in data_metric['test']]
        # dev and test may have been evaluated for different numbers of epochs;
        # truncate both to the common length so the curves align
        epoch_num = min(len(dev_metric), len(test_metric))
        dev_metric = dev_metric[:epoch_num]
        test_metric = test_metric[:epoch_num]
        epochs = list(range(1, epoch_num + 1))
        plt.figure(figsize=(10, 6))
        plt.plot(epochs, dev_metric, color='b', label='dev', linewidth=1)
        plt.plot(epochs, test_metric, color='g', label='test', linewidth=1)
        plt.axvline(x=int(best_epoch), ls="--", c="black", label='current_best', linewidth=0.8)

        plt.xlabel('epoch')
        plt.ylabel(metric)
        # coarser x-axis ticks when there are many epochs
        loc = 10 if epoch_num > 30 else 1
        ax = plt.gca()
        ax.xaxis.set_major_locator(plt.MultipleLocator(loc))
        plt.legend()
        plt.title(f"{metric} - epochs")
        plt.savefig(fig_path)
        plt.close()  # fix: release the figure; repeated calls previously leaked figures

    return best_epoch

"""
Chinese poetry generation
"""

def generate_poetry(forward_func, vocab, batch_first, mode='prefix', prefix='好', head='我好想你', beam_size=10, pnum=1, seed=0):
    """
    Apply the language model for poetry generation with a sampled beam search.

    @param forward_func: the forward function of the language model; takes a
        list of token lists and returns (output, hidden) where output holds
        per-position logits over the vocabulary.
    @param vocab: the vocabulary; must provide get_id2word(idx).
    @param batch_first: whether the model output is (batch, seq, vocab) rather
        than (seq, batch, vocab).
    @param mode: options {'prefix', 'head'}; 'prefix' continues the given
        prefix, 'head' builds an acrostic whose sentences start with the
        characters of `head`.
    @param prefix: prefix characters used when mode == 'prefix'.
    @param head: acrostic head characters used when mode == 'head'
        (assumes one char per sentence, 4 sentences — TODO confirm).
    @param beam_size: beam width used for expansion and pruning.
    @param pnum: number of poems to return.
    @param seed: numpy seed controlling the sampling inside select_idxes.
    @return: list of `pnum` formatted poem strings.
    """
    np.random.seed(seed)
    mode_options = ['prefix', 'head']
    candidates = []
    if mode == 'prefix':
        prefix_sents = [['<s>'] + list(prefix)]
    elif mode == 'head':
        prefix_sents = [['<s>'] + [head[0]]]
    else:
        raise ValueError(f'Invalid mode: {mode}. Options: {mode_options}')
    output, _ = forward_func(prefix_sents)
    # logits at the last position of the (single) input sequence
    last_char = output[0, -1, :] if batch_first else output[-1, 0, :]
    last_char_prob = torch.softmax(last_char, dim=-1).detach()
    char_idxes = select_idxes(last_char_prob, beam_size)
    for char_idx in char_idxes:
        candidates.append({'wordlist': [*prefix_sents[0], vocab.get_id2word(char_idx.item())],
                           'sent_num': 0,
                           'prob': np.log(last_char_prob[char_idx].item())})
    update = True   # True while at least one candidate was extended in the last pass
    while update:
        # BUGFIX: the flag was previously reset to True each pass and set to
        # False whenever ANY candidate was finished, so the loop stopped as
        # soon as the first candidate completed. Now the loop runs until ALL
        # candidates are finished (4 sentences or an explicit '</s>').
        update = False
        new_candidates = []
        for candi in candidates:
            if candi['sent_num'] == 4 or candi['wordlist'][-1] == '</s>':
                # finished candidate: carry it forward unchanged
                new_candidates.append(candi)
                continue
            update = True
            output, _ = forward_func([candi['wordlist']])
            last_char = output[0, -1, :] if batch_first else output[-1, 0, :]
            last_char_prob = torch.softmax(last_char, dim=-1).detach()
            char_idxes = select_idxes(last_char_prob, beam_size)
            for char_idx in char_idxes:
                new_char = vocab.get_id2word(char_idx.item())
                # acrostic mode: right after a sentence separator '|', force the
                # next sentence to begin with the next character of `head`
                if mode == 'head' and candi['wordlist'][-1] == '|':
                    new_char = head[candi['sent_num']]
                new_candidates.append({'wordlist': [*list(candi['wordlist']), new_char],
                                       'sent_num': candi['sent_num'] + int(new_char == '|'),
                                       'prob': candi['prob'] + np.log(last_char_prob[char_idx.item()].item())})

        # prune the beam: keep the beam_size most probable candidates
        new_candidates = sorted(new_candidates, key=lambda x: -x['prob'])
        candidates = new_candidates[:beam_size]

    chars = [''.join(poem_format(cand['wordlist'])) for cand in candidates[:pnum]]

    return chars

def select_idxes(output, beam_size, sample=True):
    """
    Pick candidate token indices from a probability vector.

    With sample=True: take the top `beam_size` entries, renormalize their
    probabilities, and randomly draw sqrt(beam_size) distinct indices weighted
    by those probabilities. With sample=False: deterministically return the
    top sqrt(beam_size) indices.
    """
    n_pick = int(np.sqrt(beam_size))
    if not sample:
        return torch.topk(output, n_pick).indices.cpu().numpy()
    top_idxes = torch.topk(output, beam_size).indices.cpu().numpy()
    top_probs = output[top_idxes]
    weights = (top_probs / torch.sum(top_probs)).cpu().numpy()
    return np.random.choice(top_idxes, size=n_pick, replace=False, p=weights)

def evaluate_model_on_set(model, batch_first, dump_name):
    """
    Generate poems for the whole evaluation prompt set and dump the results.

    Reads prefixes and acrostic heads from ``EVALPATH/eval_file.json``,
    generates five- and seven-character poems for each prompt (both 'prefix'
    and 'head' modes), and writes everything to
    ``EVALPATH/result/<dump_name>.json``.

    @param model: model exposing generate_poetry(batch_first, mode=..., ...).
    @param batch_first: forwarded to model.generate_poetry.
    @param dump_name: basename (without extension) of the result file.
    """
    eval_fp = os.path.join(EVALPATH, 'eval_file.json')
    dump_path = os.path.join(EVALPATH, 'result', f'{dump_name}.json')
    # fix: use context managers so file handles are always closed
    with open(eval_fp, 'r') as f:
        eval_dict = json.load(f)

    plogging('Start model evaluation')
    # start tokens selecting five- vs seven-character lines
    five_tok = '<f>'
    seven_tok = '<s>'
    prefix_poems = {'five': {}, 'seven': {}}
    for prefix in tqdm(eval_dict['prefix'], desc='prefix'):
        poems_5 = model.generate_poetry(batch_first, mode='prefix', prefix=prefix, beam_size=5, start_token=five_tok)
        poems_7 = model.generate_poetry(batch_first, mode='prefix', prefix=prefix, beam_size=5, start_token=seven_tok)
        prefix_poems['five'][prefix] = poems_5
        prefix_poems['seven'][prefix] = poems_7

    head_poems = {'five': {}, 'seven': {}}
    for head in tqdm(eval_dict['head'], desc='head'):
        poems_5 = model.generate_poetry(batch_first, mode='head', head=head, beam_size=5, start_token=five_tok)
        poems_7 = model.generate_poetry(batch_first, mode='head', head=head, beam_size=5, start_token=seven_tok)
        head_poems['five'][head] = poems_5
        head_poems['seven'][head] = poems_7

    all_poems = dict(prefix=prefix_poems, head=head_poems)
    # robustness: make sure the result directory exists before writing
    os.makedirs(os.path.dirname(dump_path), exist_ok=True)
    with open(dump_path, 'w') as f:
        json.dump(all_poems, f)
    plogging(f"Successfully eval the model and dump to {dump_path}")
