import os
import sys
from datasets import load_metric
from transformers import AutoTokenizer
from metrics.reddit.tokenizers import clean_str
from metrics.reddit.metrics import nlp_metrics


class EvaluateTool(object):
    """Evaluator for multi-reference dialogue response generation.

    On construction, downloads/unpacks the METEOR-1.5 toolkit into
    ``3rdparty/``; scoring is delegated to the project's ``nlp_metrics``
    (NIST, BLEU, METEOR, entropy, diversity, average length).
    """

    def __init__(self, args, model_dir='microsoft/DialoGPT-medium'):
        self.args = args
        # NOTE(review): the HuggingFace BLEU metric and tokenizer are
        # deliberately disabled; `_hugging_face_bleu` depends on
        # `self.bleu` / `self.tokenizer` and will raise AttributeError
        # until these lines are restored.
        # self.bleu = load_metric('bleu')
        # self.tokenizer = AutoTokenizer.from_pretrained(model_dir, use_fast=False)
        self._download_3rdparty()

    @staticmethod
    def _download_3rdparty():
        """Download and unpack METEOR-1.5 into '3rdparty/' (no-op if present).

        Exits the process with the shell's return code if any step fails.
        """
        print('Downloading 3rdparty...')

        meteor_path = '3rdparty/meteor-1.5'
        meteor_dir_path = os.path.dirname(meteor_path)  # '3rdparty'
        if os.path.exists(meteor_path):
            print('3rdparty exists, skip downloading')
            return

        os.makedirs(meteor_dir_path, exist_ok=True)

        meteor_1_5_url = 'http://www.cs.cmu.edu/~alavie/METEOR/download/meteor-1.5.tar.gz'
        save_path = 'meteor-1.5.tar.gz'

        # Build one shell command line: cd into 3rdparty, fetch and unpack the
        # tarball (reusing a previously downloaded archive if one exists),
        # then cd back.
        cmd = list()
        cmd.extend(['cd', meteor_dir_path, ';'])

        if not os.path.exists(os.path.join(meteor_dir_path, save_path)):
            cmd.extend(['wget', meteor_1_5_url, '-O', save_path, ';'])
            cmd.extend(['tar', '-zxvf', save_path, ';'])
            cmd.extend(['rm', save_path, ';'])
        else:
            cmd.extend(['tar', '-zxvf', save_path, ';'])

        cmd.extend(['cd', '..', ';'])

        ret = os.system(' '.join(cmd))
        if ret != 0:
            sys.exit(ret)

    def _evaluation(self, temp_preds, temp_golds, section, limit=49):
        """Score predictions against multi-reference golds via ``nlp_metrics``.

        Args:
            temp_preds: list of prediction strings, index-aligned with
                ``temp_golds``.
            temp_golds: list of dicts; each ``gold['seq_out']`` is a list of
                reference strings for that example.
            section: unused here; kept for interface compatibility with
                ``evaluate``.
            limit: examples with more than ``limit`` references are skipped
                (one reference file is written per reference column).

        Returns:
            Tuple ``(metrics dict, BLEU-4 value)``; the second item is the
            headline score.
        """
        # Collect cleaned references, remembering which example indices to skip.
        golds = list()
        skip_idx = list()
        for idx, gold in enumerate(temp_golds):
            if len(gold['seq_out']) <= limit:
                golds.append([clean_str(s) for s in gold['seq_out']])
            else:
                skip_idx.append(idx)

        # Drop predictions whose gold entry was skipped; `skip_idx` is
        # ascending, so a single cursor `cnt` suffices for the merge.
        cnt = 0
        preds = list()
        for idx, pred in enumerate(temp_preds):
            if cnt >= len(skip_idx) or idx != skip_idx[cnt]:
                preds.append(pred)
            else:
                cnt += 1
        preds = [clean_str(pred) for pred in preds]

        assert len(preds) == len(golds)

        # Transpose golds into `max_len` reference columns; examples with
        # fewer references are padded by repeating their last reference so
        # every column has exactly one entry per example.
        max_len = max([len(gold) for gold in golds])
        print(max_len)  # debug: number of reference files that will be written
        refs = [list() for _ in range(max_len)]
        for gold in golds:
            if len(gold) < max_len:
                gold.extend([gold[-1]] * (max_len - len(gold)))

            for idx, s in enumerate(gold):
                refs[idx].append(s)

        assert len(refs[0]) == len(preds)

        # Write hypothesis/reference files in the layout nlp_metrics expects.
        os.makedirs('temp', exist_ok=True)
        path_hyp = f'temp/{self.args.name}_hyp.txt'
        path_refs = [f'temp/{self.args.name}_ref{idx}.txt' for idx in range(max_len)]

        with open(path_hyp, 'w') as file:
            file.writelines([hyp + '\n' for hyp in preds])

        for idx in range(max_len):
            with open(path_refs[idx], 'w') as file:
                file.writelines([ref + '\n' for ref in refs[idx]])

        # calc metrics
        nist, bleu, meteor, entropy, diversity, avg_len = nlp_metrics(path_refs=path_refs, path_hyp=path_hyp)

        res = dict()
        for idx, n in enumerate(nist):
            res[f'NIST-{idx + 1}'] = n

        for idx, b in enumerate(bleu):
            res[f'BLEU-{idx + 1}'] = b

        # BUG FIX: these two loops previously read `for idx, ent in entropy:`
        # (no enumerate), which tries to unpack each metric value as an
        # (idx, value) pair — a TypeError for scalar entries. Mirror the
        # NIST/BLEU loops above.
        for idx, ent in enumerate(entropy):
            res[f'Entropy-{idx + 1}'] = ent

        for idx, div in enumerate(diversity):
            res[f'Diversity-{idx + 1}'] = div

        res['METEOR'] = meteor
        res['Avg Len'] = avg_len

        return res, res['BLEU-4']

    def _hugging_face_bleu(self, preds, golds):
        """Compute corpus BLEU-2/BLEU-4 with the HuggingFace ``bleu`` metric.

        Groups (pred, gold) pairs by context so that repeated contexts
        contribute one prediction with the union of their references.

        NOTE(review): requires ``self.bleu`` and ``self.tokenizer``, which
        are commented out in ``__init__`` — restore them before calling.

        Returns:
            Tuple ``({'bleu-4': ..., 'bleu-2': ...}, bleu-4 value)``.
        """
        multi_ref_data = dict()
        for pred, gold in zip(preds, golds):
            context = gold['context']
            seq_out = gold['seq_out']
            assert type(seq_out) is list

            if context not in multi_ref_data:
                multi_ref_data[context] = {
                    'pred': pred,
                    'golds': seq_out
                }
            else:
                # The same context must always map to the same prediction.
                assert pred == multi_ref_data[context]['pred']

                multi_ref_data[context]['golds'].extend(seq_out)

        bleu_preds_tokenizer = list()
        bleu_golds_tokenizer = list()
        for value in multi_ref_data.values():
            p = value['pred']
            gs = value['golds']

            bleu_preds_tokenizer.append(self.tokenizer.tokenize(p))
            bleu_golds_tokenizer.append([self.tokenizer.tokenize(g) for g in gs])

        bleu_4 = self.bleu.compute(predictions=bleu_preds_tokenizer, references=bleu_golds_tokenizer,
                                   max_order=4)
        bleu_2 = self.bleu.compute(predictions=bleu_preds_tokenizer, references=bleu_golds_tokenizer,
                                   max_order=2)

        return {'bleu-4': bleu_4['bleu'], 'bleu-2': bleu_2['bleu']}, bleu_4['bleu']

    def evaluate(self, preds, golds, section):
        """Public entry point: delegate scoring to ``_evaluation``."""
        return self._evaluation(preds, golds, section)
