import torch
import pandas as pd
import numpy as np
from src import const
import importlib
import argparse
import json
import os
import nltk


def merge_const(module_name):
    """Import ``module_name`` and copy its public names onto ``src.const``.

    Every attribute that does not start with an underscore overrides (or
    adds to) the corresponding attribute of the shared ``const`` module,
    logging each override to stdout.
    """
    override_module = importlib.import_module(module_name)
    for name, val in vars(override_module).items():
        if name.startswith('_'):
            continue  # skip dunders/privates such as __name__, __file__
        setattr(const, name, val)
        print('override', name, val)


def parse_args_and_merge_const():
    """Parse command-line flags and apply any ``--conf`` const overrides.

    Flags:
      --conf   optional module path whose public names override ``src.const``
      --model  checkpoint identifier (used for evaluation, not for training)

    Returns the parsed :class:`argparse.Namespace`.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--conf', default='', type=str)
    parser.add_argument('--model', default='', type=str)  # for evaluation, not for training
    args = parser.parse_args()
    if args.conf:  # default is '', which is falsy
        merge_const(args.conf)
    return args


class Vocab(object):
    """Word-to-id vocabulary built from a whitespace-tokenized corpus.

    Keeps the ``max_vocab`` most frequent words. Any word outside that set
    maps to a single shared unknown id ``len(self.words)``, so the full id
    space is ``len(self.words) + 1`` (see ``__len__``).
    """

    def __init__(self, text="", max_vocab=None, filepath=None):
        """Build the vocabulary from text, or load a saved one.

        Args:
            text: corpus as a whitespace-separated string or a token list.
                Ignored when ``filepath`` is given.
            max_vocab: keep at most this many words; defaults to
                ``const.MAX_VOCAB_SIZE``.
            filepath: if given, load a vocabulary previously written by
                :meth:`save` instead of building one from ``text``.
        """
        if max_vocab is None:
            max_vocab = const.MAX_VOCAB_SIZE
        # Unknown words map to id max_vocab; total size is max_vocab + 1.
        if filepath is None:
            # Counter.most_common is exactly what nltk.FreqDist delegates to
            # (FreqDist subclasses Counter), so ordering is identical while
            # dropping the third-party dependency on this path.
            from collections import Counter
            if isinstance(text, str):
                text = text.split()
            counts = Counter(text)
            self.words = [word for word, _ in counts.most_common(max_vocab)]
            self.word2id = {word: idx for idx, word in enumerate(self.words)}
        else:
            import pickle
            # NOTE(review): pickle.load on an untrusted file can execute
            # arbitrary code — only load vocab files you produced yourself.
            with open(filepath, 'rb') as f:
                [self.words, self.word2id] = pickle.load(f)

    def __len__(self):
        # +1 for the shared unknown id.
        return len(self.words) + 1

    def to_id(self, word):
        """Return the id for ``word``; out-of-vocab words share one id."""
        if word in self.word2id:
            return self.word2id[word]
        else:
            return len(self.word2id)  # unknown

    def to_word(self, idx):
        """Inverse of :meth:`to_id`; any id past the vocab is ``'<unk>'``."""
        if idx < len(self.word2id):
            return self.words[idx]
        else:
            return '<unk>'

    def save(self, filepath):
        """Pickle ``[words, word2id]`` to ``filepath`` (see ``__init__``)."""
        import pickle
        with open(filepath, 'wb') as f:
            pickle.dump([self.words, self.word2id], f)

    def get_embeddings(self, embedding_path=None):
        """Return a ``(len(self), WORD_EMBED_SIZE)`` embedding matrix.

        Rows for words found in the pretrained word2vec model are copied
        from it; all other rows (including the unknown id) keep a uniform
        random init in [-0.25, 0.25).

        Args:
            embedding_path: binary word2vec file; defaults to
                ``const.EMBED_PRETRAINED_PATH``, resolved at call time (not
                at definition time) so ``merge_const`` overrides are honored.
        """
        import gensim
        if embedding_path is None:
            embedding_path = const.EMBED_PRETRAINED_PATH
        print('Please wait ... (it could take a while to load the file : {})'.format(embedding_path))
        model = gensim.models.KeyedVectors.load_word2vec_format(embedding_path, binary=True)
        self.model = model

        embedding_weights = np.random.uniform(-0.25, 0.25, (len(self), const.WORD_EMBED_SIZE))

        for word, word_id in self.word2id.items():
            # `word in model` works on KeyedVectors in both gensim 3.x and
            # 4.x, unlike `model.wv.vocab` which gensim 4 removed.
            if word in model:
                embedding_weights[word_id, :] = model[word]
        return embedding_weights


def build_vocab(js):
    """Build a :class:`Vocab` from every item name in the outfit JSON ``js``.

    ``js`` is an iterable of outfits, each carrying an ``'items'`` list whose
    entries have a ``'name'`` string; the stripped names are joined into one
    whitespace-separated corpus.
    """
    name_stream = (item["name"].strip() for outfit in js for item in outfit['items'])
    return Vocab(' '.join(name_stream))


def load_json(file_path):
    """Load and return the JSON document at ``file_path``.

    The file is opened as UTF-8 explicitly so parsing does not depend on
    the platform's default text encoding.
    """
    with open(file_path, encoding='utf-8') as f:
        return json.load(f)


class CompatibilityBenchmarkHelper(object):
    """Outfit-compatibility benchmark: scores a dataset and reports ROC-AUC."""

    def __init__(self, dataset, net):
        self.net = net
        self.dataset = dataset
        self.dataloader = torch.utils.data.DataLoader(self.dataset, batch_size=const.VAL_BATCH_SIZE, shuffle=False, num_workers=4)

    def get_benchmark(self):
        """Score every batch and return ROC-AUC against ``dataset.labels``."""
        from sklearn.metrics import roc_auc_score
        self.comp_metrics = []
        self.net.eval()
        with torch.no_grad():
            for step, batch in enumerate(self.dataloader, start=1):
                if step % 10 == 0:
                    print("Evaluate Compatibility Step[{}/{}]".format(step, len(self.dataloader)))
                # Move tensor-valued fields to the configured device.
                for key in batch:
                    if isinstance(batch[key], torch.Tensor):
                        batch[key] = batch[key].to(const.device)
                # The "outfit_comp_metrics" entry of the loss is used as the score.
                output = self.net(batch)
                if hasattr(self.net, 'score_compatibility'):
                    # Prefer the model's dedicated scoring hook when present.
                    score = self.net.score_compatibility(output)
                else:
                    # Fall back to the metric the loss computation exposes.
                    score = self.net.cal_loss(batch, output)['outfit_comp_metrics']
                self.comp_metrics.append(score)
        self.comp_metrics = torch.cat(self.comp_metrics).detach().cpu().numpy()
        return roc_auc_score(self.dataset.labels, self.comp_metrics)


class FITBBenchMarkHelper(object):
    """Fill-in-the-blank (FITB) accuracy benchmark.

    A prediction counts as correct when ``net.fitb_ans(sample)`` returns 0
    — presumably the ground-truth choice sits at index 0 in each sample;
    TODO(review): confirm against the dataset construction.
    """

    def __init__(self, dataset, net):
        self.net = net
        self.dataset = dataset

    def get_benchmark(self):
        """Return FITB accuracy in [0, 1]; 0.0 for an empty dataset."""
        with torch.no_grad():
            self.net.eval()
            cnt = 0
            correct = 0
            for i in range(len(self.dataset)):
                cnt += 1
                if cnt % 10 == 0:
                    print("Evaluate FITB Step[{}/{}] Accuracy: {}".format(cnt, len(self.dataset), correct / cnt))
                sample = self.dataset[i]
                # Move tensor-valued fields to the configured device.
                for key in sample:
                    if isinstance(sample[key], torch.Tensor):
                        sample[key] = sample[key].to(const.device)
                if self.net.fitb_ans(sample) == 0:
                    correct += 1
            # Guard against ZeroDivisionError when the dataset is empty.
            if cnt == 0:
                return 0.0
            return float(correct) / cnt
