import pickle
import numpy as np
import torch
from torch import nn
from evaluate import evaluavtion_triple
from config import EMBEDDING_DIM, ENCODER_DIM, DECODER_DIM
from model.model_d import PairLoss, E2EModel_cus
from pre_process import load_data, get_batch
import torch.nn.functional as F

from util import find_pair

BASE_DIR = '../data/'
EXP_DIR = '../experiments/'
EXP_RESULTS_DIR = '../experiments/results/'
EXP_LOG_DIR = '../experiments/log/'
EXP_MODEL_DIR = '../experiments/model/'

# Lookup tables and the test split serialized by the preprocessing step.
# NOTE: pickle.load executes arbitrary code on load — only open trusted files.
ix2tag = pickle.load(open(BASE_DIR + 'ix2tag.pkl', 'rb'))
test_data = pickle.load(open(BASE_DIR + 'test_data_d.pkl', 'rb'))
# word_vec = pickle.load(open(BASE_DIR + 'wv_matrix.pkl', 'rb'))
word_vec = None  # no pre-trained embedding matrix; the model initializes its own
word2ix = pickle.load(open(BASE_DIR + 'word_index.pkl', 'rb'))
ix2dist = pickle.load(open(BASE_DIR + 'ix2dist.pkl', 'rb'))
# e2emodel = E2EModel_cus(len(word2ix)+1, EMBEDDING_DIM,ENCODER_DIM, word_vec,
#                         len(ix2tag)+1,len(ix2dist)+1, DECODER_DIM)
# The +1 on vocab/tag/dist sizes reserves index 0 for padding.
e2emodel = E2EModel_cus(len(word2ix)+1, EMBEDDING_DIM,ENCODER_DIM, word_vec,
                        len(ix2tag)+1, DECODER_DIM, len(ix2dist)+1)
# Restore trained weights onto CPU regardless of the device they were saved from.
params = torch.load(EXP_MODEL_DIR+"2018_10_05/e2emodel_01_46.pkl", map_location="cpu")
e2emodel.load_state_dict(params)
# Runtime knobs: target device, batch size, and class-weight boost `alpha`
# applied to non-trivial tags in the loss.
user_config = {
    'device':'cpu',
    'batch':64,
    'alpha':10
}


def test_model(model, data, user_config, ix2tag, ix2dist):
    """Run `model` over `data` in evaluation mode and decode predictions.

    Args:
        model: end-to-end tagging model; called as ``model(x)`` and expected
            to return ``(tag_score, dist_score)``.
        data: dataset of ``(sentence, tags, dists)`` triples consumed by
            ``get_batch``.
        user_config: dict with keys ``'device'``, ``'batch'`` and ``'alpha'``
            (class-weight boost applied to all tags beyond the first).
        ix2tag: index -> tag mapping used to decode predicted/target indices.
        ix2dist: index -> distance mapping; currently unused (the distance
            loss is commented out) but kept for interface compatibility.

    Returns:
        ``(res, s_score)`` where ``res`` is a list of
        ``[predicted_tags, target_tags]`` pairs per sentence and ``s_score``
        is the list of raw per-batch tag score tensors.
    """
    device = user_config['device']
    batch_size = user_config['batch']
    alpha = user_config['alpha']
    model.to(device)

    # Idiomatic eval-mode switch (was model.train(False)): disables
    # dropout / batch-norm updates during inference.
    model.eval()

    # Class weights: padding index 0 and the first real tag keep weight 1.0,
    # every remaining tag is boosted by `alpha`.
    bias = [1.0, 1.0]
    bias.extend(alpha for _ in range(len(ix2tag) - 1))
    tag_criterion = nn.CrossEntropyLoss(
        ignore_index=0, weight=torch.tensor(bias, device=device))

    res = []
    losses = []
    s_score = []
    # Inference only: no_grad avoids building the autograd graph and keeps
    # memory flat across the whole test set.
    with torch.no_grad():
        for batch in get_batch(batch_size, data):
            sents, tags, dists = zip(*batch)
            x = torch.tensor(sents, device=device)
            y = torch.tensor(tags, device=device)
            d = torch.tensor(dists, device=device)

            # The final batch may be smaller than the configured size; use a
            # per-batch value instead of mutating the configured one.
            cur_batch = min(len(sents), batch_size)
            masked = x.gt(0).view(-1)                       # flattened padding mask
            target = masked.view(cur_batch, -1) * y.byte()  # zero out padded targets

            tag_score, dist_score = model(x)
            tag_score = tag_score * masked.unsqueeze(1).float()
            pre_m = torch.argmax(tag_score, 1).view(cur_batch, -1)
            tag_loss = tag_criterion(tag_score, y.view(-1))
            # dist_score = dist_score * masked.unsqueeze(1).float()
            # dist_loss = dist_criterion(dist_score, d.view(-1))
            loss = tag_loss
            s_score.append(tag_score)

            losses.append(float(loss))

            # Decode index sequences back to tags, skipping positions the
            # model predicted as padding (p == 0).
            for p_tag, t_tag in zip(pre_m.tolist(), target.tolist()):
                p_sent = []
                t_sent = []

                for p, t in zip(p_tag, t_tag):
                    if p > 0:
                        try:
                            p_sent.append(ix2tag[p])
                            t_sent.append(ix2tag[t])
                        except KeyError:
                            # Target index missing from the vocabulary
                            # (typically the masked 0): record a placeholder.
                            # NOTE(review): if ix2tag[p] itself raised,
                            # p_sent/t_sent would go out of sync — original
                            # behavior preserved as-is; confirm all argmax
                            # indices are covered by ix2tag.
                            t_sent.append([''])
                res.append([p_sent, t_sent])

    model.train()
    return res, s_score


# Run the trained model over the test split and collect decoded predictions
# plus the raw per-batch score tensors.
res, tag_score = test_model(e2emodel, test_data, user_config, ix2tag, ix2dist)
# Reshape the first batch's flat scores back to (batch, seq_len, n_tags).
# NOTE(review): 64 and 50 are hard-coded batch size / sequence length and
# assume the first batch is full — confirm against get_batch's output.
sample = tag_score[0].view(64, 50, -1)
# Hand-picked (start, end) index pairs used to probe PairLoss.
pair = [[(2,5)], [(23,31)], [(14,19)]]
# tag_score = torch.cat().view(64,50,-1)
# sim_crit = torch.nn.CosineEmbeddingLoss()
sim_crit = PairLoss(device='cpu')
label = torch.tensor([1.0])  # unused: leftover from the CosineEmbeddingLoss experiment
x1 = sample[0][0].unsqueeze(0)  # unused probe vectors (first two token scores)
x2 = sample[0][1].unsqueeze(0)
loss = sim_crit(sample, pair)
print(loss)
# loss.backward()
# "evaluavtion_triple" is the (misspelled) name exported by evaluate.py;
# renaming it here would break the import.
eval_res = evaluavtion_triple(res)
print(eval_res)