import torch
import numpy as np

from Dataloader.twitterloader import valid_data_ID
from Dataloader.twitterloader import valid_data_len
from Dataloader.twitterloader import valid_data_y
from Dataloader.twitterloader import data

from Dataloader.twitterloader import word2vec
from sklearn.metrics import accuracy_score, precision_score, recall_score


def accuracy_on_valid_data(rdm_model=None, sent_pooler=None, rdm_classifier=None, new_data_len=None, cuda=True,
                           max_seq_len=50):
    """Evaluate the RDM pipeline on the validation set.

    Iterates over the validation tweets in batches of 20, embeds each word
    with ``word2vec`` (OOV words fall back to a composed
    "{ an unknown word }" vector), runs ``sent_pooler`` -> ``rdm_model`` ->
    ``rdm_classifier`` and scores the argmax predictions against the
    one-hot labels in ``valid_data_y``.

    Args:
        rdm_model: recurrent model producing per-timestep hidden states.
        sent_pooler: pools each sentence's word vectors into one vector.
        rdm_classifier: maps the final hidden state to 2-class scores.
        new_data_len: optional per-example sequence lengths overriding
            ``valid_data_len``; ``None`` or an empty list means "use
            ``valid_data_len``" (keeps the old empty-list default working).
        cuda: if True, move labels to GPU to match model outputs.
        max_seq_len: cap on sentences read per example.
            NOTE(review): the original referenced an undefined global
            ``max_seq_len`` (NameError); the default of 50 matches the
            hard-coded cap in accuracy_on_valid_data_V4 — confirm intent.

    Returns:
        Tuple ``(accuracy, precision, recall)`` computed by sklearn over
        all evaluated batches.
    """
    batch_size = 20
    t_steps = int(len(valid_data_ID) / batch_size)
    miss_vec = 0  # words missing from word2vec
    hit_vec = 0   # words found in word2vec
    mts = 0       # running index into the validation set (wraps around)
    fn = 1        # number of consecutive sentences merged per step
    t_data_len = new_data_len if new_data_len else valid_data_len
    labels = []
    preds = []
    for step in range(t_steps):
        data_x = []
        m_data_y = np.zeros([batch_size, 2], dtype=np.int32)
        m_data_len = np.zeros([batch_size], dtype=np.int32)
        for i in range(batch_size):
            m_data_y[i] = valid_data_y[mts]
            seq = []
            for j in range(0, min(max_seq_len, t_data_len[mts]), fn):
                sent = []
                t_words = []
                for jj in range(j, min(j + fn, t_data_len[mts])):
                    t_words.extend(data[valid_data_ID[mts]]['text'][jj])
                if len(t_words) == 0:
                    print("ID:%s   j:%3d    empty sentence:" % (valid_data_ID[mts], j), t_words)
                    continue

                for m_word in t_words:
                    try:
                        sent.append(torch.tensor([word2vec[m_word]], dtype=torch.float32))
                    except KeyError:
                        miss_vec += 1
                        # OOV fallback: sum of the vectors for "{ an unknown word }"
                        sent.append(torch.tensor(
                            [word2vec['{'] + word2vec['an'] + word2vec['unknown'] + word2vec['word'] + word2vec['}']],
                            dtype=torch.float32))
                    else:
                        hit_vec += 1
                seq.append(torch.cat(sent))
            m_data_len[i] = len(seq)
            data_x.append(seq)
            mts += 1
            if mts >= len(valid_data_ID):  # read data cyclically
                mts = mts % len(valid_data_ID)

        if rdm_model is not None and sent_pooler is not None and rdm_classifier is not None:
            with torch.no_grad():
                seq = sent_pooler(data_x)
                rdm_hiddens = rdm_model(seq)
                batchsize, _, _ = rdm_hiddens.shape
                # hidden state at the last valid timestep of each example
                rdm_outs = torch.cat(
                    [rdm_hiddens[i][m_data_len[i] - 1].unsqueeze(0) for i in range(batchsize)]
                )
                rdm_scores = rdm_classifier(rdm_outs)
                rdm_preds = rdm_scores.argmax(axis=1)
                y_label = torch.tensor(m_data_y).argmax(axis=1)
                if cuda:
                    y_label = y_label.cuda()
                preds.append(rdm_preds)
                labels.append(y_label)
            if cuda:
                # only safe/meaningful when CUDA is actually in use
                torch.cuda.empty_cache()
    pred_array = torch.cat(preds).cpu().numpy()
    label_array = torch.cat(labels).cpu().numpy()
    return (accuracy_score(y_true=label_array, y_pred=pred_array),
            precision_score(y_true=label_array, y_pred=pred_array),
            recall_score(y_true=label_array, y_pred=pred_array))


def accuracy_on_valid_data_V4(rdm_model=None, sent_pooler=None, subj_pooler=None, rdm_classifier=None,
                              new_data_len=None, cuda=True):
    """Evaluate the V4 RDM pipeline (with a subject pooler) on the validation set.

    Like ``accuracy_on_valid_data``, but the feature fed to ``rdm_model`` is
    the sum of the sentence-pooled sequence and a padded, per-sequence
    ``subj_pooler`` output.

    Args:
        rdm_model: recurrent model producing per-timestep hidden states.
        sent_pooler: pools each sentence's word vectors into one vector.
        subj_pooler: pools each example's sentence sequence; its padded
            output is added element-wise to the sent_pooler features.
        rdm_classifier: maps the final hidden state to 2-class scores.
        new_data_len: optional per-example sequence lengths overriding
            ``valid_data_len``; ``None`` or an empty list means "use
            ``valid_data_len``" (keeps the old empty-list default working).
        cuda: if True, move labels to GPU to match model outputs.

    Returns:
        Tuple ``(accuracy, precision, recall)`` computed by sklearn over
        all evaluated batches.
    """
    batch_size = 20
    t_steps = int(len(valid_data_ID) / batch_size)
    miss_vec = 0  # words missing from word2vec
    hit_vec = 0   # words found in word2vec
    mts = 0       # running index into the validation set (wraps around)
    fn = 1        # number of consecutive sentences merged per step
    t_data_len = new_data_len if new_data_len else valid_data_len
    labels = []
    preds = []
    for step in range(t_steps):
        data_x = []
        m_data_y = np.zeros([batch_size, 2], dtype=np.int32)
        m_data_len = np.zeros([batch_size], dtype=np.int32)
        for i in range(batch_size):
            m_data_y[i] = valid_data_y[mts]
            seq = []
            for j in range(0, min(50, t_data_len[mts]), fn):
                sent = []
                t_words = []
                for jj in range(j, min(j + fn, t_data_len[mts])):
                    t_words.extend(data[valid_data_ID[mts]]['text'][jj])
                if len(t_words) == 0:
                    print("ID:%s   j:%3d    empty sentence:" % (valid_data_ID[mts], j), t_words)
                    continue

                for m_word in t_words:
                    try:
                        sent.append(torch.tensor([word2vec[m_word]], dtype=torch.float32))
                    except KeyError:
                        miss_vec += 1
                        # OOV fallback: sum of the vectors for "{ an unknown word }"
                        sent.append(torch.tensor(
                            [word2vec['{'] + word2vec['an'] + word2vec['unknown'] + word2vec['word'] + word2vec['}']],
                            dtype=torch.float32))
                    else:
                        hit_vec += 1
                seq.append(torch.cat(sent))
            m_data_len[i] = len(seq)
            data_x.append(seq)
            mts += 1
            if mts >= len(valid_data_ID):  # read data cyclically
                mts = mts % len(valid_data_ID)

        # NOTE(fix): the original guard omitted subj_pooler, yet it is
        # called below — include it so a missing pooler is skipped, not a crash.
        if (rdm_model is not None and sent_pooler is not None
                and subj_pooler is not None and rdm_classifier is not None):
            with torch.no_grad():
                # pad each example's subject features to a common length
                subj_tensor = torch.nn.utils.rnn.pad_sequence(
                    [subj_pooler(seq) for seq in data_x], batch_first=True)
                seq = sent_pooler(data_x)
                feature_tensor = subj_tensor + seq
                rdm_hiddens = rdm_model(feature_tensor)
                batchsize, _, _ = rdm_hiddens.shape
                # hidden state at the last valid timestep of each example
                rdm_outs = torch.stack(
                    [rdm_hiddens[i][m_data_len[i] - 1] for i in range(batchsize)]
                )
                rdm_scores = rdm_classifier(rdm_outs)
                rdm_preds = rdm_scores.argmax(axis=1)
                y_label = torch.tensor(m_data_y).argmax(axis=1)
                if cuda:
                    y_label = y_label.cuda()
                preds.append(rdm_preds)
                labels.append(y_label)

    pred_array = torch.cat(preds).cpu().numpy()
    label_array = torch.cat(labels).cpu().numpy()
    return (accuracy_score(y_true=label_array, y_pred=pred_array),
            precision_score(y_true=label_array, y_pred=pred_array),
            recall_score(y_true=label_array, y_pred=pred_array))
