import pickle
import random
import torch
from torch import nn

import config
import numpy as np

BASE_DIR = './data/'


def get_embedding_matrix(word_index):
    """Build a (config.WORD_SIZE + 1, config.EMBEDDING_DIM) embedding matrix.

    Rows default to zeros; for every word in ``word_index`` that appears in
    the pickled w2v dictionary, the row at that word's index is filled with
    its pretrained vector.
    """
    with open('./data/w2v.pkl', 'rb') as fp:
        w2v = pickle.load(fp)
        weights = np.zeros((config.WORD_SIZE + 1, config.EMBEDDING_DIM))
        for token, row in word_index.items():
            vector = w2v.get(token)
            if vector is not None:
                weights[row] = vector
        print('embedding_matrix complete')
        return weights


def get_batch(batch_size, train_data):
    """Yield successive slices of ``train_data`` of size ``batch_size``.

    The final batch may be smaller than ``batch_size``. Fixes an off-by-one
    in the original: when len(train_data) was an exact multiple of
    batch_size, the last batch was silently dropped (the while loop exited
    with eindex == len, and the tail guard required eindex > len).
    """
    # random.shuffle(train_data)
    start = 0
    while start < len(train_data):
        yield train_data[start:start + batch_size]
        start += batch_size


def k_fold(batch_size, train_data):
    """Yield only *full* batches of size ``batch_size`` (drop-last semantics).

    A trailing partial batch is intentionally discarded, matching the
    original. Fixes an off-by-one: when len(train_data) was an exact
    multiple of batch_size, the last full batch was also dropped because
    the loop condition was ``eindex < len`` instead of ``<=``.
    """
    # random.shuffle(train_data)
    start = 0
    while start + batch_size <= len(train_data):
        yield train_data[start:start + batch_size]
        start += batch_size


def format_data(train_data):
    """Zip a (inputs, labels) pair of parallel sequences into [x, y] rows."""
    return [[x, y] for x, y in zip(*train_data)]


def pack_seqs(seqs):
    """Return, per sequence, the count of non-zero tokens (0 = padding)."""
    return [sum(1 for token in seq if token != 0) for seq in seqs]


def load_data(train_data='train_data_p.pkl', test_data='test_data_p.pkl'):
    """Load the pickled datasets and index tables from ``BASE_DIR``.

    Returns (train_data, word2ix, ix2tag, test_data, ix2dist), preserving
    the original return order. ``tag2ix`` is loaded but not returned, same
    as the original — NOTE(review): confirm whether any caller needs it.

    Fixes a resource leak: the original opened six files without closing
    them; each is now read inside a context manager.
    """
    def _unpickle(name):
        # One helper so every handle is closed deterministically.
        with open(BASE_DIR + name, 'rb') as fp:
            return pickle.load(fp)

    train = _unpickle(train_data)
    word2ix = _unpickle('word_index.pkl')
    tag2ix = _unpickle('tag2ix.pkl')
    ix2tag = _unpickle('ix2tag.pkl')
    ix2dist = _unpickle('ix2dist.pkl')
    test = _unpickle(test_data)

    return train, word2ix, ix2tag, test, ix2dist

def is_head(tag):
    """True if the last character of the tag's final '_' segment is 'B' or 'S'."""
    marker = tag.split('_')[-1][-1]
    return marker == 'B' or marker == 'S'


def is_paired(item1, item2):
    """True when the digits at position -2 of the two tag strings differ by exactly 1."""
    return abs(int(item1[-2]) - int(item2[-2])) == 1


def gen_distance(data, ix2tag):
    """Attach a head-to-head distance vector to every sentence.

    For each (sent, s_tag) sample: collect entity-head positions (tags whose
    last segment ends in 'B'/'S', per is_head), pair heads whose tag strings
    look related (is_paired compares the characters at index -2), and record
    the signed token distance between the two heads of each pair.

    Side effects only: prints the abs-distance histogram at the end; the
    dumps of the augmented data are commented out and nothing is returned.
    """
    lens_set = {}  # abs(distance) -> number of pairs seen at that distance
    ix2dist = {0:0}  # distance-index table, grown as new distances appear; 0 reserved
    data_with_d = []  # samples extended with the per-token distance vector
    index = 0  # progress counter (only read by the commented-out print)

    for sent, s_tag in data:
        s_dist = [0 for i in range(len(s_tag))]  # default 0 = no paired head at this token
        ent_head = {}  # token position -> head tag string

        for i, t_ix in enumerate(s_tag):
            if t_ix == 0:
                break  # tag index 0 treated as padding; rest of s_tag assumed padded — TODO confirm
            tag = ix2tag[t_ix]
            if is_head(tag):
                ent_head[i] = tag
        index += 1
        # print(index, ent_head)

        all_items = list(ent_head.items())
        for j, item in enumerate(all_items):
            for item2 in all_items[j+1:]:
                if is_paired(item[1], item2[1]):
                    dist = item2[0] - item[0]  # signed: +dist at the left head, -dist at the right
                    s_dist[item[0]] = dist
                    s_dist[item2[0]] = -dist

                    if dist not in ix2dist.values():
                        ix2dist[len(ix2dist)] = dist  # assign the next free index to a new distance

                    if abs(dist) not in lens_set:
                        lens_set[abs(dist)] = 1
                    else:
                        lens_set[abs(dist)] += 1
                    # NOTE(review): removing from all_items while the outer loop
                    # enumerates it consumes item2 so a head is matched at most
                    # once, but the removal shifts later elements and can make
                    # enumerate skip the element that slides into the removed
                    # slot — confirm this is the intended pairing behavior.
                    all_items.remove(item2)
                    break
        data_with_d.append([sent, s_tag, s_dist])
        index += 1  # NOTE(review): index is incremented twice per sample (also above)

    print(lens_set)
    # pickle.dump(data_with_d, open(BASE_DIR + 'train_data_d.pkl', 'wb'))
    # pickle.dump(data_with_d, open(BASE_DIR + 'test_data_d.pkl', 'wb'))
    # pickle.dump(lens_set, open(BASE_DIR+'dist_count.pkl', 'wb'))


def test_pair():
    """Scratch check of the pair-and-consume idea used by gen_distance.

    Walks the list, pairing each value with the next equal value to its
    right and removing the partner so it cannot be matched again.
    """
    a = [2, 2, 2, 2, 3, 3]
    pair = []

    for ix, i in enumerate(a):
        print(i)
        a_c = a[ix + 1:]  # only look rightwards for a partner (copy, safe to iterate)
        for j in a_c:
            if j == i:
                print(i, j)
                pair.append((i, j))
                # NOTE(review): mutating `a` while enumerate(a) is in flight
                # skips the element that shifts into the removed slot — this
                # mirrors the same pattern in gen_distance, and the printed
                # output depends on that exact behavior.
                a.remove(j)
                break

    print(pair)


def get_train_weight(train_data, ix2tag):
    """Print the frequency of every tag index 0..162 found in train_data.

    ``ix2tag`` is accepted for call-site compatibility but is not used.
    Raises KeyError if a sample contains a tag index outside 0..162.
    """
    counts = dict.fromkeys(range(163), 0)
    for sample in train_data:
        for tag_ix in sample[1]:
            counts[tag_ix] += 1
    print(counts)
    print('complete')


def gen_dist_dict():
    """Create and persist the distance-index table.

    Maps indices 1..99 to the signed distances -49..49 (index 0 is left
    out here; elsewhere 0 means "no distance") and pickles the dict to
    ix2dist.pkl. Fixes a resource leak: the original passed an unclosed
    ``open(...)`` handle to pickle.dump.
    """
    ix2dist = {i + 1: dist for i, dist in enumerate(range(-49, 50))}
    print(ix2dist)
    with open(BASE_DIR + 'ix2dist.pkl', 'wb') as fp:
        pickle.dump(ix2dist, fp)


def data2ix(file_name="train_data_d", ix2dist=None):
    """Convert each sample's distance vector from raw distances to indices.

    Loads ``BASE_DIR/<file_name>.pkl``, maps every non-zero distance through
    the inverse of ``ix2dist`` (index -> distance), and writes the result to
    ``BASE_DIR/<file_name>2.pkl``. Raises KeyError for a distance missing
    from the table.

    Fixes: mutable default argument (``ix2dist={}`` shared across calls)
    and two leaked file handles.
    """
    if ix2dist is None:
        ix2dist = {}
    with open(BASE_DIR + file_name + ".pkl", 'rb') as fp:
        data = pickle.load(fp)
    dist2ix = {v: k for k, v in ix2dist.items()}

    # dist_data rows alias the rows inside data, so the in-place updates
    # below already modify data; the write-back loop is kept for clarity.
    dist_data = [sample[-1] for sample in data]
    for j, sent in enumerate(dist_data):
        for i, t in enumerate(sent):
            if t != 0:  # 0 means "no distance" and stays 0
                dist_data[j][i] = dist2ix[t]

    for i in range(len(data)):
        data[i][-1] = dist_data[i]

    print(dist_data)
    with open(BASE_DIR + file_name + "2.pkl", 'wb') as fp:
        pickle.dump(data, fp)



if __name__ == '__main__':
    # Ad-hoc driver: past experiments are kept below as commented-out calls.
    # wv = get_embedding_matrix(word_index=pickle.load(open(BASE_DIR + 'word_index.pkl', 'rb')))
    # traindata, testdata, source_W, source_vob, sourc_idex_word, target_vob, target_idex_word, max_s, k \
    #     = cPickle.load(open(eelstmfile, 'rb'))
    # data = pickle.load(open(BASE_DIR + 'e2edata.pkl', 'rb'), encoding='iso-8859-1')
    # pickle.dump(data[2], open(BASE_DIR+'wv_matrix.pkl', 'wb'))
    # test_data = pickle.load(open(BASE_DIR+'test_data_d.pkl', 'rb'))
    # dist_count = pickle.load(open(BASE_DIR+'dist_count.pkl', 'rb'))
    # print(test_data[0])
    # print(test_data[1])
    # NOTE(review): misleading name — this loads ix2dist.pkl (index -> distance
    # table consumed by data2ix), not a tag table. File handle is not closed.
    ix2tag = pickle.load(open(BASE_DIR+'ix2dist.pkl', 'rb'))
    # get_train_weight(test_data, ix2tag)
    # gen_distance(test_data, ix2tag)
    data2ix("test_data_d", ix2tag)
    # train_data = data[0]
    # test_data = data[1]
    # ix2word = data[4]
    # tag2ix = data[5]
    # ix2tag = data[6]




