import csv

import platform
import random

import gensim
import numpy as np
import sys, os
sys.path.append(os.pardir)
from Utils import Word_Dict_Mapping


def vec2tensor_3D(name, project_path):
    """Build fixed-size tensors from a dataset CSV and save them as .npy files.

    Reads ``<name>.csv`` (first column = integer label, remaining columns =
    words of the sentence), looks up each word's embedding vector and its
    dictionary id, pads/truncates every sentence to a fixed length, and saves
    three arrays under ``<project_path>/vec2tensor/``:

    - ``w2vtensor.npy`` — float array of word vectors per sentence
    - ``w2num.npy``     — int array of word ids per sentence
    - ``labels.npy``    — int array of labels

    Parameters
    ----------
    name : str
        Dataset name; selects both the embedding file and the CSV.
    project_path : str
        Project root used to locate inputs (non-Windows) and outputs.

    Raises
    ------
    KeyError
        If a word in the CSV is missing from the embedding or the dictionary.
    """
    MAX_LEN = 50  # sentences are padded/truncated to this many words
    EMB_DIM = 50  # embedding dimensionality — assumes 50-d vectors in the .bin; TODO confirm

    # Input locations differ between the Windows dev box and deployment.
    if platform.system() == 'Windows':
        embedding_path = "D:\\embedding\\" + name + ".bin"
        csv_name = "D:\\DataSet\\" + name + ".csv"
    else:
        embedding_path = project_path + "/embedding/" + name + ".bin"
        csv_name = project_path + "/DataSet/" + name + ".csv"

    # Load the word -> id dictionary.
    word_dict = Word_Dict_Mapping()
    word_dict.loader_from_csv(name, project_path)
    # Load the pretrained word2vec vectors.
    word2vec = gensim.models.KeyedVectors.load_word2vec_format(embedding_path, binary=True)

    sentences = []
    labels = []
    # `with` guarantees the file is closed even if a row is malformed
    # (the original open/close pair leaked the handle on exceptions).
    with open(csv_name, 'r', encoding='utf-8') as f:
        for row in csv.reader(f):
            labels.append(row[0])       # first column: label
            sentences.append(row[1:])   # remaining columns: words

    sent_tmp = []       # per-sentence lists of word vectors
    line_in_dict = []   # per-sentence lists of word ids
    for row in sentences:
        word_tmp = []
        dict_tmp = []
        if len(row) > MAX_LEN:
            # Over-long sentence: keep a random sample of MAX_LEN words.
            # NOTE(review): shuffling destroys word order — presumably intentional
            # for this model, but worth confirming.
            random.shuffle(row)
            row = row[:MAX_LEN]
        else:
            # Pre-pad with zero vectors / zero ids up to MAX_LEN.
            word_tmp.extend(np.zeros((MAX_LEN - len(row), EMB_DIM), dtype=np.float32))
            dict_tmp.extend(np.zeros(MAX_LEN - len(row), dtype=np.float32))
        for word in row:
            word_tmp.append(word2vec[word])  # raises KeyError on OOV words
            # NOTE(review): extend (not append) assumes the dict value is
            # iterable — confirm against Word_Dict_Mapping's value type.
            dict_tmp.extend(word_dict.dict[word])
        sent_tmp.append(word_tmp)
        line_in_dict.append(dict_tmp)

    # Debug output; guarded so datasets with fewer than 4 sentences don't crash
    # (the original indexed sent_tmp[0..3] unconditionally).
    print(*(len(s) for s in sent_tmp[:4]))

    sent_tensor = np.asarray(sent_tmp).astype(float)
    print(sent_tensor.shape)
    sent_num = np.asarray(line_in_dict).astype(int)
    print(sent_num.shape)

    out_dir = project_path + "/vec2tensor/"
    path = out_dir + "w2vtensor.npy"
    print(path)
    np.save(path, sent_tensor)
    np.save(out_dir + "w2num.npy", sent_num)
    label_tensor = np.asarray(list(map(int, labels))).astype(int)
    np.save(out_dir + "labels.npy", label_tensor)

    print("tensors saved!")
    # (Removed the trailing `del ...`: it was a no-op inside function scope and
    # raised NameError on an empty CSV, where `word_tmp` was never bound.)