import os.path
import logging
import numpy
from gensim.models import word2vec
from torch.utils.data import DataLoader, Dataset
import jieba
import torch
import json
import math

print("加载四万首唐诗中, 请等待")
# Load the Quan Tangshi corpus: a JSON list of records.
# NOTE(review): index 4 presumably holds the poem text — confirm against the JSON schema.
with open('./corpus/quantangshi_emerson_20180127.json', encoding="utf-8") as f:
    poems = json.load(f)
# Full corpus, used for word-embedding training.
poemsWE = [poem[4] for poem in poems]
# Only shorter poems (<= 60 characters) are kept for the RNN corpus.
poemsRNN = [poem[4] for poem in poems if len(poem[4]) <= 60]
# Stop "words": punctuation / newline tokens that carry no semantic content.
stop_words = ['，', '。', "\n"]


def clean_cut_poem(p):
    """Strip surrounding whitespace, segment the poem with jieba, and
    return the tokens as a list with stop-word tokens removed."""
    tokens = jieba.cut(p.strip())
    return [tok for tok in tokens if tok not in stop_words]


# Segment every poem once; corpusWE feeds the embedding model, corpusRNN the RNN.
corpusWE = [clean_cut_poem(poem) for poem in poemsWE]
corpusRNN = [clean_cut_poem(poem) for poem in poemsRNN]
# Token count of the longest segmented RNN poem (0 for an empty corpus);
# used later to pad all RNN sequences to a common length.
max_len = max((len(seg) for seg in corpusRNN), default=0)

# Append the end-of-sequence marker 'E' to every embedding-corpus poem.
addStop_corpusWE = [poem + ['E'] for poem in corpusWE]
# Pad each RNN poem with 'E' up to max_len, plus one trailing 'E' terminator,
# so every sequence has the same length (max_len + 1). Builds new lists
# instead of mutating the corpusRNN entries in place (the original `+=`
# silently padded corpusRNN as well through aliasing).
addStop_corpusRNN = [poem + (max_len - len(poem) + 1) * ["E"] for poem in corpusRNN]

print("加载结束!")

# Keep only the first 5% of the padded RNN corpus to shorten training.
truncate_idx = math.floor(0.05 * len(addStop_corpusRNN))
addStop_corpusRNN = addStop_corpusRNN[:truncate_idx]

# Train skip-gram embeddings once and cache them on disk; later runs reload.
if not os.path.exists("./modeloutput/poem.model"):
    print("开始词嵌入训练, 请等待\n")
    logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
    # sg=1 selects skip-gram; min_count=1 keeps every token (poems are short).
    word_encoder = word2vec.Word2Vec(addStop_corpusRNN, vector_size=150, epochs=100, min_count=1,
                                     workers=8, sg=1)
    print("词嵌入训练结束!")
    # .save() writes the model plus auxiliary .npy vector files alongside it.
    word_encoder.save("./modeloutput/poem.model")
    print("\n 词库量: {}, 词维度: {}, 训练: {}\n".format(len(word_encoder.wv.index_to_key),
                                                       word_encoder.wv.vector_size,
                                                       "skip-gram"))
else:
    word_encoder = word2vec.Word2Vec.load("./modeloutput/poem.model")

class custom_data(Dataset):
    """Next-token-prediction dataset over padded poems.

    Each sample is a pair (inputs, labels) where inputs are the embedding
    vectors of tokens [0..n-2] and labels are the vocabulary indices of
    tokens [1..n-1] — i.e. the model predicts each following token.
    """

    def __init__(self, data):
        # List of token lists, all padded to the same length with 'E'.
        self.corpus = data
        # Vocabulary index of the 'E' end/padding marker (kept for callers).
        self.index_of_E = word_encoder.wv.key_to_index['E']

    def __getitem__(self, item):
        poem = self.corpus[item]
        vectors = [word_encoder.wv.get_vector(word) for word in poem]
        indices = [word_encoder.wv.key_to_index[word] for word in poem]
        # Shift by one position: input tokens 0..t predict token t+1.
        inputs = torch.tensor(numpy.array(vectors[:-1]))
        labels = torch.tensor(numpy.array(indices[1:]))
        return inputs, labels

    def __len__(self):
        return len(self.corpus)


# Wrap the padded corpus in a Dataset and batch it for training.
my_data = custom_data(addStop_corpusRNN)
my_dataloader = DataLoader(dataset=my_data, batch_size=32, shuffle=True, drop_last=False)


def get_data():
    """Return the dataloader, the trained word2vec model, the maximum poem
    length, and a one-element list holding the index of the 'E' marker."""
    stop_indices = [word_encoder.wv.key_to_index["E"]]
    return my_dataloader, word_encoder, max_len, stop_indices


