# coding:utf-8
"""生成词典"""
# 中文版处理的word2vec
import os
import codecs
from gensim import models
import logging

# 引入日志配置
from utils.util import ensure_dir, read_txt_by_tab

logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)


class MySentences(object):
    """Iterable over whitespace-tokenized lines of every file in a directory.

    Each ``__iter__`` call re-reads the corpus from disk, which is what
    gensim's Word2Vec expects (it iterates the corpus multiple times).
    """

    def __init__(self, dirname):
        # Directory containing the pre-segmented (space-separated) corpus files.
        self.dirname = dirname

    def __iter__(self):
        # Walk every file in the corpus directory.
        for filename in os.listdir(self.dirname):
            file_path = os.path.join(self.dirname, filename)
            # Context manager guarantees the handle is closed; the original
            # opened the file without closing it, leaking a handle per file
            # on every training pass.
            with open(file_path, encoding='utf-8') as fh:
                for line in fh:
                    # Tokens are separated by single spaces. Drop empty
                    # tokens and bare "\n" tokens, but keep any trailing
                    # "\n" attached to the last word (original behavior).
                    yield [w for w in line.split(" ") if w and w != '\n']


def train_w2v_model(corpus_dir, size=100, window=2, min_count=5):
    """Train a Word2Vec model over the segmented corpus in *corpus_dir*.

    Uses gensim's default CBOW algorithm (sg=0). *size* is the embedding
    dimensionality, *window* the context window, and *min_count* the
    minimum word frequency to keep a word in the vocabulary.
    """
    corpus = MySentences(corpus_dir)
    return models.Word2Vec(
        corpus,
        size=size,
        workers=10,
        window=window,
        min_count=min_count,
    )


def save_wv(model, wv_file):
    """Write a model's word vectors to *wv_file* in word2vec text format.

    Output: a "count dim" header line, then one "word v1 v2 ..." line per
    vocabulary word, encoded as UTF-8.

    :param model: trained gensim Word2Vec model (gensim 3.x API:
        ``wv.index2word`` / ``wv.vectors``).
    :param wv_file: destination file path.
    """
    # Vocabulary words (gensim 3.x attribute), aligned with `vectors`.
    words = model.wv.index2word
    # The embedding matrix; row i is the vector for words[i].
    vectors = model.wv.vectors
    print('vectors长宽：', len(vectors), len(vectors[0]))  # e.g. 52800 x size
    vocab_dict = dict(zip(words, vectors))  # word -> vector
    with codecs.open(wv_file, 'w', 'utf-8') as f:
        f.write('{} {}\n'.format(len(vectors), len(vectors[0])))
        for k, v in vocab_dict.items():
            f.write(k + ' ' + ' '.join(str(i) for i in v) + '\n')
    # The redundant f.close() after the `with` block was removed -- the
    # context manager already closes the file.


def save_w2v_model(corpus_dir="data/content_wordcut", modelfile="./model/all_verse_jieba.zh_word2vec", size=100, window=2, min_count=10):
    """Train a Word2Vec model on *corpus_dir* and persist it to *modelfile*.

    Thin wrapper over :func:`train_w2v_model` (this function previously
    duplicated the training code inline). Note it keeps its own
    ``min_count`` default of 10, vs 5 in ``train_w2v_model``.
    The saved model can be re-loaded with ``models.Word2Vec.load``.
    """
    model = train_w2v_model(corpus_dir, size=size, window=window, min_count=min_count)
    # Persist the full model (vocabulary + vectors + training state).
    model.save(modelfile)


def save_vocab(model_file: str, vocab_file: str):
    """Load a saved Word2Vec model and dump its vocabulary to *vocab_file*.

    Output format matches word2vec text format: a "count dim" header line,
    then one "word v1 v2 ..." line per word, UTF-8 encoded.

    :param model_file: path to a model saved with ``model.save(...)``.
    :param vocab_file: destination vocabulary file path.
    """
    model = models.Word2Vec.load(model_file)
    # Vocabulary words (gensim 3.x attribute), aligned with `vectors`.
    words = model.wv.index2word
    print('words长度：', len(words))  # vocabulary size
    vectors = model.wv.vectors
    print('vectors长宽：', len(vectors), len(vectors[0]))  # count x dim
    vocab_dict = dict(zip(words, vectors))  # word -> vector
    # Context manager replaces the manual open()/close() pair, so the file
    # is closed even if a write raises.
    with codecs.open(vocab_file, 'w', 'utf-8') as f:
        f.write('{} {}\n'.format(len(vectors), len(vectors[0])))
        for k, v in vocab_dict.items():
            f.write(k + ' ' + ' '.join(str(i) for i in v) + '\n')


def read_vocab(vocab_dir):
    """Read a vocabulary file and return a {word: vector} dict.

    Delegates parsing to the project helper ``read_txt_by_tab``, which is
    presumed to return (words, vectors) with vector components still as
    strings -- TODO confirm against utils.util. Components are converted
    to float here.
    """
    words, vectors = read_txt_by_tab(vocab_dir)
    # Parse each vector component from str to float.
    vectors = [[float(i) for i in v] for v in vectors]
    vocab_dict = dict(zip(words, vectors))  # word -> vector
    print('读取vocab文件', len(words), len(vectors[0]), vectors[0])
    return vocab_dict


if __name__ == '__main__':
    # Entry point: load the previously generated vocabulary file.
    path = 'data/vocab.txt'
    # save_vocab(path)  # run beforehand to (re)build the vocab file
    read_vocab(path)

# 读取json语料库并处理为gensim可处理的字符串
# data = dataHandler.parseRawData()  # All if author=None
# data = dataHandler.parseRawData(author="李白",constrain=5)
# print(len(data))
# print(type(data))
# seg_list = []  # 切分好的字向量和词向量
# sentences = ''
# sentences_list = []
# paragraphs_list = []
# for p in data:
#     text_list = re.findall(r'[\u4e00-\u9fff]+', p)  # 找到所有汉字
#     for text in text_list:
#         # 将单句进行分词，用一个空格分隔
#         sentence = ' '.join(jieba.lcut(text))
#         sentences_list.append(sentence)
#     sentences = '   '.join(sentences_list)  # 一首诗的预处理结果
#     paragraphs_list.append(sentences)
# paragraphs = '    \n'.join(paragraphs_list)
# print(paragraphs)
# f = open('data/all.txt','w')
# f.write(paragraphs)
# f.close()


# NOTE(review): dead code below -- a legacy Python 2 RNN poetry-training
# script preserved inside a module-level string literal (note the Python 2
# `print` statements and the removed `file()` builtin). It is never
# executed; the string is evaluated and discarded at import time. Kept
# byte-for-byte as historical reference only.
'''
# data = dataHandler.parseRawData(author="李白",constrain=5)  # All if author=None
# random.shuffle(data)
# for s in data:
    # print(s)
# word_to_ix = {}

for sent in data:
    for word in sent:
        if word not in word_to_ix:
            word_to_ix[word] = len(word_to_ix)
word_to_ix['<EOP>'] = len(word_to_ix)
word_to_ix['<START>'] = len(word_to_ix)

VOCAB_SIZE = len(word_to_ix)

print "VOCAB_SIZE:", VOCAB_SIZE
print "data_size", len(data)

for i in range(len(data)):
    data[i] = toList(data[i])
    data[i].append("<EOP>")
# save the word dic for sample method
p.dump(word_to_ix, file('wordDic', 'w'))

# save all avaible word
# wordList = open('wordList','w')
# for w in word_to_ix:
#     wordList.write(w.encode('utf-8'))
# wordList.close()

model = PoetryModel(len(word_to_ix), 256, 256);
model.cuda()  # running on GPU,if you want to run it on CPU,delete all .cuda() usage.
optimizer = optim.RMSprop(model.parameters(), lr=0.01, weight_decay=0.0001)
criterion = nn.NLLLoss()

one_hot_var_target = {}
for w in word_to_ix:
    one_hot_var_target.setdefault(w, make_one_hot_vec_target(w, word_to_ix))

epochNum = 10
TRAINSIZE = len(data)
batch = 100
def test():
    v = int(TRAINSIZE / batch)
    loss = 0
    counts = 0
    for case in range(v * batch, min((v + 1) * batch, TRAINSIZE)):
        s = data[case]
        hidden = model.initHidden()
        t, o = makeForOneCase(s, one_hot_var_target)
        output, hidden = model(t.cuda(), hidden)
        loss += criterion(output, o.cuda())
        counts += 1
    loss = loss / counts
    print "=====",loss.data[0]
print "start training"
for epoch in range(epochNum):
    for batchIndex in range(int(TRAINSIZE / batch)):
        model.zero_grad()
        loss = 0
        counts = 0
        for case in range(batchIndex * batch, min((batchIndex + 1) * batch, TRAINSIZE)):
            s = data[case]
            hidden = model.initHidden()
            t, o = makeForOneCase(s, one_hot_var_target)
            output, hidden = model(t.cuda(), hidden)
            loss += criterion(output, o.cuda())
            counts += 1
        loss = loss / counts
        loss.backward()
        print epoch, loss.data[0]
        optimizer.step()
    test()
torch.save(model, 'poetry-gen.pt')
'''
