from gensim.corpora import WikiCorpus
import jieba
import codecs
from gensim.models import Word2Vec
from gensim.models.word2vec import LineSentence
import multiprocessing
import json
import random


def dataprocess(xml_file, txt_file):
    '''
    Read the Chinese Wikipedia dump (*xml_file*, a .xml.bz2 archive) and
    extract the article texts into *txt_file*: one article per line,
    tokens separated by single spaces.

    Progress is reported every 10000 articles.
    '''
    article_count = 0
    with open(txt_file, 'w', encoding='utf-8') as sink:
        corpus = WikiCorpus(xml_file, lemmatize=False, dictionary={})
        for tokens in corpus.get_texts():
            sink.write(" ".join(tokens) + "\n")
            article_count += 1
            if article_count % 10000 == 0:
                print('Saved ' + str(article_count) + ' articles')
    print('Finished Saved ' + str(article_count) + ' articles')


def createstoplist(stoppath):
    '''
    Load the stopword file at *stoppath* (UTF-8, one entry per line) and
    return a dict whose keys are the stopwords (values are None); callers
    only ever test membership with ``word in stopwords``.

    Fixes: the file handle is now closed deterministically via ``with``
    (it used to be leaked), and blank lines are skipped so the empty
    string is no longer registered as a stopword.
    '''
    print('load stopwords...')
    with codecs.open(stoppath, 'r', encoding='utf-8') as f:
        # NOTE(review): the original `len(...) < 2` filter keeps only
        # single-character entries and silently drops every multi-character
        # stopword — confirm this threshold is intentional. Preserved here,
        # minus the empty-line leak.
        stoplist = [w for w in (line.strip() for line in f)
                    if w and len(w) < 2]
    return {}.fromkeys(stoplist)


def isAlpha(word):
    """
    Return True iff *word* is a non-empty string of pure ASCII letters.

    Used to detect (and later discard) English tokens: any word that
    contains a non-ASCII character (e.g. CJK) fails to encode and is
    reported as not-alpha.
    """
    try:
        ascii_bytes = word.encode('ascii')
    except UnicodeEncodeError:
        # Non-ASCII content cannot be an English word.
        return False
    return ascii_bytes.isalpha()


def trans_seg(jian_file, seg_file):
    '''
    Segment the simplified-Chinese wiki text with jieba.

    Reads *jian_file* line by line, removes English (pure-ASCII) chunks,
    segments the remainder with jieba, then drops stopwords, single-char
    words, English words and tabs, writing one space-joined line per
    input line to *seg_file*. Progress is printed every 500 lines.

    Fixes: quadratic ``+=`` string building replaced with ``join``-based
    construction, the input is streamed instead of loaded whole via
    ``readlines()``, and the stopword path is a raw string so the
    backslashes are no longer invalid escape sequences (same runtime
    value as before).
    '''
    stopwords = createstoplist(r'.\data\stopword.txt')
    line_no = 0
    with codecs.open(seg_file, 'w', 'utf-8') as wopen:
        print('开始...')
        with codecs.open(jian_file, 'r', 'utf-8') as ropen:
            for line in ropen:  # stream; avoids materialising the whole file
                line = line.strip()
                line_no += 1
                if line_no % 500 == 0:
                    print('line ' + str(line_no))
                # Re-join the non-English whitespace-separated chunks
                # before handing them to the segmenter.
                text = ''.join(tok for tok in line.split()
                               if not isAlpha(tok))
                # Keep words that are not stopwords, longer than one
                # character, not English and not a tab; the original
                # emitted each kept word followed by a space, so that
                # trailing-space format is preserved.
                seg = ''.join(
                    word + ' ' for word in jieba.cut(text)
                    if word not in stopwords and len(word) > 1
                    and not isAlpha(word) and word != '\t')
                wopen.write(seg + '\n')
    print('结束!')


def word2vec(rawdata, modelpath):
    '''
    Train a gensim Word2Vec model on *rawdata* (a pre-segmented text
    file, one sentence per line) and save it to *modelpath*.

    Hyper-parameters: 400-dim vectors, window 5, min_count 5, one worker
    per CPU core.
    '''
    print('Start...')
    sentences = LineSentence(rawdata)
    model = Word2Vec(
        sentences,
        size=400,
        window=5,
        min_count=5,
        workers=multiprocessing.cpu_count(),
    )
    model.save(modelpath)
    print("Finished!")


def export_word2vec(modelpath, embeddingpath):
    '''
    Load the trained model from *modelpath* and export its word vectors
    to *embeddingpath* in the textual word2vec format.
    '''
    trained = Word2Vec.load(modelpath)
    trained.wv.save_word2vec_format(embeddingpath, binary=False)


def wordsimilarity(model_file):
    '''
    Quick sanity check of a trained model: print the cosine similarity
    between two sample words ('羊毛' / '皮草'), or a notice if either is
    out of vocabulary.
    '''
    model = Word2Vec.load(model_file)
    score = ''
    try:
        score = model.similarity('羊毛', '皮草')
    except KeyError:
        print('The word not in vocabulary!')
    print(score)
    # for term in semi:
    #     print('%s,%s' % (term[0], term[1]))


def zhwiki_model_train():
    '''
    End-to-end pipeline: segment the (already simplified) wiki text,
    train Word2Vec on it, sanity-check the model and export the vectors.

    The XML extraction step is commented out because it only needs to
    run once; traditional->simplified conversion is done externally with
    opencc (see the command below).
    '''
    dump_path = '.\data\zhwiki-latest-pages-articles.xml.bz2'
    extracted_path = '.\data\zhwiki-latest-pages-articles.txt'
    simplified_path = ".\data\zhwiki-articles-jian.txt"
    segmented_path = '.\data\zhwiki-segment40.txt'
    model_path = ".\word2vec\zhwikimodel.model"
    embedding_path = ".\word2vec\zhwikimodel.word2vec"

    # dataprocess(dump_path, extracted_path)
    # opencc -i zhwiki-articles.txt -o zhwiki-articles-jian.txt -c t2s.json
    trans_seg(simplified_path, segmented_path)
    word2vec(segmented_path, model_path)
    wordsimilarity(model_path)
    export_word2vec(model_path, embedding_path)


def create_train_data():
    '''
    Build (question, answer-plus-evidence) training pairs from the WebQA
    json files and write them, 4-space separated, to ./data/train.data.

    For each question, the first evidence whose text actually contains
    the first answer string is used; questions whose evidences are all
    "no_answer" (or never contain the answer) are skipped.
    '''
    pairs = []
    for name in ["me_train", "me_test.ir", "me_validation.ann"]:
        with codecs.open("./data/WebQA/{}.json".format(name),
                         mode="r", encoding="utf-8") as rf:
            for qa in json.load(rf).values():
                for ed in qa["evidences"].values():
                    answer = ed["answer"]
                    if answer[0] == "no_answer":
                        continue
                    if answer[0] in ed["evidence"]:
                        combined = "{}. {}".format(",".join(answer),
                                                   ed["evidence"])
                        pairs.append((qa["question"], combined))
                        break  # one evidence per question is enough

    with codecs.open("./data/train.data", mode="w", encoding="utf-8") as wf:
        for rec in pairs:
            wf.write("    ".join([str(field) for field in rec]))
            wf.write("\n")


if __name__ == "__main__":
    # NOTE(review): a large block of commented-out interactive
    # experimentation code (QaData loading, most_similar probing) was
    # removed here; recover it from version control if needed.
    create_train_data()
