import json
import linecache
import logging
import sys

import gensim
import numpy as np
from tqdm import tqdm

logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)


def load_json(url):
    """Parse the JSON file at *url* and return the decoded Python object."""
    with open(url) as fp:
        return json.load(fp)


def load_text(url):
    """Load a whitespace-tokenized corpus from *url*.

    Each line of the file is one document; lines are split on whitespace.
    Returns a list of token lists.

    NOTE(review): linecache caches file contents process-wide, so a file
    rewritten during the same run would return stale lines — presumably
    intentional here for repeated reads of a static corpus; confirm.
    """
    # Comprehension replaces the manual append loop (same output, idiomatic).
    return [line.split() for line in linecache.getlines(url)]


# Strip stop words from a token sequence.
def stopwords(sent, stoplist=None):
    """Return the tokens of *sent* that are not stop words.

    Args:
        sent: iterable of token strings.
        stoplist: optional container of stop words. When None (the
            default, preserving the original call signature), falls back
            to the module-level global ``stopword`` — which raises
            NameError unless it has been loaded (see the commented-out
            ``load_json('data/stopwords.json')`` in ``__main__``).

    Returns:
        list of tokens with stop words removed.
    """
    if stoplist is None:
        stoplist = stopword  # module global; caller must have loaded it
    return [word for word in sent if word not in stoplist]


def train():
    """Build a TF-IDF model from the stop-word-free corpus and save it.

    Reads 'out/corpus_no_stopwords.txt', builds a gensim dictionary and
    bag-of-words corpus, fits a TfidfModel, and writes it to 'tfidf_model'.
    """
    documents = load_text('out/corpus_no_stopwords.txt')
    vocab = gensim.corpora.Dictionary(documents)
    bow_corpus = [vocab.doc2bow(doc) for doc in documents]
    model = gensim.models.TfidfModel(bow_corpus)
    model.save('tfidf_model')


def load_model(url):
    """Load a saved gensim TfidfModel from *url*, memory-mapped read-only."""
    model = gensim.models.TfidfModel.load(url, mmap='r')
    return model


def distance(corpus, model, testset):
    """Pick, for each test item, the reply most similar to its context.

    Args:
        corpus: path to the tokenized corpus used to build the dictionary.
        model: path to a saved gensim TfidfModel.
        testset: path to a JSON file — presumably a list of dicts with
            'context' (list of str), 'query' (str) and 'reply' (list of
            str) keys, as read below; confirm against the data producer.

    Returns:
        list of int — index of the highest-cosine-similarity reply for
        each test item.
    """
    text = load_text(corpus)
    dictionary = gensim.corpora.Dictionary(text)
    tfidf = load_model(model)  # renamed local: don't rebind the path param
    test_set = load_json(testset)
    labels = []
    word_size = len(dictionary.token2id)

    # Fix: the original called pbar.update() once before the loop, so the
    # bar overshot the total by one. The context manager also guarantees
    # the bar is closed.
    with tqdm(total=len(test_set), desc='distance') as pbar:
        for cqa in test_set:
            context = ' '.join(cqa['context']).split()
            query = cqa['query'].split()
            # Hoisted out of the reply loop: the context vector does not
            # depend on the candidate reply.
            context_v = sent2vec(context, dictionary, tfidf, word_size)

            scores = []
            for reply in cqa['reply']:
                sent = query + reply.split()
                sent_v = sent2vec(sent, dictionary, tfidf, word_size)
                scores.append(cos(context_v, sent_v))

            labels.append(scores.index(max(scores)))
            pbar.update(1)
    return labels


def cos(x, y):
    """Cosine similarity of two equal-length numeric vectors.

    Fixes: the original converted x and y to arrays `a`/`b` but then
    computed with the raw inputs, leaving `a`/`b` unused; it also divided
    by zero (yielding nan) when either vector had zero norm.

    Returns:
        float in [-1, 1]; 0.0 when either vector is all zeros (e.g. a
        sentence with no in-vocabulary words).
    """
    a = np.asarray(x, dtype=float)
    b = np.asarray(y, dtype=float)
    denom = np.linalg.norm(a) * np.linalg.norm(b)
    if denom == 0.0:
        return 0.0  # zero vector: similarity undefined; treat as no overlap
    return float(np.inner(a, b) / denom)


# def sent2vec(sent, dictionary, model):
#     doc2bow = dictionary.doc2bow(sent)
#     result = model[doc2bow]
#     return [s[1] for s in result]


def sent2vec(sent, dictionary, model, word_size):
    """Project a token list into a dense TF-IDF vector.

    Converts *sent* to a bag-of-words via *dictionary*, weights it with
    *model*, and scatters the weights into a zero-initialized list of
    length *word_size* indexed by dictionary token id.
    """
    weighted = model[dictionary.doc2bow(sent)]
    vector = [0.] * word_size
    for token_id, weight in weighted:
        vector[token_id] = weight
    return vector


def write2file(str, path):
    """Append *str* followed by a newline to the file at *path*.

    NOTE(review): the parameter name shadows the builtin ``str``; kept
    unchanged to preserve the public signature for keyword callers.
    """
    with open(path, 'a') as out:
        out.write(str)
        out.write('\n')


if __name__ == '__main__':
    # Usage: python <script> <name> — reads raw/<name>.json, appends one
    # predicted reply index per line to out/<name>.txt.
    infile = sys.argv[1]
    # stopword = load_json('data/stopwords.json')
    labels = distance('out/corpus_no_stopwords.txt', 'tfidf_model', 'raw/{}.json'.format(infile))
    out_path = 'out/{}.txt'.format(infile)
    for predicted in labels:
        write2file(str(predicted), out_path)
