"""
variable:
    tag of words



"""

import json
import os.path

import pandas as pd
import re
import jieba.posseg as psg

from sklearn.feature_extraction.text import CountVectorizer
from sklearn.decomposition import LatentDirichletAllocation

from sklearn.cluster import KMeans
from sklearn.decomposition import PCA


import matplotlib.pyplot as plt
import jieba
from PIL import Image
import numpy as np


# Word-cloud mask image. NOTE: loaded at import time — importing this
# module fails with FileNotFoundError if the image is missing.
mask = np.array(Image.open("../data/plane.png"))

# file operation
encoding = "utf-8"  # encoding used for every text file read in this module

# tokenize
word_list_path = "../assist_data/add_word_list.txt"    # extra user dictionary fed to jieba
stop_word_list_path = "../assist_data/stop_word_list.txt"  # one stopword per line
flag_list = ['n', 'nz', 'vn']   # parts of speech to keep (jieba POS tags)
data_path = "../data/search_spider.jsonl"  # scraped tweets, one JSON object per line

# tf-idf
feature_word_cnt = 1000  # vocabulary size for CountVectorizer

# lda
max_topic_cnt = 10  # upper bound (exclusive) scanned by get_perplexity
topic_cnt = 8       # number of topics fitted by do_lda
top_word_cnt = 25   # words printed per topic

# kmeans
cluster_cnt = 10    # number of document clusters in do_kmeans

def chinese_word_cut(text):
    """Tokenize Chinese *text* and return kept tokens joined by spaces.

    Keeps only tokens that (a) consist of CJK characters, (b) are at
    least two characters long, (c) are not in the stopword list, and
    (d) carry a part-of-speech tag listed in ``flag_list``.
    """
    # Both calls are idempotent; jieba caches its dictionaries.
    jieba.load_userdict(word_list_path)
    jieba.initialize()

    # Load stopwords into a set for O(1) lookup (was a linear scan per
    # token). A missing stopword file is treated as "no stopwords".
    stop_words = set()
    try:
        with open(stop_word_list_path, encoding=encoding) as fh:
            stop_words = {re.sub(r'[\n\r]', '', line) for line in fh}
    except FileNotFoundError:
        pass

    kept = []
    for seg in psg.cut(text):
        # Strip everything outside the CJK Unified Ideographs range.
        word = re.sub(r'[^\u4e00-\u9fa5]', '', seg.word)
        # Length filter now applies unconditionally; previously it was
        # nested inside the stop-list loop, so single-character words
        # slipped through whenever the stopword list was empty.
        if len(word) < 2 or word in stop_words:
            continue
        if seg.flag in flag_list:
            kept.append(word)
    return " ".join(kept)


def read_json(path=None, enc=None):
    """Load tweets from a JSON-lines file into a DataFrame.

    Parameters
    ----------
    path : str, optional
        File to read; defaults to the module-level ``data_path``.
    enc : str, optional
        Text encoding; defaults to the module-level ``encoding``.

    Returns
    -------
    pd.DataFrame
        Columns "time" (from ``created_at``) and "content".
    """
    if path is None:
        path = data_path
    if enc is None:
        enc = encoding

    times = []
    contents = []
    # `with` guarantees the handle is closed (the original leaked it);
    # iterating the file streams lines instead of readlines().
    with open(path, encoding=enc) as fh:
        for line in fh:
            if not line.strip():  # tolerate blank lines
                continue
            tweet = json.loads(line)
            times.append(tweet["created_at"])
            contents.append(tweet["content"])

    return pd.DataFrame({"time": times, "content": contents})


def print_top_words(model, feature_names, n_top_words):
    """Print the strongest words of each topic and return them.

    For every row of ``model.components_`` the *n_top_words* features
    with the largest weights are joined into one space-separated string.
    Returns the list of those strings, one per topic.
    """
    summaries = []
    for idx, weights in enumerate(model.components_):
        print("Topic #%d:" % idx)
        best = weights.argsort()[::-1][:n_top_words]
        summary = " ".join(feature_names[i] for i in best)
        summaries.append(summary)
        print(summary)
    return summaries


def do_lda(data):
    """Fit an LDA topic model on the tokenized corpus.

    Parameters
    ----------
    data : pd.DataFrame
        Must contain a "tokenized" column of space-separated tokens.

    Returns
    -------
    np.ndarray
        Document-topic matrix of shape (n_docs, topic_cnt).

    Side effects: prints the top words of each topic.
    """
    tf_vectorizer = CountVectorizer(strip_accents='unicode',
                                    max_features=feature_word_cnt,
                                    stop_words='english',
                                    max_df=0.5,
                                    min_df=10)
    tf = tf_vectorizer.fit_transform(data["tokenized"])

    # Use the module-level topic_cnt (was hard-coded to 8 here, which
    # silently ignored the configuration constant).
    lda = LatentDirichletAllocation(n_components=topic_cnt, max_iter=50,
                                    learning_method='batch',
                                    learning_offset=50,
                                    # doc_topic_prior=0.1,
                                    # topic_word_prior=0.01,
                                    random_state=0)
    lda.fit(tf)

    tf_feature_names = tf_vectorizer.get_feature_names_out()
    topic_word = print_top_words(lda, tf_feature_names, top_word_cnt)
    print(topic_word)

    return lda.transform(tf)


def get_perplexity(data):
    """Plot LDA perplexity for topic counts 1..max_topic_cnt-1.

    Helps choose ``topic_cnt`` by eye: fits one LDA model per candidate
    topic count on the tokenized corpus, prints perplexity and
    log-likelihood for each, then shows a perplexity curve.
    """
    vectorizer = CountVectorizer(strip_accents='unicode',
                                 max_features=feature_word_cnt,
                                 stop_words='english',
                                 max_df=0.5,
                                 min_df=10)
    tf = vectorizer.fit_transform(data["tokenized"])

    candidate_counts = list(range(1, max_topic_cnt))
    perplexities = []
    likelihoods = []
    for k in candidate_counts:
        model = LatentDirichletAllocation(n_components=k, max_iter=50,
                                          learning_method='batch',
                                          learning_offset=50,
                                          random_state=0)
        model.fit(tf)
        perplexities.append(model.perplexity(tf))
        likelihoods.append(model.score(tf))
        print(k, perplexities[-1], likelihoods[-1])

    plt.plot(candidate_counts, perplexities)
    plt.xlabel("number of topics")
    plt.ylabel("perplexity")
    plt.show()


def do_kmeans(topics):
    """Cluster documents by topic distribution and report dominant topics.

    Parameters
    ----------
    topics : np.ndarray
        Document-topic matrix, e.g. the return value of ``do_lda``.

    Side effects: shows a 2-D PCA scatter plot of the clusters and
    prints, for each cluster, its sorted topic-weight sums plus the
    indices of its strongest and second-strongest topics.
    """
    kmeans = KMeans(cluster_cnt, random_state=0)
    kmeans.fit(topics)
    labels = kmeans.predict(topics)

    # visualize in 2-D
    reduced_topics = reduce_to_k_dim(topics)
    print(reduced_topics)
    plt.scatter(reduced_topics[:, 0], reduced_topics[:, 1],
                c=labels, s=40, cmap='viridis')
    plt.show()

    # Get the most related topics per cluster. The loop bound and list
    # sizes were hard-coded to 10; they now follow cluster_cnt so the
    # analysis stays correct when the cluster count is changed.
    idx1 = []
    idx2 = []
    for i in range(cluster_cnt):
        members = topics[np.where(labels == i)]
        totals = np.sum(members, axis=0)
        idx1.append(np.argmax(totals))
        idx2.append(np.argsort(totals)[-2])
        print(np.sort(totals))
    print(idx1)
    print(idx2)


def reduce_to_k_dim(data, k=2):
    """Project *data* onto its first *k* principal components via PCA."""
    return PCA(n_components=k).fit_transform(data)


def create_word_cloud(data):
    """Render all tokens as a word cloud and save it to ../out/word_cloud.png.

    Parameters
    ----------
    data : pd.DataFrame
        Must contain a "tokenized" column of space-separated tokens.
    """
    # Bug fix: WordCloud was used but never imported anywhere in this
    # file, so calling this function raised NameError. Imported locally
    # to keep the heavy dependency out of module import time.
    from wordcloud import WordCloud

    text = " ".join(data["tokenized"])
    wordcloud = WordCloud(background_color="white",
                          width=800,
                          height=600,
                          max_words=200,
                          max_font_size=80,
                          mask=mask,  # module-level plane-shaped mask
                          contour_width=3,
                          contour_color='steelblue',
                          font_path="../data/ch.ttf"  # CJK-capable font
                          ).generate(text)
    wordcloud.to_file('../out/word_cloud.png')


if __name__ == '__main__':
    # Tokenization is expensive, so the result is cached as an Excel
    # file and reused on subsequent runs.
    if os.path.exists("../data/tokenized.xlsx"):
        data = pd.read_excel("../data/tokenized.xlsx")
    else:
        data = read_json()
        data["tokenized"] = data.content.apply(chinese_word_cut)
        # Drop rows whose tokenization produced nothing.
        data = data.drop(data.index[data["tokenized"] == ""])
        data.to_excel("../data/tokenized.xlsx", index=False)

    data = data.dropna()

    create_word_cloud(data)
    # get_perplexity(data)
    # topics = do_lda(data)
    # do_kmeans(topics)



