import json
import os
import random
from collections import Counter

import PIL.Image as image
import matplotlib.pyplot as plt
import numpy as np
from jieba import cut
from sklearn import cluster
from sklearn.feature_extraction.text import TfidfVectorizer
from wordcloud import WordCloud


def mkdir(path):
    """
    Create the output directory *path* (including parents) if it
    does not already exist.

    Uses ``exist_ok=True`` instead of the previous
    exists-then-makedirs sequence, which was race-prone: another
    process creating the directory between the check and the call
    would raise FileExistsError.
    """
    os.makedirs(path, exist_ok=True)


def text_import(path):
    """
    Read a UTF-8 text file and return its content as a list of lines
    (split on "\\n", so no trailing newline characters remain).

    A space is inserted before each of '$', '#' and '@' so that
    cashtags, hashtags and mentions glued to other text become
    separate whitespace-delimited tokens for the segmentation step.
    """
    with open(path, "r", encoding="utf-8") as fh:
        text = fh.read()
    for marker in ("$", "#", "@"):
        text = text.replace(marker, " " + marker)
    return text.split("\n")


def segment(lines, stopwords_path, language="en-US"):
    """
    Tokenize each line according to *language* and filter out noise.

    Tokens starting with '$', '#' or '@', tokens starting with "http"
    (URLs) and stop words are dropped; surviving tokens are
    lower-cased.

    Parameters
    ----------
    lines : list[str]
        Lines of the document to segment.
    stopwords_path : str
        Path to a stop-word list, one word per line.
    language : str
        "en-US" for whitespace tokenization, "zh-CN" for jieba.

    Returns
    -------
    list[str]
        One space-joined string of kept tokens per input line.

    Raises
    ------
    ValueError
        If *language* is neither "en-US" nor "zh-CN".  (Previously an
        unknown language crashed later with UnboundLocalError.)
    """
    stopwords_set = {w.lower() for w in text_import(stopwords_path)}
    segment_result = []
    for line in lines:
        # Tokenize.
        if language == "en-US":
            # Insert a space before "http" so URLs glued to other
            # text become separate tokens.
            tokens = line.replace("http", " http").split()
        elif language == "zh-CN":
            tokens = list(cut(line))
        else:
            raise ValueError("unsupported language: {!r}".format(language))
        # Drop noise tokens and stop words.
        kept = []
        for word in tokens:
            if not word:
                continue  # defensive: word[0] below would raise on ""
            if word[0] in ("$", "#", "@") or word[0:4] == "http":
                continue
            if word.lower() in stopwords_set:
                continue
            kept.append(word.lower())
        segment_result.append(" ".join(kept))
    return segment_result


def word_stat(segment_result, output_path):
    """
    Count word frequencies over all segmented lines and plot the
    top-15 words as a horizontal bar chart saved to
    "<output_path>.jpg".

    Parameters
    ----------
    segment_result : list[str]
        Space-joined token strings, one per line (output of segment()).
    output_path : str
        Destination path without the ".jpg" extension.

    Returns
    -------
    Counter
        Mapping word -> frequency.
    """
    all_words = []
    for line in segment_result:
        all_words += line.split()
    word_count = Counter(all_words)

    # SimHei so CJK labels render; keep the minus sign readable.
    plt.rcParams["font.sans-serif"] = ["SimHei"]
    plt.rcParams["axes.unicode_minus"] = False

    top = word_count.most_common(15)
    labels = [w for w, _ in top]
    freqs = [c for _, c in top]

    # Draw on a dedicated figure and close it afterwards: previously
    # repeated calls (main plus two from k_cluster) kept drawing onto
    # the same implicit figure, so later charts accumulated the bars
    # of earlier ones.
    fig = plt.figure()
    plt.barh(labels, freqs)
    plt.xlabel("频数")
    plt.ylabel("词语")
    plt.savefig("{}.jpg".format(output_path))
    plt.close(fig)

    return word_count


def word_cloud(word_count, output_path, mask_path):
    """
    Render *word_count* (a word -> frequency mapping) as a word-cloud
    image shaped by the mask picture and save it to
    "<output_path>.png".
    """
    mask_array = np.array(image.open(mask_path))
    cloud = WordCloud(
        mask=mask_array,
        font_path="苹方黑体-细-简.ttf",
        max_words=40,
        background_color="white",
        font_step=1,
    )
    cloud.generate_from_frequencies(word_count)
    cloud.to_file("{}.png".format(output_path))


def feature_set(segment_result, output_path):
    """
    Build the TF-IDF feature matrix of the segmented documents.

    The sparse matrix is dumped to *output_path* as a JSON list of
    (row, col, weight) triples; the dense document-vector matrix is
    returned for the downstream gravity/distance/cluster steps.
    """
    vectorizer = TfidfVectorizer(
        decode_error="ignore", stop_words="english", max_df=0.2
    )
    tfidf = vectorizer.fit_transform(segment_result)

    # Serialize the sparse matrix in COO form as plain Python scalars
    # (JSON writes tuples as arrays, same output as before).
    coo = tfidf.tocoo()
    triples = [
        (int(r), int(c), float(v))
        for r, c, v in zip(coo.row, coo.col, coo.data)
    ]
    with open(output_path, "w") as fh:
        json.dump(triples, fh)
    print("Feature_set has been output.")

    return np.asarray(tfidf.toarray())


def find_gravity(vec_matrix, output_path):
    """
    Compute the "center of gravity" of every document vector: the
    weighted mean feature index

        gravity_i = sum_j(vec[i][j] * j) / sum_j(vec[i][j])

    and dump the resulting list to *output_path* as JSON.

    Replaces the previous Python-level loop with one vectorized NumPy
    expression; ``tolist()`` converts the result to plain Python
    floats so the JSON output contains standard numbers.

    NOTE(review): a row that sums to 0 still yields NaN, exactly as
    the original 0/0 division did — confirm upstream rows are never
    all-zero.

    Returns
    -------
    list[float]
        One gravity value per document vector.
    """
    mat = np.asarray(vec_matrix)
    indices = np.arange(mat.shape[1])
    gravity = (mat @ indices / mat.sum(axis=1)).tolist()

    with open(output_path, "w") as f:
        json.dump(gravity, f)
        print("Gravities have been output.")

    return gravity


def distinct_example(vec_matrix, output_path, examples_num=5):
    """
    Sample *examples_num* random pairs of document vectors, compute
    their Euclidean distance and cosine similarity, and dump the
    results to *output_path* as a JSON list of
    [index_a, index_b, euclidean, cosine] entries.

    Bug fix: the loop was hard-coded to ``range(5)``, so the
    ``examples_num`` parameter was silently ignored; it is now
    honored (default 5 preserves the old behavior).

    Parameters
    ----------
    vec_matrix : sequence of 1-D numeric vectors
    output_path : str
    examples_num : int
        Number of random pairs to sample.
    """
    random.seed()
    n = len(vec_matrix)
    examples = []
    for _ in range(examples_num):
        # choices samples WITH replacement, as before: a pair may
        # consist of the same document twice.
        i, j = random.choices(range(n), k=2)
        a, b = vec_matrix[i], vec_matrix[j]
        # Cosine similarity of the pair.
        cosine = float(
            np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b))
        )
        # Euclidean distance of the pair.
        euclid = float(np.sqrt(np.sum(np.square(a - b))))
        examples.append([i, j, euclid, cosine])

    with open(output_path, "w") as f:
        json.dump(examples, f)
        print("Distinct examples have been output.")


def k_cluster(vec_matrix, output_path, segment_result, mask_path):
    """
    Run 2-means clustering on the document vectors.

    Writes the label list to
    "<output_path>/cluster/cluster_result.json" and, for each of the
    two clusters, a word-frequency bar chart (via word_stat) and a
    word cloud (via word_cloud).

    Returns
    -------
    The fitted KMeans label array (0 or 1 per document).
    """
    k_means = cluster.KMeans(n_clusters=2)
    k_means.fit(vec_matrix)
    labels = k_means.labels_

    cluster_result_path = output_path + "/cluster"
    mkdir(cluster_result_path)
    with open(cluster_result_path + "/cluster_result.json", "w") as f:
        # int() converts NumPy integer labels into JSON-friendly ints.
        json.dump([int(x) for x in labels], f)
        print("Cluster result have been output.")

    # Split the segmented documents by cluster label.
    segment_0 = [seg for seg, lab in zip(segment_result, labels) if not lab]
    segment_1 = [seg for seg, lab in zip(segment_result, labels) if lab]

    # word_cloud returns None, so its result is no longer bound to
    # unused variables as it was before.
    word_cloud(word_stat(segment_0, cluster_result_path + "/wordstat0"),
               cluster_result_path + "/wordcloud0", mask_path)
    word_cloud(word_stat(segment_1, cluster_result_path + "/wordstat1"),
               cluster_result_path + "/wordcloud1", mask_path)

    return labels


def main():
    """
    Entry point for the text-analysis pipeline.

    Prompts for all paths/parameters, then runs, in order: text
    import, segmentation, word-frequency statistics, word cloud,
    TF-IDF feature extraction, per-document gravities, random
    distance examples and finally 2-means clustering.  All results
    are written below the chosen output folder.
    """
    # Gather input parameters interactively (prompts unchanged).
    text_path = input("请输入待分析文本的路径：")
    stopwords_path = input("请输入停用词表的路径：")
    language = input("请输入文本语言：")
    mask_path = input("请输入词云形状的图片路径：")
    output_folder = input("请输入分析结果的输出文件夹名称：")

    # Create the folder that will hold every analysis artifact.
    mkdir(output_folder)

    # Import and segment the text.
    lines = text_import(text_path)
    segment_result = segment(
        lines, stopwords_path=stopwords_path, language=language
    )

    # Word-frequency statistics and the corresponding word cloud.
    word_count = word_stat(segment_result, output_folder + "/words_stat")
    word_cloud(word_count, output_folder + "/wordcloud", mask_path)

    # TF-IDF feature set -> dense document-vector matrix.
    vec_matrix = feature_set(
        segment_result, output_folder + "/feature_set.json"
    )

    # The next three steps write their results as side effects, so
    # the previously unused return-value bindings were dropped.
    find_gravity(vec_matrix, output_folder + "/gravity.json")
    distinct_example(vec_matrix, output_folder + "/distinct example.json")
    k_cluster(vec_matrix, output_folder, segment_result, mask_path)


main()

"""
以下为输入的参数示例：

中文文本分析输入的参数：
online_reviews_texts.txt
stopwords_list.txt
zh-CN
crab.jpg
zh-CN

外文文本分析输入的参数：
tweets_apple_stock.txt
stopwords_list.txt
en-US
apple.jpeg
en-US
"""
