import codecs
import os
from collections import Counter
from warnings import filterwarnings

import jieba
import numpy as np
from gensim.models import Word2Vec
from matplotlib import pyplot as plt
from sklearn.cluster import KMeans
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.manifold import TSNE

# Silence third-party warnings globally (sklearn/gensim emit deprecation
# noise) so console output stays readable.
filterwarnings("ignore")

def get_stopwords(path):
    """Load a stop-word list (one word per line) from *path*.

    Parameters
    ----------
    path : str
        Path to a UTF-8 encoded stop-word file.

    Returns
    -------
    list[str]
        Stripped stop words, in file order.
    """
    # Explicit UTF-8: the stop-word list is Chinese text, and the platform
    # default encoding (e.g. GBK on Windows) would garble it.
    with codecs.open(path, "r", encoding="utf-8") as f:
        return [line.strip() for line in f]

def process_words(x, stops):
    """Tokenize Chinese text *x* with jieba and drop stop words.

    Parameters
    ----------
    x : str
        Raw text to segment.
    stops : container of str
        Stop words to remove from the token stream.

    Returns
    -------
    str
        The surviving tokens joined by single spaces.
    """
    # The original lambda parameter shadowed the outer *x*; use a
    # distinct name in a generator expression instead.
    return " ".join(tok for tok in jieba.lcut(x) if tok not in stops)

def title2corpus(df, stops):
    """Return a copy of *df* with a "corpus" column derived from "标题".

    Every title is jieba-tokenized and stop-word filtered through
    process_words; the input frame is left untouched.
    """
    result = df.copy()
    result["corpus"] = result["标题"].map(lambda title: process_words(title, stops))
    return result

def rule_filter(df, rules):
    """Filter rows of *df* by per-column threshold rules.

    Parameters
    ----------
    df : pandas.DataFrame
    rules : dict
        Mapping of column name -> {operator: value}, where operator is
        "ge" (keep rows >= value) or "le" (keep rows <= value).

    Returns
    -------
    pandas.DataFrame
        A filtered copy; the input frame is not modified.

    Raises
    ------
    ValueError
        On an unknown operator. (Previously any operator other than
        "ge" was silently treated as "le", hiding typos.)
    """
    df2 = df.copy()
    for column, rule in rules.items():
        for operator, value in rule.items():
            if operator == "ge":
                df2 = df2.loc[df2[column] >= value, :]
            elif operator == "le":
                df2 = df2.loc[df2[column] <= value, :]
            else:
                raise ValueError("unknown operator: {!r}".format(operator))
    return df2

def get_word2vec(df):
    """Train a Word2Vec model on the space-joined "corpus" column.

    Parameters
    ----------
    df : pandas.DataFrame with a "corpus" column of space-joined tokens.

    Returns
    -------
    gensim.models.Word2Vec
        Trained model with 32-dimensional vectors.
    """
    sentences = [row.split(" ") for row in df["corpus"].tolist()]
    # workers=-1 is a known gensim pitfall: a non-positive worker count
    # spawns no training threads, so the vectors stay at random init.
    # NOTE(review): "size" is the gensim 3.x keyword; gensim 4.x renamed
    # it to "vector_size" -- confirm the installed version.
    return Word2Vec(sentences, size=32, window=3, min_count=1,
                    workers=os.cpu_count() or 1)

def get_model(df):
    """Fit a TF-IDF vectorizer on the "corpus" column and return it.

    The custom token pattern keeps single-character tokens, which the
    sklearn default pattern would discard -- relevant for Chinese text.
    """
    vectorizer = TfidfVectorizer(token_pattern=r'(?u)\b\w+\b')
    # TfidfVectorizer.fit returns the fitted vectorizer itself.
    return vectorizer.fit(df["corpus"])

def extract_keywords(model, df, path):
    """Collect the top-3 TF-IDF terms of every document and dump them to disk.

    Parameters
    ----------
    model : fitted TfidfVectorizer
        Any object exposing transform() and get_feature_names[_out]().
    df : pandas.DataFrame
        Must contain a "corpus" column of space-joined tokens.
    path : str
        Directory to write "keywords.txt" into (created if missing).

    Returns
    -------
    list[tuple[str, int]]
        (keyword, frequency) pairs, most frequent first.
    """
    weights = model.transform(df["corpus"]).toarray()
    # get_feature_names() was removed in sklearn 1.2 in favour of
    # get_feature_names_out(); support both.
    if hasattr(model, "get_feature_names_out"):
        feature_names = model.get_feature_names_out()
    else:
        feature_names = model.get_feature_names()
    top_k = 3
    vocabs = []
    for weight in weights:
        # Slicing the reversed sort order is safe when the vocabulary has
        # fewer than top_k terms (the old index arithmetic wrapped to
        # negative indices and duplicated terms in that case).
        for index in np.argsort(weight)[::-1][:top_k]:
            vocabs.append(feature_names[index])
    counts = Counter(vocabs).most_common()
    os.makedirs(path, exist_ok=True)
    # Explicit UTF-8 so Chinese keywords survive on any platform.
    with codecs.open("{}/keywords.txt".format(path), "w", encoding="utf-8") as f:
        for vocab, _count in counts:
            f.write("{}\n".format(vocab))
    return counts

def get_vocab2vec(vocab2count, word2vec_model):
    """Project keyword vectors to 2-D with t-SNE, cluster them with KMeans
    and save a scatter plot to ./visualization.jpg.

    Parameters
    ----------
    vocab2count : iterable of (word, count) pairs
        E.g. the output of extract_keywords; only the words are used.
    word2vec_model : gensim Word2Vec
        Source of the word embeddings.

    Returns
    -------
    dict[str, numpy.ndarray]
        Mapping word -> 2-D t-SNE coordinate.
    """
    dim = word2vec_model.wv.vector_size
    # Dict keyed by word also deduplicates repeated entries in the input.
    vocab2vec = {}
    for vocab, _count in vocab2count:
        try:
            vector = word2vec_model.wv.get_vector(vocab)
        except KeyError:
            # Out-of-vocabulary word: random vector of the model's own
            # dimensionality (was hard-coded to 32) so t-SNE still gets a
            # full matrix.
            vector = np.random.random(dim)
        vocab2vec[vocab] = np.asarray(vector).reshape(-1)

    vocabs = list(vocab2vec.keys())
    vectors = TSNE(n_components=2).fit_transform(np.array(list(vocab2vec.values())))
    vocab2vec = dict(zip(vocabs, vectors))

    plt.rcParams['font.sans-serif'] = ['SimHei']  # render Chinese labels
    plt.rcParams['axes.unicode_minus'] = False    # keep minus signs legible with a CJK font
    plt.figure(figsize=(15, 15), dpi=300)
    n_clusters = 6
    labels = KMeans(n_clusters=n_clusters).fit_predict(vectors)
    # One scatter call per cluster so each gets its own colour + legend entry.
    for cluster in range(n_clusters):
        members = np.where(labels == cluster)
        plt.scatter(vectors[members, 0], vectors[members, 1])
    for i, vocab in enumerate(vocabs):
        plt.text(vectors[i, 0], vectors[i, 1], vocab)
    plt.legend(["cluster:{}".format(i + 1) for i in range(n_clusters)])
    plt.title("keyword cluster visualization")
    plt.savefig("./visualization.jpg")
    plt.close()
    return vocab2vec
