{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "aa0a52b8",
   "metadata": {},
   "outputs": [],
   "source": [
    "# -*- coding: utf-8 -*-\n",
    "import pandas as pd \n",
    "import jieba \n",
    "import thulac \n",
    "from sklearn.feature_extraction.text   import TfidfVectorizer, CountVectorizer \n",
    "from gensim.models   import Word2Vec, FastText \n",
    "import numpy as np \n",
    "from time import time \n",
    "import re \n",
    "from collections import Counter \n",
    "import matplotlib.pyplot   as plt \n",
    "from sklearn.decomposition   import PCA \n",
    " \n",
    "# Global matplotlib font configuration so Chinese text renders correctly\n",
    "plt.rcParams['font.sans-serif']  = ['SimHei']  # use a font that contains CJK glyphs\n",
    "plt.rcParams['axes.unicode_minus']  = False  # keep minus signs rendering with a CJK font active\n",
    " \n",
    "# 1. 数据加载与初步清洗 \n",
    "def load_and_clean_data(filepath):\n",
    "    try:\n",
    "        df = pd.read_csv(filepath,   encoding='utf-8')\n",
    "        print(f\"原始数据量: {len(df)}条\")\n",
    "        \n",
    "        # 基础清洗 \n",
    "        df['cleaned'] = df['text'].apply(lambda x: re.sub(r'@\\w+\\s?',   '', str(x))) \n",
    "        df['cleaned'] = df['cleaned'].apply(lambda x: re.sub(r'http\\S+  |www\\S+', '', str(x))) \n",
    "        df['cleaned'] = df['cleaned'].apply(lambda x: re.sub(r'[^\\w\\s]',   '', str(x))) \n",
    "        df['cleaned'] = df['cleaned'].apply(lambda x: re.sub(r'\\s+',   ' ', str(x)).strip()) \n",
    "        \n",
    "        df = df[df['cleaned'].str.len()   > 0]\n",
    "        print(f\"清洗后数据量: {len(df)}条\")\n",
    "        return df \n",
    "    except Exception as e:\n",
    "        print(f\"加载数据时出错: {e}\")\n",
    "        return None \n",
    " \n",
    "# 2. Tokenizer comparison helpers\n",
    "def tokenize_with_jieba(text):\n",
    "    \"\"\"Segment *text* with jieba and return the tokens as a list.\"\"\"\n",
    "    return [token for token in jieba.cut(text)]\n",
    " \n",
    "def tokenize_with_thulac(text, thu):\n",
    "    \"\"\"Segment *text* with a prepared THULAC instance and return the tokens.\"\"\"\n",
    "    segmented = thu.cut(text, text=True)\n",
    "    return segmented.split()\n",
    " \n",
    "def compare_tokenizers(df, sample_size=500):\n",
    "    \"\"\"Benchmark jieba against THULAC on a random sample of cleaned texts.\n",
    "\n",
    "    Prints timing and agreement statistics plus up to three disagreeing\n",
    "    examples, and returns the per-text comparison DataFrame.\n",
    "    \"\"\"\n",
    "    thu = thulac.thulac(seg_only=True)\n",
    "    texts = df['cleaned'].sample(min(sample_size, len(df)), random_state=42)\n",
    "\n",
    "    t0 = time()\n",
    "    jieba_tokens = texts.apply(tokenize_with_jieba)\n",
    "    jieba_elapsed = time() - t0\n",
    "\n",
    "    t0 = time()\n",
    "    thulac_tokens = texts.apply(lambda t: tokenize_with_thulac(t, thu))\n",
    "    thulac_elapsed = time() - t0\n",
    "\n",
    "    comparison = pd.DataFrame({\n",
    "        'Text': texts,\n",
    "        'Jieba': jieba_tokens,\n",
    "        'THULAC': thulac_tokens\n",
    "    })\n",
    "\n",
    "    # Token-list equality per row; positional assignment matches the index order.\n",
    "    comparison['Same'] = [a == b for a, b in zip(comparison['Jieba'], comparison['THULAC'])]\n",
    "    agreement = comparison['Same'].mean() * 100\n",
    "\n",
    "    print(\"\\n分词工具对比结果:\")\n",
    "    print(f\"Jieba 处理时间: {jieba_elapsed:.2f}秒\")\n",
    "    print(f\"THULAC 处理时间: {thulac_elapsed:.2f}秒\")\n",
    "    print(f\"分词结果一致的比例: {agreement:.2f}%\")\n",
    "\n",
    "    print(\"\\n不一致的示例:\")\n",
    "    for _, row in comparison[~comparison['Same']].head(3).iterrows():\n",
    "        print(f\"\\n原文: {row['Text']}\")\n",
    "        print(f\"Jieba: {'/'.join(row['Jieba'])}\")\n",
    "        print(f\"THULAC: {'/'.join(row['THULAC'])}\")\n",
    "    return comparison\n",
    " \n",
    "# 3. Vectorisation approaches\n",
    "def traditional_vectorization(texts, method='tfidf'):\n",
    "    \"\"\"Vectorise raw texts with a bag-of-words model.\n",
    "\n",
    "    method -- 'tfidf' selects TfidfVectorizer; anything else uses CountVectorizer.\n",
    "    Returns (sparse document-term matrix, fitted vectorizer).\n",
    "    \"\"\"\n",
    "    # token_pattern=None silences sklearn's warning that the default\n",
    "    # token_pattern is ignored whenever a custom tokenizer is supplied.\n",
    "    if method == 'tfidf':\n",
    "        vectorizer = TfidfVectorizer(tokenizer=tokenize_with_jieba, token_pattern=None, max_features=5000)\n",
    "    else:\n",
    "        vectorizer = CountVectorizer(tokenizer=tokenize_with_jieba, token_pattern=None, max_features=5000)\n",
    "\n",
    "    X = vectorizer.fit_transform(texts)\n",
    "    return X, vectorizer\n",
    " \n",
    "def deep_learning_embeddings(tokenized_texts, method='word2vec'):\n",
    "    \"\"\"Train a Word2Vec or FastText model and average word vectors per document.\n",
    "\n",
    "    Returns (document-vector matrix, trained gensim model). A document whose\n",
    "    tokens are all out-of-vocabulary maps to the zero vector.\n",
    "    \"\"\"\n",
    "    model_cls = Word2Vec if method == 'word2vec' else FastText\n",
    "    model = model_cls(sentences=tokenized_texts, vector_size=100, window=5, min_count=5, workers=4, epochs=10)\n",
    "\n",
    "    def document_vector(tokens):\n",
    "        # Average only the tokens that made it into the vocabulary.\n",
    "        known = [t for t in tokens if t in model.wv]\n",
    "        if not known:\n",
    "            return np.zeros(model.vector_size)\n",
    "        return np.mean(model.wv[known], axis=0)\n",
    "\n",
    "    X = np.array([document_vector(tokens) for tokens in tokenized_texts])\n",
    "    return X, model\n",
    " \n",
    "# 4. Visualisation\n",
    "def visualize_results(tokenized_texts, vector_model=None, top_n=20):\n",
    "    \"\"\"Plot top-N word frequencies, the frequency decay curve and, when a\n",
    "    gensim model with a .wv attribute is supplied, a 2-D PCA projection of\n",
    "    the top words' embeddings.\n",
    "    \"\"\"\n",
    "    all_words = [word for text in tokenized_texts for word in text]\n",
    "    word_freq = Counter(all_words)\n",
    "    top_words = word_freq.most_common(top_n)\n",
    "\n",
    "    # Horizontal bar chart of the most frequent words\n",
    "    plt.figure(figsize=(12, 6))\n",
    "    words, counts = zip(*top_words)\n",
    "    plt.barh(range(len(words)), counts, tick_label=words)\n",
    "    plt.gca().invert_yaxis()  # most frequent word on top\n",
    "    plt.title(f' 微博评论TOP {top_n}高频词分布')\n",
    "    plt.xlabel(' 出现频次')\n",
    "    plt.ylabel(' 关键词')\n",
    "    plt.tight_layout()\n",
    "    plt.show()\n",
    "\n",
    "    # Frequency decay curve (per-word raw counts, not a cumulative sum)\n",
    "    plt.figure(figsize=(12, 6))\n",
    "    plt.plot(range(1, len(counts)+1), counts, marker='o', color='#FF6F61')\n",
    "    plt.xticks(range(1, len(words)+1), words, rotation=45)\n",
    "    plt.title(' 高频词词频衰减曲线')\n",
    "    plt.xlabel(' 词语排序')\n",
    "    # BUG FIX: the old label said 累计出现次数 (cumulative), but raw counts are plotted.\n",
    "    plt.ylabel(' 出现次数')\n",
    "    plt.grid(linestyle='--', alpha=0.5)\n",
    "    plt.show()\n",
    "\n",
    "    # 2-D scatter of the top words' embeddings, reduced with PCA\n",
    "    if vector_model and hasattr(vector_model, 'wv'):\n",
    "        vocab = [w for w, _ in top_words if w in vector_model.wv]\n",
    "        word_vectors = np.array([vector_model.wv[w] for w in vocab])\n",
    "\n",
    "        pca = PCA(n_components=2)\n",
    "        vectors_2d = pca.fit_transform(word_vectors)\n",
    "\n",
    "        plt.figure(figsize=(10, 8))\n",
    "        plt.scatter(vectors_2d[:, 0], vectors_2d[:, 1], c='#2E86C1', alpha=0.7)\n",
    "        for i, word in enumerate(vocab):\n",
    "            plt.annotate(word, xy=(vectors_2d[i, 0], vectors_2d[i, 1]),\n",
    "                        xytext=(3, 3), textcoords='offset points',\n",
    "                        fontsize=9, alpha=0.8)\n",
    "        plt.title(' 词向量语义空间分布（PCA降维）')\n",
    "        plt.xlabel(' 主成分1')\n",
    "        plt.ylabel(' 主成分2')\n",
    "        plt.grid(alpha=0.3)\n",
    "        plt.show()\n",
    " \n",
    "# Entry point\n",
    "def main():\n",
    "    \"\"\"Run the full pipeline: load, compare tokenizers, vectorise, visualise, save.\"\"\"\n",
    "    # NOTE(review): hardcoded absolute path — consider making this configurable.\n",
    "    filepath = r\"C:\\Users\\10430\\微博评论_202505061835.csv\"\n",
    "\n",
    "    df = load_and_clean_data(filepath)\n",
    "    if df is None:\n",
    "        return\n",
    "\n",
    "    print(\"\\n正在进行分词工具对比...\")\n",
    "    comparison_results = compare_tokenizers(df)\n",
    "\n",
    "    print(\"\\n使用Jieba进行分词...\")\n",
    "    df['tokenized'] = df['cleaned'].apply(tokenize_with_jieba)\n",
    "\n",
    "    print(\"\\n传统向量化方法 (TF-IDF)...\")\n",
    "    tfidf_vectors, tfidf_model = traditional_vectorization(df['cleaned'], method='tfidf')\n",
    "    print(f\"TF-IDF矩阵形状: {tfidf_vectors.shape}\")\n",
    "\n",
    "    print(\"\\n深度学习方法 (Word2Vec)...\")\n",
    "    w2v_vectors, w2v_model = deep_learning_embeddings(df['tokenized'], method='word2vec')\n",
    "    print(f\"Word2Vec矩阵形状: {w2v_vectors.shape}\")\n",
    "\n",
    "    print(\"\\n生成可视化结果...\")\n",
    "    visualize_results(df['tokenized'], w2v_model)\n",
    "\n",
    "    df.to_csv('processed_weibo_comments.csv', index=False, encoding='utf-8')\n",
    "    w2v_model.save('word2vec_model.model')\n",
    "    print(\"\\n处理完成! 数据已保存到 processed_weibo_comments.csv  和 word2vec_model.model\")\n",
    "\n",
    "if __name__ == \"__main__\":\n",
    "    jieba.initialize()  # warm up jieba's dictionary before heavy use\n",
    "    main()  # BUG FIX: main() was never invoked, so the pipeline never ran\n"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "pytorch",
   "language": "python",
   "name": "env_name"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.9.21"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
