{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "fe0828ed",
   "metadata": {},
   "outputs": [],
   "source": [
    "# -*- coding: utf-8 -*-\n",
    "import os\n",
    "import re\n",
    "import json\n",
    "import jieba\n",
    "import numpy as np\n",
    "import pandas as pd\n",
    "import matplotlib.pyplot as plt\n",
    "from sklearn.feature_extraction.text import TfidfVectorizer\n",
    "from gensim.models import Word2Vec\n",
    "from transformers import BertTokenizer, BertModel\n",
    "import torch\n",
    "from sklearn.manifold import TSNE\n",
    "\n",
     "# ================== Path configuration ==================\n",
     "# NOTE(review): hardcoded absolute Windows path — consider a configurable DATA_DIR\n",
     "ORIGINAL_PATH = r'D:\\BaiduNetdiskDownload\\微博评论_202505061835.csv'\n",
     "CLEANED_PATH = os.path.splitext(ORIGINAL_PATH)[0] + '_cleaned.csv'  # cleaned CSV next to the original\n",
     "TOKENIZED_PATH = os.path.splitext(ORIGINAL_PATH)[0] + '_tokens.json'  # tokenized output (JSON)\n",
     "VECTOR_DIR = os.path.dirname(ORIGINAL_PATH)  # directory for saved .npy vector files\n",
    "\n",
    "\n",
    "# ================== 文本清洗类 ==================\n",
    "class TextCleaner:\n",
    "    @staticmethod\n",
    "    def clean_text(text):\n",
    "        \"\"\"五阶段文本清洗流水线\"\"\"\n",
    "        # 阶段1：URL和@处理\n",
    "        text = re.sub(r'http[s]?://\\S+', '', text)\n",
    "        text = re.sub(r'@[\\u4e00-\\u9fa5a-zA-Z0-9_]+', '', text)\n",
    "\n",
    "        # 阶段2：表情符号和话题标签\n",
    "        text = re.sub(r'\\[.*?\\]', '', text)\n",
    "        text = re.sub(r'#', '', text)\n",
    "\n",
    "        # 阶段3：数值规范化\n",
    "        text = re.sub(r'\\d+元', '金额', text)\n",
    "        text = re.sub(r'\\d+', '数值', text)\n",
    "\n",
    "        # 阶段4：特殊符号处理\n",
    "        text = re.sub(r'[【】、↓→√▲▼★☆◆◇■□●○《》]+', '', text)\n",
    "        text = re.sub(r'[^\\u4e00-\\u9fa5a-zA-Z0-9]+', ' ', text)\n",
    "\n",
    "        # 阶段5：空格合并\n",
    "        return re.sub(r'\\s+', ' ', text).strip()\n",
    "\n",
    "\n",
    "# ================== 停用词管理类 ==================\n",
    "class StopwordsManager:\n",
    "    def __init__(self):\n",
    "        self.base_stopwords = self._load_base_stopwords()\n",
    "        self.internet_stopwords = self._load_internet_stopwords()\n",
    "        self.ad_stopwords = self._load_ad_stopwords()\n",
    "\n",
    "    def get_stopwords(self):\n",
    "        return self.base_stopwords | self.internet_stopwords | self.ad_stopwords\n",
    "\n",
    "    @staticmethod\n",
    "    def _load_base_stopwords():\n",
    "        return {'的', '了', '在', '是', '我', '你', '他', '这', '那', '就',\n",
    "                '和', '要', '不', '吗', '啊', '哦', '嗯', '吧'}\n",
    "\n",
    "    @staticmethod\n",
    "    def _load_internet_stopwords():\n",
    "        return {'http', 'cn', 'com', 'www', 'html', 'tcn', '图片评论',\n",
    "                '分享图片', '网页链接', '点击', '查看', '回复', '转发', '微博'}\n",
    "\n",
    "    @staticmethod\n",
    "    def _load_ad_stopwords():\n",
    "        return {'扫码', '关注', '添加', '微信', '公众号', 'QQ群', '加好友',\n",
    "                '领券', '优惠', '限时', '折扣', '秒杀', '点击下载'}\n",
    "\n",
    "\n",
    "# ================== 文本处理器 ==================\n",
    "class TextProcessor:\n",
    "    def __init__(self):\n",
    "        self.stopwords = StopwordsManager().get_stopwords()\n",
    "        self._init_jieba()\n",
    "\n",
    "    def _init_jieba(self):\n",
    "        \"\"\"初始化分词器\"\"\"\n",
    "        try:\n",
    "            jieba.load_userdict('电商词典.txt')\n",
    "        except FileNotFoundError:\n",
    "            print(\"未找到电商词典.txt，使用默认分词模式\")\n",
    "\n",
    "    def tokenize(self, text):\n",
    "        \"\"\"增强型分词流程\"\"\"\n",
    "        words = jieba.lcut(text)\n",
    "        return [word for word in words\n",
    "                if word not in self.stopwords\n",
    "                and len(word) > 1\n",
    "                and not word.isspace()]\n",
    "\n",
    "\n",
    "# ================== 主处理流程 ==================\n",
    "def main():\n",
    "    # ================== 数据加载 ==================\n",
    "    try:\n",
    "        df = pd.read_csv(ORIGINAL_PATH, encoding='utf-8-sig', engine='python')\n",
    "    except UnicodeDecodeError:\n",
    "        df = pd.read_csv(ORIGINAL_PATH, encoding='gbk', engine='python')\n",
    "    print(f\"成功加载数据，样本量：{len(df)}\")\n",
    "\n",
    "    # ================== 数据清洗 ==================\n",
    "    print(\"\\n正在清洗数据...\")\n",
    "    df['cleaned_text'] = df['text'].apply(TextCleaner.clean_text)\n",
    "\n",
    "    # ================== 文本处理 ==================\n",
    "    processor = TextProcessor()\n",
    "    tokenized_texts = [processor.tokenize(text) for text in df['cleaned_text']]\n",
    "\n",
    "    # 过滤空文本\n",
    "    valid_indices = [i for i, tokens in enumerate(tokenized_texts) if len(tokens) > 0]\n",
    "    filtered_texts = [tokenized_texts[i] for i in valid_indices]\n",
    "    print(f\"有效数据量：{len(filtered_texts)}\")\n",
    "\n",
    "    # ================== 保存清洗结果 ==================\n",
    "    df_valid = df.iloc[valid_indices].copy()\n",
    "    df_valid['tokenized_text'] = [' '.join(tokens) for tokens in filtered_texts]\n",
    "    try:\n",
    "        df_valid.to_csv(CLEANED_PATH, index=False, encoding='utf-8-sig',\n",
    "                        columns=['user', 'text', 'time', 'mid', 'cleaned_text', 'tokenized_text'])\n",
    "        print(f\"\\n清洗数据已保存至：{CLEANED_PATH}\")\n",
    "    except Exception as e:\n",
    "        print(f\"保存清洗数据失败：{str(e)}\")\n",
    "\n",
    "    # ================== 保存分词结果 ==================\n",
    "    token_dict = {\n",
    "        'tokens': filtered_texts,\n",
    "        'vocab_size': len({word for text in filtered_texts for word in text})\n",
    "    }\n",
    "    try:\n",
    "        with open(TOKENIZED_PATH, 'w', encoding='utf-8') as f:\n",
    "            json.dump(token_dict, f, ensure_ascii=False, indent=2)\n",
    "        print(f\"分词结果已保存至：{TOKENIZED_PATH}\")\n",
    "    except Exception as e:\n",
    "        print(f\"保存分词结果失败：{str(e)}\")\n",
    "\n",
    "    # ================== 向量化处理 ==================\n",
    "    # TF-IDF\n",
    "    print(\"\\n正在计算TF-IDF...\")\n",
    "    tfidf_vectorizer = TfidfVectorizer(\n",
    "        tokenizer=lambda x: x,\n",
    "        preprocessor=lambda x: x,\n",
    "        max_features=500\n",
    "    )\n",
    "    tfidf_matrix = tfidf_vectorizer.fit_transform(filtered_texts)\n",
    "\n",
    "    # Word2Vec\n",
    "    print(\"\\n正在训练Word2Vec...\")\n",
    "    w2v_model = Word2Vec(\n",
    "        sentences=filtered_texts,\n",
    "        vector_size=300,\n",
    "        window=5,\n",
    "        min_count=2,\n",
    "        workers=4\n",
    "    )\n",
    "\n",
    "    def text_to_vec(tokens):\n",
    "        vectors = [w2v_model.wv[word] for word in tokens if word in w2v_model.wv]\n",
    "        return np.mean(vectors, axis=0) if vectors else np.zeros(300)\n",
    "\n",
    "    w2v_vectors = np.array([text_to_vec(tokens) for tokens in filtered_texts])\n",
    "\n",
    "    # BERT\n",
    "    print(\"\\n正在生成BERT向量...\")\n",
    "    tokenizer = BertTokenizer.from_pretrained('bert-base-chinese')\n",
    "    model = BertModel.from_pretrained('bert-base-chinese')\n",
    "\n",
    "    def get_bert_vector(text):\n",
    "        inputs = tokenizer(\n",
    "            text,\n",
    "            return_tensors='pt',\n",
    "            padding=True,\n",
    "            truncation=True,\n",
    "            max_length=64\n",
    "        )\n",
    "        with torch.no_grad():\n",
    "            outputs = model(**inputs)\n",
    "        return outputs.last_hidden_state[:, 0, :].numpy().squeeze()\n",
    "\n",
    "    # 分批处理防止内存溢出\n",
    "    batch_size = 32\n",
    "    bert_texts = [' '.join(tokens) for tokens in filtered_texts]\n",
    "    bert_vectors = []\n",
    "    for i in range(0, len(bert_texts), batch_size):\n",
    "        batch = bert_texts[i:i + batch_size]\n",
    "        bert_vectors.extend([get_bert_vector(text) for text in batch])\n",
    "    bert_vectors = np.array(bert_vectors)\n",
    "\n",
    "    # ================== 保存向量数据 ==================\n",
    "    np.save(os.path.join(VECTOR_DIR, 'tfidf_vectors.npy'), tfidf_matrix.toarray())\n",
    "    np.save(os.path.join(VECTOR_DIR, 'w2v_vectors.npy'), w2v_vectors)\n",
    "    np.save(os.path.join(VECTOR_DIR, 'bert_vectors.npy'), bert_vectors)\n",
    "    print(\"\\n向量数据已保存至：\")\n",
    "    print(f\"- TF-IDF: {os.path.join(VECTOR_DIR, 'tfidf_vectors.npy')}\")\n",
    "    print(f\"- Word2Vec: {os.path.join(VECTOR_DIR, 'w2v_vectors.npy')}\")\n",
    "    print(f\"- BERT: {os.path.join(VECTOR_DIR, 'bert_vectors.npy')}\")\n",
    "\n",
    "    # ================== 可视化分析 ==================\n",
    "    def plot_vectors(vectors, title):\n",
    "        tsne = TSNE(n_components=2, random_state=42)\n",
    "        reduced = tsne.fit_transform(vectors)\n",
    "        plt.figure(figsize=(10, 8))\n",
    "        plt.scatter(reduced[:, 0], reduced[:, 1], alpha=0.6)\n",
    "        plt.title(title)\n",
    "        plt.show()\n",
    "\n",
    "    print(\"\\n生成可视化图表...\")\n",
    "    sample_size = min(100, len(filtered_texts))\n",
    "    sample_idx = np.random.choice(len(filtered_texts), sample_size, replace=False)\n",
    "\n",
    "    plot_vectors(tfidf_matrix[sample_idx].toarray(), \"TF-IDF 向量空间分布\")\n",
    "    plot_vectors(w2v_vectors[sample_idx], \"Word2Vec 向量空间分布\")\n",
    "    plot_vectors(bert_vectors[:sample_size], \"BERT 向量空间分布\")\n",
    "\n",
    "    # ================== 相似度分析 ==================\n",
    "    def cosine_sim(vec1, vec2):\n",
    "        return np.dot(vec1, vec2) / (np.linalg.norm(vec1) * np.linalg.norm(vec2))\n",
    "\n",
    "    test_samples = [\n",
    "        \"差 金额\",\n",
    "        \"淘宝 免单\",\n",
    "        \"数值 拼单\"\n",
    "    ]\n",
    "\n",
    "    print(\"\\n相似度分析示例：\")\n",
    "    for method, vectors in [('TF-IDF', tfidf_matrix),\n",
    "                            ('Word2Vec', w2v_vectors),\n",
    "                            ('BERT', bert_vectors)]:\n",
    "        print(f\"\\n{method} 相似度矩阵:\")\n",
    "\n",
    "        # 改进的样本匹配\n",
    "        indices = []\n",
    "        for i, tokens in enumerate(filtered_texts):\n",
    "            text = ' '.join(tokens)\n",
    "            if any(sample in text for sample in test_samples):\n",
    "                indices.append(i)\n",
    "                if len(indices) >= 5:  # 扩大样本池\n",
    "                    break\n",
    "        indices = list(set(indices))  # 去重\n",
    "\n",
    "        # BERT索引限制\n",
    "        if method == 'BERT':\n",
    "            indices = [i for i in indices if i < len(bert_vectors)]\n",
    "\n",
    "        if len(indices) < 2:\n",
    "            print(f\"仅找到 {len(indices)} 个有效样本，跳过\")\n",
    "            continue\n",
    "\n",
    "        print(f\"使用 {len(indices)} 个样本进行对比：\")\n",
    "\n",
    "        for i in indices:\n",
    "            for j in indices:\n",
    "                if i >= j:  # 避免重复计算\n",
    "                    continue\n",
    "\n",
    "                try:\n",
    "                    if method == 'TF-IDF':\n",
    "                        vec_i = vectors[i].toarray().flatten()\n",
    "                        vec_j = vectors[j].toarray().flatten()\n",
    "                    else:\n",
    "                        vec_i = vectors[i]\n",
    "                        vec_j = vectors[j]\n",
    "\n",
    "                    sim = cosine_sim(vec_i, vec_j)\n",
    "                    text_i = ' '.join(filtered_texts[i][:5])\n",
    "                    text_j = ' '.join(filtered_texts[j][:5])\n",
    "                    print(f\"[{text_i}...] vs [{text_j}...]: {sim:.4f}\")\n",
    "                except Exception as e:\n",
    "                    print(f\"计算错误：{str(e)}\")\n",
    "                    continue\n",
    "\n",
    "\n",
    "if __name__ == \"__main__\":\n",
    "    main()\n"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "pytorch",
   "language": "python",
   "name": "env_name"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.9.21"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
