{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "37b7a56d-f00b-43e7-9716-666530e63f29",
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "\n",
    "import pandas as pd\n",
    "import numpy as np\n",
    "from sklearn.model_selection import train_test_split\n",
    "from sklearn.feature_extraction.text import TfidfVectorizer\n",
    "from sklearn.linear_model import LogisticRegression\n",
    "from sklearn.metrics import classification_report\n",
    "import nltk\n",
    "import re\n",
    "from nltk.corpus import stopwords\n",
    "from nltk.tokenize import word_tokenize\n",
    "import string\n",
    "import shutil"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "5fde0607-227f-4d93-86f0-2fbdddb756da",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Download the NLTK tokenizer models and stopword lists used by\n",
    "# word_tokenize / stopwords below\n",
    "nltk.download('punkt_tab')\n",
    "nltk.download('stopwords')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "60123df2-82ed-4fa7-a453-2c0875730b2d",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Text preprocessing\n",
    "def preprocess_text(text):\n",
    "    \"\"\"Normalize raw text for TF-IDF: strip symbols, lowercase,\n",
    "    tokenize, and drop English stopwords.\n",
    "\n",
    "    Returns a single space-joined string of the surviving tokens.\n",
    "    \"\"\"\n",
    "    # Keep only word chars, whitespace and CJK ideographs (U+4E00..U+9FA5)\n",
    "    text = re.sub(r'[^\\w\\s\\u4e00-\\u9fa5]', '', text)\n",
    "    # Lowercase\n",
    "    text = text.lower()\n",
    "    # Strip punctuation (mostly redundant with the regex above; kept for safety)\n",
    "    text = text.translate(str.maketrans('', '', string.punctuation))\n",
    "    # Tokenize\n",
    "    tokens = word_tokenize(text)\n",
    "    # Build the stopword set ONCE instead of calling stopwords.words()\n",
    "    # (which rebuilds a list) and scanning it linearly for every token.\n",
    "    stop_words = set(stopwords.words('english'))\n",
    "    tokens = [word for word in tokens if word not in stop_words]\n",
    "    return ' '.join(tokens)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "c8afe63a-f42f-4091-9226-dd9d28487c90",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Load labelled training data; each line is '<label> +++$+++ <text>'.\n",
    "def load_data(file_path):\n",
    "    \"\"\"Read (label, text) pairs from file_path, preprocessing each text.\n",
    "\n",
    "    Returns two parallel lists: preprocessed texts and integer labels.\n",
    "    Lines without exactly one ' +++$+++ ' separator are skipped.\n",
    "    \"\"\"\n",
    "    texts, labels = [], []\n",
    "    with open(file_path, 'r', encoding='utf-8') as f:\n",
    "        for raw_line in f:\n",
    "            fields = raw_line.strip().split(' +++$+++ ')\n",
    "            if len(fields) != 2:\n",
    "                continue  # malformed line\n",
    "            label = int(fields[0])\n",
    "            texts.append(preprocess_text(fields[1]))\n",
    "            labels.append(label)\n",
    "    return texts, labels"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "3d2fdc96-b714-492c-909a-d3450d0f1b2b",
   "metadata": {},
   "outputs": [],
   "source": [
    "# 读取无标签数据的函数\n",
    "def load_unlabeled_data(file_path):\n",
    "    texts = []\n",
    "    \n",
    "    # 打开文件并逐行读取\n",
    "    with open(file_path, 'r', encoding='utf-8') as f:\n",
    "        for line in f:\n",
    "            # 去除行末的换行符和空格\n",
    "            text = line.strip()\n",
    "            if text:  # 确保文本不为空\n",
    "                texts.append(text)  # 将文本添加到列表中\n",
    "\n",
    "    return texts"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "a8263bb8-e7ad-4e3d-a224-fdcdb41f2364",
   "metadata": {},
   "outputs": [],
   "source": [
    "# 读取测试数据的函数\n",
    "def load_test_data(file_path):\n",
    "    texts = []\n",
    "    \n",
    "    # 打开文件并逐行读取\n",
    "    with open('data/test.txt', 'r', encoding='utf-8', errors='ignore') as f:\n",
    "        for line in f:\n",
    "            # 去除行末的换行符和空格\n",
    "            line = line.strip()\n",
    "            if line:  # 确保文本不为空\n",
    "                # 分割行，提取文本\n",
    "                _, text = line.split(',', 1)  # 只分割一次，忽略序号\n",
    "                text = re.sub(r'[^\\w\\s\\u4e00-\\u9fa5]', '', text)\n",
    "                texts.append(text.strip())  # 去除文本前后的空白字符\n",
    "\n",
    "    return texts"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "f579bade-02a8-4058-8bf6-3005e79410c9",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Load and preprocess the labelled training data\n",
    "file_path = 'data/train.txt'  # adjust to your data location\n",
    "print(\"load train data………………\")\n",
    "texts, labels = load_data(file_path)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "48c2e09a-ab17-46b3-ab93-dcd2041f78a1",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Collect the preprocessed texts and labels into a DataFrame (optional)\n",
    "data = pd.DataFrame({'text': texts, 'label': labels})\n",
    "print(\"data columns: \", data.columns.tolist())"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "7ffb4fc3-1fca-4139-ba96-bf6de4c49e23",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Feature extraction: TF-IDF over the preprocessed training texts.\n",
    "# Keep the matrix sparse — LogisticRegression accepts scipy sparse input,\n",
    "# and densifying a 5000-column matrix with .toarray() wastes memory on\n",
    "# large corpora. (len() is not defined for sparse matrices; use shape[0].)\n",
    "vectorizer = TfidfVectorizer(max_features=5000)\n",
    "X_train = vectorizer.fit_transform(data['text'])\n",
    "print(\"X_train: \", X_train.shape[0])\n",
    "y_train = data['label'].values\n",
    "print(\"y_train: \", len(y_train))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "87e52c49-631c-4b84-85d3-1df92680ee2d",
   "metadata": {},
   "outputs": [],
   "source": [
    "# # Train/validation split (disabled: the full set is used for training)\n",
    "# X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size=0.2, random_state=42)"
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "e4833b44-e79d-40cd-bf00-ab7ad117653a",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Build and train a logistic-regression classifier.\n",
    "# NOTE(review): the default max_iter=100 can raise a ConvergenceWarning\n",
    "# on large TF-IDF matrices — consider max_iter=1000 if that occurs.\n",
    "model = LogisticRegression()\n",
    "model.fit(X_train, y_train)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "1601dbe7-42f7-410a-8ca3-af676ba1043b",
   "metadata": {},
   "outputs": [],
   "source": [
    "# # Evaluate the model on the validation set (disabled, as is the split)\n",
    "# y_pred_val = model.predict(X_val)\n",
    "# print(\"Validation Classification Report:\")\n",
    "# print(classification_report(y_val, y_pred_val))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "7ed7e3ea-0273-444a-911b-6b9c98adbc11",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Load the unlabelled data and apply the SAME preprocessing as the\n",
    "# training texts: the TF-IDF vocabulary was fit on preprocessed text, so\n",
    "# inference input must be normalized the same way. (The previous\n",
    "# commented-out apply() targeted the DataFrame, not its 'text' column.)\n",
    "unlabeled_data = load_unlabeled_data('data/nolabel.txt')  # make sure the path is correct\n",
    "unlabeled_data = pd.DataFrame({'text': unlabeled_data})\n",
    "unlabeled_data['text'] = unlabeled_data['text'].apply(preprocess_text)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "052566c8-c40a-4c21-8ad9-18a4ce943261",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Vectorize the unlabelled texts.\n",
    "# Fix: pass the 'text' column, not the DataFrame — iterating a DataFrame\n",
    "# yields its column names, so transform(unlabeled_data) would encode the\n",
    "# single string 'text' instead of the actual documents. The matrix is\n",
    "# also kept sparse; predict_proba handles sparse input.\n",
    "X_unlabeled = vectorizer.transform(unlabeled_data['text'])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "f6000896-6403-40a6-83a4-48e3383c6884",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Predict class probabilities for the unlabelled samples with the trained model\n",
    "predicted_probs = model.predict_proba(X_unlabeled)  # per-class probability for each sample"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "41bcd051-3c17-46d6-8553-4cdf4441fa06",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Self-training: pseudo-label high-confidence unlabelled samples and\n",
    "# fold them into the training set.\n",
    "\n",
    "# Confidence threshold for accepting a pseudo-label.\n",
    "# NOTE(review): for binary classification, max(predict_proba) is always\n",
    "# >= 0.5, so a strict 0.5 threshold accepts essentially every sample —\n",
    "# consider a higher value (e.g. 0.9) for a meaningful filter.\n",
    "confidence_threshold = 0.5\n",
    "\n",
    "# Indices of samples whose top-class probability exceeds the threshold\n",
    "high_confidence_indices = np.where(np.max(predicted_probs, axis=1) > confidence_threshold)[0]\n",
    "high_confidence_samples = unlabeled_data.iloc[high_confidence_indices]\n",
    "high_confidence_labels = np.argmax(predicted_probs[high_confidence_indices], axis=1)  # predicted class ids\n",
    "\n",
    "# Append the pseudo-labelled samples to the original training data\n",
    "new_train_texts = data['text'].tolist() + high_confidence_samples['text'].tolist()\n",
    "new_train_labels = y_train.tolist() + high_confidence_labels.tolist()\n",
    "print(\"new_train_texts: \", len(new_train_texts))\n",
    "print(\"new_train_labels: \", len(new_train_labels))\n",
    "\n",
    "# Re-fit the vectorizer on the enlarged corpus. This changes the TF-IDF\n",
    "# vocabulary/idf weights; that is safe only because the model is refit on\n",
    "# the new encodings below and later cells transform with this refitted\n",
    "# vectorizer.\n",
    "new_train_encodings = vectorizer.fit_transform(new_train_texts).toarray()  # refit the feature extractor on the new data\n",
    "\n",
    "# Retrain the model on the augmented training set (optional)\n",
    "model.fit(new_train_encodings, new_train_labels)\n",
    "\n",
    "print(\"Finished updating the training set with high-confidence samples.\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "0f890c71-a9c5-4ec0-a9d2-b6a8d7afe66d",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Inference on the test set\n",
    "test_data = load_test_data('data/test.txt')\n",
    "\n",
    "# Wrap the texts in a DataFrame\n",
    "test_data = pd.DataFrame({'text': test_data})\n",
    "\n",
    "# Fix: apply the same preprocessing used for the training texts so the\n",
    "# TF-IDF features match the fitted vocabulary (this step was previously\n",
    "# commented out, leaving test input un-normalized relative to training).\n",
    "test_data['text'] = test_data['text'].apply(preprocess_text)\n",
    "X_test = vectorizer.transform(test_data['text'])\n",
    "\n",
    "# Predict labels for the test texts (predict accepts sparse input)\n",
    "test_predictions = model.predict(X_test)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "e9b29fb5-240c-49b6-96a7-e30479ceb612",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Batch folder — naming rule: YYYYMMDDXX, XX starts at 01 each day.\n",
    "# If a large update was made, save the corresponding code and predictions.\n",
    "path = 'data/2025010201/'\n",
    "# makedirs creates intermediate directories and, with exist_ok=True, is\n",
    "# race-free; the previous exists()+mkdir() pair failed when 'data/' was\n",
    "# missing or when the directory appeared between check and create.\n",
    "os.makedirs(path, exist_ok=True)\n",
    "# Write the full predictions and the submission file\n",
    "test_data.index.name = 'index'\n",
    "test_data['label'] = test_predictions\n",
    "test_data.to_csv(path+'predictions.csv', index=False, encoding='utf-8')\n",
    "test_data['label'].to_csv(path+'submission.csv', index=True, encoding='utf-8')\n",
    "\n",
    "# Archive this notebook alongside the predictions for reproducibility\n",
    "shutil.copy('text_sentiment_analysis.ipynb', path+'text_sentiment_analysis.ipynb')"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.12.7"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
