{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "\n",
    "import pandas as pd"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "正在加载电影数据...\n"
     ]
    }
   ],
   "source": [
    "# Preprocessing: load only the columns we need and build lookup dicts.\n",
    "# Movies table: only MOVIE_ID and NAME are required for the join.\n",
    "print(\"正在加载电影数据...\")\n",
    "movies = pd.read_csv('./data/movies.csv', usecols=['MOVIE_ID', 'NAME'], dtype={'MOVIE_ID': str})\n",
    "# MOVIE_ID -> NAME mapping; for duplicate ids the last row wins,\n",
    "# matching set_index(...).to_dict() behavior.\n",
    "movies_dict = dict(zip(movies['MOVIE_ID'], movies['NAME']))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "正在加载用户数据...\n"
     ]
    }
   ],
   "source": [
    "# Users table: only USER_MD5 and USER_NICKNAME are required for the join.\n",
    "print(\"正在加载用户数据...\")\n",
    "users = pd.read_csv('./data/users.csv', usecols=['USER_MD5', 'USER_NICKNAME'], dtype={'USER_MD5': str})\n",
    "# USER_MD5 -> USER_NICKNAME mapping; last duplicate wins, matching\n",
    "# set_index(...).to_dict() behavior.\n",
    "users_dict = dict(zip(users['USER_MD5'], users['USER_NICKNAME']))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Per-chunk transformation for the comments table\n",
    "def process_comments(chunk):\n",
    "    \"\"\"Enrich a comments chunk with movie names and user nicknames.\n",
    "\n",
    "    Uses the module-level ``movies_dict`` and ``users_dict`` lookups.\n",
    "    Returns a NEW DataFrame (the input chunk is not mutated, unlike the\n",
    "    previous in-place column assignments) with USER_MD5 renamed to\n",
    "    USER_ID and only the output columns, in output order. Ids missing\n",
    "    from a lookup dict map to NaN.\n",
    "    \"\"\"\n",
    "    enriched = chunk.assign(\n",
    "        NAME=chunk['MOVIE_ID'].map(movies_dict),          # movie id -> movie name\n",
    "        USER_NICKNAME=chunk['USER_MD5'].map(users_dict),  # user md5 -> nickname\n",
    "    ).rename(columns={'USER_MD5': 'USER_ID'})\n",
    "    # Select and order the columns written to the output CSV\n",
    "    return enriched[['MOVIE_ID', 'NAME', 'USER_ID', 'USER_NICKNAME',\n",
    "                     'COMMENT_ID', 'CONTENT', 'VOTES', 'RATING', 'COMMENT_TIME']]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "开始处理评论数据...\n",
      "已处理 100000 条记录...\n",
      "已处理 200000 条记录...\n",
      "已处理 300000 条记录...\n",
      "已处理 400000 条记录...\n",
      "已处理 500000 条记录...\n",
      "已处理 600000 条记录...\n",
      "已处理 700000 条记录...\n",
      "已处理 800000 条记录...\n",
      "已处理 900000 条记录...\n",
      "已处理 1000000 条记录...\n",
      "已处理 1100000 条记录...\n",
      "已处理 1200000 条记录...\n",
      "已处理 1300000 条记录...\n",
      "已处理 1400000 条记录...\n",
      "已处理 1500000 条记录...\n",
      "已处理 1600000 条记录...\n",
      "已处理 1700000 条记录...\n",
      "已处理 1800000 条记录...\n",
      "已处理 1900000 条记录...\n",
      "已处理 2000000 条记录...\n",
      "已处理 2100000 条记录...\n",
      "已处理 2200000 条记录...\n",
      "已处理 2300000 条记录...\n",
      "已处理 2400000 条记录...\n",
      "已处理 2500000 条记录...\n",
      "已处理 2600000 条记录...\n",
      "已处理 2700000 条记录...\n",
      "已处理 2800000 条记录...\n",
      "已处理 2900000 条记录...\n",
      "已处理 3000000 条记录...\n",
      "已处理 3100000 条记录...\n",
      "已处理 3200000 条记录...\n",
      "已处理 3300000 条记录...\n",
      "已处理 3400000 条记录...\n",
      "已处理 3500000 条记录...\n",
      "已处理 3600000 条记录...\n",
      "已处理 3700000 条记录...\n",
      "已处理 3800000 条记录...\n",
      "已处理 3900000 条记录...\n",
      "已处理 4000000 条记录...\n",
      "已处理 4100000 条记录...\n",
      "已处理 4200000 条记录...\n",
      "已处理 4300000 条记录...\n",
      "已处理 4400000 条记录...\n",
      "已处理 4500000 条记录...\n",
      "数据处理完成！结果已保存至 ./output/Processed_Comments.csv\n"
     ]
    }
   ],
   "source": [
    "# Process the comments table in chunks (tune chunk_size for available memory)\n",
    "print(\"开始处理评论数据...\")\n",
    "chunk_size = 100000  # rows per chunk\n",
    "output_path = './output/Processed_Comments.csv'\n",
    "os.makedirs(os.path.dirname(output_path), exist_ok=True)  # ensure ./output exists\n",
    "\n",
    "# The first chunk OVERWRITES any previous output (mode='w') and writes the\n",
    "# header; later chunks append without one. The old unconditional mode='a'\n",
    "# silently duplicated all rows whenever the notebook was re-run.\n",
    "first_chunk = True\n",
    "# Exact running row count; the old chunk.index-based estimate overcounted\n",
    "# the final partial chunk and assumed a default RangeIndex.\n",
    "total_rows = 0\n",
    "for chunk in pd.read_csv('./data/comments.csv',\n",
    "                         chunksize=chunk_size,\n",
    "                         dtype={\n",
    "                             'USER_MD5': str,\n",
    "                             'MOVIE_ID': str,\n",
    "                             'COMMENT_ID': str,\n",
    "                             'RATING': 'category',\n",
    "                             'VOTES': int\n",
    "                         }):\n",
    "    processed_chunk = process_comments(chunk)\n",
    "\n",
    "    processed_chunk.to_csv(output_path,\n",
    "                           mode='w' if first_chunk else 'a',\n",
    "                           header=first_chunk,\n",
    "                           index=False,\n",
    "                           encoding='utf-8-sig')\n",
    "    first_chunk = False\n",
    "    total_rows += len(chunk)\n",
    "    print(f\"已处理 {total_rows} 条记录...\")\n",
    "\n",
    "print(\"数据处理完成！结果已保存至\", output_path)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "douban",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.10"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
