import pandas as pd
import re
import jieba
from jieba import analyse
import numpy as np
import os
from tqdm import tqdm

# Pandas display options and tqdm integration
# (tqdm.pandas() registers Series.progress_apply used below).
pd.set_option('display.max_columns', None)
tqdm.pandas()

print("=== 数据预处理开始 ===")

# 1. Load the raw scraped comments.
try:
    df = pd.read_csv('douban_movie_comments.csv')
    print(f"成功加载数据，共 {len(df)} 条记录")
except Exception as e:  # top-level boundary: report any load failure and abort
    print(f"加载数据失败: {str(e)}")
    # BUG FIX: exit() is injected by the `site` module (not guaranteed in
    # every runtime) and exits with status 0; a failed load should abort
    # with a nonzero status so callers can detect the failure.
    raise SystemExit(1)

# 2. Data cleaning.
print("\n[阶段1/3] 正在清洗数据...")


def clean_text(text):
    """Normalize one raw comment string.

    NaN / non-string input becomes "". HTML tags are stripped first;
    then every character outside CJK, word chars, whitespace and the
    Chinese punctuation marks ，。！？、 is dropped; finally runs of
    whitespace collapse to a single space and the ends are trimmed.
    """
    if pd.isna(text) or not isinstance(text, str):
        return ""
    no_tags = re.sub(r'<[^>]+>', '', text)
    allowed_only = re.sub(r'[^\u4e00-\u9fa5\w\s，。！？、]', '', no_tags)
    return re.sub(r'\s+', ' ', allowed_only).strip()


df['评论内容'] = df['评论内容'].progress_apply(clean_text)
# BUG FIX: clean_text maps NaN/non-string to "", so no NaN survives the
# line above and the original fillna('无评论') was a no-op. Mark the
# now-empty comments explicitly instead.
df['评论内容'] = df['评论内容'].replace('', '无评论')
# '无' is this dataset's placeholder for a missing rating.
df['评分'] = df['评分'].replace('无', np.nan)

# 3. Load the stopword list.
print("\n[阶段2/3] 正在准备分词工具...")

# Stopword files in priority order: the first one that exists and can be
# read wins.
STOPWORDS_PRIORITY = [
    'cn_stopwords.txt',  # general-purpose Chinese stopwords
    'baidu_stopwords.txt'  # Baidu stopword list
]


def load_stopwords():
    """Load the first readable stopword file from STOPWORDS_PRIORITY.

    Files are looked up next to this script. Falls back to a small
    built-in set when no file is available.

    Returns:
        set[str]: the stopword set (never empty).
    """
    script_dir = os.path.dirname(os.path.abspath(__file__))
    default_stopwords = {'的', '了', '和', '是', '在', '我', '有', '你', '这', '那'}

    for filename in STOPWORDS_PRIORITY:
        file_path = os.path.join(script_dir, filename)
        if not os.path.exists(file_path):
            continue
        try:
            with open(file_path, 'r', encoding='utf-8') as f:
                stopwords = {line.strip() for line in f if line.strip()}
            # BUG FIX: these two messages previously printed the literal
            # "(unknown)" instead of the actual file name.
            print(f"已加载: {filename} (共 {len(stopwords)} 个停用词)")
            return stopwords
        except (OSError, UnicodeDecodeError) as e:
            # Unreadable or mis-encoded file: report and try the next one.
            print(f"读取 {filename} 失败: {str(e)}")

    print("警告：将使用内置基础停用词 (10个)")
    return default_stopwords


stopwords = load_stopwords()
print(f"实际使用的停用词数量: {len(stopwords)}")

# BUG FIX: the original called jieba.initialize()/setLogLevel AFTER the
# first jieba.cut below, so lazy initialization (and its startup log)
# had already happened. Configure logging first, then initialize, then cut.
jieba.setLogLevel(20)  # 20 == logging.INFO: hides "Building prefix dict" chatter
jieba.initialize()

# Smoke test: verify that stopwords are actually filtered out.
test_text = "这是一个测试的句子，用于验证停用词是否被正确去除"
print("\n停用词过滤测试:")
print("原始文本:", test_text)
print("过滤结果:", ' '.join([word for word in jieba.cut(test_text) if word not in stopwords and len(word) > 1]))
print("=" * 50)

# 4. Tokenization and keyword extraction.
print("\n[阶段3/3] 正在处理文本内容...")


def seg_text(text):
    """Segment *text* with jieba into a space-joined token string.

    Stopwords and single-character tokens are dropped; falsy or
    non-string input yields "".
    """
    if not text or not isinstance(text, str):
        return ""
    kept = [token for token in jieba.cut(text)
            if len(token) > 1 and token not in stopwords]
    return ' '.join(kept)


# Segment every cleaned comment (progress bar via tqdm's progress_apply).
df['分词结果'] = df['评论内容'].progress_apply(seg_text)


def extract_keywords(text, topK=5):
    """Return the top-*topK* TF-IDF keywords of *text*, comma-joined.

    Falsy input, or text shorter than 10 characters, yields "" since
    keyword extraction is meaningless on such short snippets.
    """
    if not text or len(text) < 10:
        return ""
    tags = analyse.extract_tags(text, topK=topK)
    return ','.join(tags)


# Extract TOP-5 keywords for every comment.
df['关键词'] = df['评论内容'].progress_apply(extract_keywords)

# 5. Save the processed result.
output_file = 'data_processed.csv'
try:
    # Keep the try body minimal: only the write itself is a "save" failure.
    # (The original also caught errors from the report prints below, which
    # would then be misreported as a save failure.)
    # utf-8-sig writes a BOM so Excel opens the Chinese text correctly.
    df.to_csv(output_file, index=False, encoding='utf-8-sig')
except Exception as e:
    print(f"\n保存失败: {str(e)}")
    print("请检查文件是否被其他程序占用")
else:
    print(f"\n=== 处理完成 ===")
    print(f"结果已保存到: {output_file}")
    print("新增列说明:")
    print("- 分词结果: 分好词的文本 (已去除停用词)")
    print("- 关键词: 每条评论的TOP5关键词")

    # Show the first 3 processed rows as a sample.
    print("\n处理结果样例:")
    print(df[['评论内容', '分词结果', '关键词']].head(3))