# src/data_clean/my_clean.py

import pandas as pd
import jieba
import random
import re
import joblib
import os
from utils.config import Config
# Project root used by Config to resolve data/model file paths.
# NOTE(review): hard-coded absolute Windows path — consider deriving it from
# __file__ so the script runs on other machines; confirm with the team.
root_path = 'D:/HeiMa/Pycharm/group4_nlp_project'
conf = Config(root_path)  # provides conf.fast_file (input CSV) and conf.clean_file (output CSV)

# -------------------------------
# 1. Custom Chinese synonym table (tailored to e-commerce reviews)
# -------------------------------
# Maps a word (as segmented by jieba) to a list of interchangeable
# alternatives; consumed by synonym_replacement() for data augmentation.
CUSTOM_SYNONYMS = {
    # General positive words
    "不错": ["挺好", "还可以", "良好", "满意", "靠谱"],
    "很好": ["非常好", "特别好", "棒极了", "优秀", "赞"],
    "好用": ["实用", "顺手", "方便", "高效"],
    "喜欢": ["喜爱", "钟爱", "青睐", "心动"],
    "满意": ["满足", "认可", "称赞", "惊喜"],
    "划算": ["实惠", "便宜", "性价比高", "值"],
    "快": ["迅速", "快速", "快捷", "麻利", "及时"],
    "舒服": ["舒适", "惬意", "放松", "自在"],
    "干净": ["整洁", "清爽", "利落"],
    "新鲜": ["新鲜的", "刚到的", "没坏", "水灵"],
    "正品": ["原装", "官方", "真货", "非假"],
    "正品行货": ["官方正品", "品牌直供", "正规渠道"],

    # Feature/function related
    "加热": ["升温", "暖起来", "变热"],
    "制冷": ["降温", "凉快", "冷却"],
    "运行": ["工作", "启动", "运转"],
    "流畅": ["顺滑", "不卡", "丝滑", "稳定"],
    "清晰": ["清楚", "分明", "高清", "锐利"],
    "续航": ["待机", "电池耐用", "电量持久"],
    "拍照": ["成像", "照相", "摄影", "镜头"],
    "信号": ["网络", "连接", "接收", "覆盖"],

    # Quality related
    "质量": ["品质", "做工", "材质", "用料", "工艺"],
    "耐用": ["结实", "皮实", "抗造", "寿命长"],
    "结实": ["牢固", "坚固", "厚实", "稳当"],

    # Price related
    "便宜": ["实惠", "低价", "折扣", "优惠"],
    "贵": ["价格高", "偏贵", "小贵", "昂贵"],

    # Service related
    "客服": ["服务人员", "售后", "人工", "店家"],
    "态度好": ["热情", "耐心", "友善", "周到"],
    "包装": ["外包装", "盒子", "包裹", "封装"],

    # Logistics related
    "物流": ["快递", "配送", "运送", "发货"],
    "速度快": ["送达快", "效率高", "及时", "神速"],

    # Brand related (optional)
    "蒙牛": ["Mengniu", "牛奶品牌", "大品牌"],
}


# -------------------------------
# 2. 同义词替换增强函数
# -------------------------------
def synonym_replacement(text, aug_ratio=0.2):
    """
    Augment Chinese text by swapping words for synonyms from CUSTOM_SYNONYMS.

    The text is segmented with jieba; up to ``aug_ratio`` of the tokens
    (at least one, whenever any replaceable token exists) are replaced with
    a randomly chosen synonym. Uses the module-level ``random`` state, so
    results are nondeterministic unless the caller seeds it.

    :param text: original text; empty/whitespace-only input is returned as-is
    :param aug_ratio: fraction of tokens to replace (default 20%)
    :return: augmented text, joined without separators (Chinese uses none)
    """
    if not text or not text.strip():
        return text

    words = list(jieba.cut(text))

    # Positions of tokens that have an entry in the synonym table.
    replaceable_indices = [i for i, w in enumerate(words) if w in CUSTOM_SYNONYMS]
    if not replaceable_indices:
        # Nothing can be replaced; skip the copy/sampling work entirely.
        # (jieba segmentation preserves all characters, so the re-joined
        # token list equals the input text.)
        return text

    # Replace at least one token, capped by how many are replaceable.
    num_to_replace = min(max(1, int(len(words) * aug_ratio)),
                         len(replaceable_indices))

    new_words = words.copy()
    # random.sample picks distinct positions without mutating the index list
    # — equivalent to the shuffle-then-slice idiom, but clearer.
    for i in random.sample(replaceable_indices, num_to_replace):
        new_words[i] = random.choice(CUSTOM_SYNONYMS[words[i]])

    return ''.join(new_words)


# -------------------------------
# 3. Load data
# -------------------------------
print("🔍 正在加载数据...")
data = pd.read_csv(conf.fast_file)  # NOTE(review): assumes columns 'cat' and 'review' exist — confirm against the source CSV

# Map Chinese category names to integer labels 1-10.
category_mapping = {
    '书籍': 1, '平板': 2, '手机': 3, '水果': 4,
    '洗发水': 5, '衣服': 6, '酒店': 7, '计算机': 8,
    '蒙牛': 9, '热水器': 10
}
# Categories missing from the mapping become NaN — presumably every category
# in the dataset is covered; verify before relying on downstream counts.
data['cat'] = data['cat'].map(category_mapping)

# Drop rows whose review text is missing.
data.dropna(subset=['review'], inplace=True)


# 文本清洗
def preprocess_text(text):
    """
    Normalize a review string for downstream modelling.

    Strips punctuation except sentence separators, lowercases ASCII letters,
    and trims surrounding whitespace.

    :param text: raw review text; empty/whitespace-only input yields ''
    :return: cleaned text
    """
    if not text or not text.strip():
        return ''

    # Remove punctuation but keep commas and periods as sentence separators.
    # Bug fix: the original pattern [^\w\s,。] kept only the half-width
    # comma ',' and full-width period '。', stripping the full-width comma
    # '，' — the standard comma in Chinese reviews. It is now retained too.
    # (\w matches CJK characters under Python 3's Unicode-aware re.)
    text = re.sub(r'[^\w\s,，。]', '', text)
    return text.lower().strip()


print("🧹 正在清洗文本...")
data['review_clean'] = data['review'].apply(preprocess_text)

# -------------------------------
# 4. Data augmentation settings
# -------------------------------
# Minority classes (numeric labels) to up-sample to 5000 rows each.
minority_classes = [1, 3, 8, 9, 10]  # books, phones, computers, Mengniu, water heaters
target_count = 5000

print("📈 正在进行数据增强...")
augmented_rows = []

for cat in minority_classes:
    class_data = data[data['cat'] == cat].copy()
    current_count = len(class_data)

    # Already at or above the target — nothing to generate for this class.
    if current_count >= target_count:
        print(f"类别 {cat}: 当前 {current_count} ≥ 目标 {target_count}，无需增强")
        continue

    num_to_generate = target_count - current_count
    print(f"类别 {cat}: 当前 {current_count} → 目标 {target_count}，需生成 {num_to_generate} 条")

    # Sample source rows with replacement (seeded) to seed synthetic examples.
    sampled = class_data.sample(n=num_to_generate, replace=True, random_state=42).reset_index(drop=True)

    for _, row in sampled.iterrows():
        # NOTE: synonym_replacement may return the text unchanged when no
        # replaceable word is found, producing an exact duplicate row.
        augmented_text = synonym_replacement(row['review_clean'])
        new_row = row.copy()
        new_row['review_clean'] = augmented_text
        augmented_rows.append(new_row)

# Merge the synthetic rows back into the original frame.
if augmented_rows:
    augmented_df = pd.DataFrame(augmented_rows)
    enhanced_data = pd.concat([data, augmented_df], ignore_index=True)
    print(f"✅ 增强完成！原始 {len(data)} → 增强后 {len(enhanced_data)}")
else:
    enhanced_data = data.copy()
    print("✅ 无需增强")

# -------------------------------
# 5. Inspect the post-augmentation class distribution
# -------------------------------
print("\n📊 增强后类别分布:")
print(enhanced_data['cat'].value_counts().sort_index())

# -------------------------------
# 6. Save results
# -------------------------------
os.makedirs('../bert/data', exist_ok=True)
os.makedirs('../models', exist_ok=True)

# Persist the cleaned + augmented data.
# NOTE(review): assumes a 'label' column exists in the source CSV — confirm.
cleaned_df = enhanced_data[['cat', 'label', 'review_clean']]
cleaned_df.to_csv(conf.clean_file, index=False)

# Bug fix: the message previously hard-coded
# '../data/cleaned_online_shopping_augmented.csv', which can drift from the
# actual destination; report the real output path (conf.clean_file) instead.
print(f"✅ 数据已保存至: {conf.clean_file}")
print("🎉 数据增强与清洗完成！")

