import jieba
import re
from data_loading import load_data, load_syndrome_mapping


# 文本清洗与预处理
def preprocess_text(text):
    """Clean TCM clinical text and segment it into space-separated tokens.

    Keeps only CJK ideographs, ASCII digits, common Chinese punctuation and
    whitespace, then tokenizes with jieba and joins the tokens with spaces.
    """
    cleaned = re.sub(r'[^\u4e00-\u9fa50-9，。、；：！？\s]', '', text)
    return ' '.join(jieba.cut(cleaned))


# 合并多源文本特征
def combine_text_features(row):
    """Merge chief complaint, present-illness history and four-diagnostics
    info from one record into a single labeled text string."""
    sections = (
        f"主诉：{row['chief_complaint']}",
        f"现病史：{row['description']}",
        f"四诊信息：{row['detection']}",
    )
    return ' '.join(sections)


# 提取中医领域特征
def extract_tcm_features(row, syndrome_to_id=None):
    """Extract binary TCM indicator features from one record.

    Args:
        row: mapping with a 'combined_text' key holding the preprocessed
            clinical text (missing key is treated as empty text).
        syndrome_to_id: optional mapping from syndrome name to id; when None
            it is loaded from 'syndrome.json' via load_syndrome_mapping —
            callers processing many rows should load it once and pass it in.

    Returns:
        list[int]: 0/1 features ordered as symptom indicators, tongue
        indicators, pulse indicators, then core-syndrome matches.
    """
    if syndrome_to_id is None:
        _, syndrome_to_id, _ = load_syndrome_mapping('syndrome.json')

    # Keyword vocabularies for each feature family.
    symptom_keywords = ['头晕', '乏力', '咳嗽', '发热', '呕吐', '腹痛', '腹泻', '出血', '恶寒', '头痛']
    tongue_features = ['舌红', '舌淡', '舌暗', '苔白', '苔黄', '苔腻', '苔薄']
    pulse_features = ['脉弦', '脉细', '脉数', '脉缓', '脉浮', '脉沉', '脉弱']

    combined_text = row.get('combined_text', '')

    # Presence indicators — comprehensions replace the original manual
    # append loops (same values, idiomatic form).
    symptom_features = [int(kw in combined_text) for kw in symptom_keywords]
    tongue_features_list = [int(feat in combined_text) for feat in tongue_features]
    pulse_features_list = [int(feat in combined_text) for feat in pulse_features]

    # Core symptom sets per syndrome (example subset: judge whether a record
    # contains ALL core symptoms of a syndrome).
    # Can be extended with more syndromes from syndrome_df.
    core_symptoms = {
        "气虚不摄证": ["乏力", "舌淡", "脉细", "出血"],
        "风寒袭肺证": ["咳嗽", "恶寒", "苔白", "脉浮紧"],
        "血热证": ["舌红", "脉数", "出血", "发热"]
    }

    # A core feature fires only when every core symptom appears in the text.
    # NOTE(review): only syndromes present in syndrome_to_id contribute a
    # slot, so the output length depends on the mapping — pass a consistent
    # mapping for every row of a dataset.
    core_features = [
        int(all(symptom in combined_text for symptom in symptoms))
        for syndrome, symptoms in core_symptoms.items()
        if syndrome in syndrome_to_id
    ]

    # Concatenate all feature families into one flat vector.
    return symptom_features + tongue_features_list + pulse_features_list + core_features


# 数据增强
def augment_chinese_text(texts, alpha_syn=0.1, alpha_ins=0.05):
    """Augment Chinese texts via synonym replacement and random insertion.

    NOTE(review): textaugment's EDA is WordNet/English-oriented — confirm it
    handles Chinese input and accepts these constructor ratios as intended.
    """
    from textaugment import EDA  # local import: optional third-party dependency
    augmenter = EDA(synonym_ratio=alpha_syn, insert_ratio=alpha_ins)
    return list(map(augmenter.augment, texts))


# 示例使用
if __name__ == "__main__":
    # Load the training data.
    train_df = load_data('train.json')
    # Merge the multi-source text fields into one column.
    train_df['combined_text'] = train_df.apply(combine_text_features, axis=1)
    # Clean and tokenize the merged text.
    train_df['combined_text'] = train_df['combined_text'].apply(preprocess_text)

    # BUG FIX: extract_tcm_features operates on a single row and returns a
    # feature list; the original passed the whole DataFrame and overwrote
    # train_df with the result. Load the syndrome mapping once (instead of
    # once per row) and apply the extractor row-wise.
    _, syndrome_to_id, _ = load_syndrome_mapping('syndrome.json')
    feature_lists = train_df.apply(
        lambda row: extract_tcm_features(row, syndrome_to_id), axis=1
    )

    # Expand the leading symptom indicators into named columns so individual
    # features can be inspected; the order mirrors the symptom keyword list
    # inside extract_tcm_features.
    symptom_keywords = ['头晕', '乏力', '咳嗽', '发热', '呕吐', '腹痛', '腹泻', '出血', '恶寒', '头痛']
    for idx, keyword in enumerate(symptom_keywords):
        train_df[f'symptom_{keyword}'] = [features[idx] for features in feature_lists]

    # Inspect the processed data.
    print(f"预处理后数据列: {train_df.columns.tolist()}")
    print(f"示例特征: symptom_头晕={train_df['symptom_头晕'].sum()}条记录包含头晕症状")