"""
1. 生成一个脚本包含DataPreprocessing类，功能要求如下
        1- 能够读取.json文件，并将json文件中的数据提取出来并转换成.csv文件
        2- 能读取csv文件，并将csv文件转换为符合fasttext模型训练要求的fasttext_train.txt，包含 按照字符级别切分和词切分两种格式
        3- 从.csv文件中读取"text"列文本，并进行数据清洗
            1. 文本清洗
            2. 繁简转换     (暂定)
            3. 大小写归一化
            4. 标点符号处理
            5. 停用词去除
            6. 分词（中文必需）
            7. 移除噪声数据
    2. 调用示例
"""

import json
import pandas as pd
import jieba
import re
import os
from denoise import ASRDenormalizer

# Shared ASR denormalizer used by DataPreprocessing.clean_text.
# NOTE(review): project-local helper — presumably rewrites noisy ASR
# output (e.g. spoken-form numbers) into normalized text; confirm
# against the denoise module.
denoise = ASRDenormalizer()

# Default stopword-list location (one word per line, UTF-8),
# relative to the script's working directory.
stopword_file_path = "../data/stopwords.txt"


class DataPreprocessing:
    """Preprocessing pipeline for Simplified-Chinese intent data.

    Converts raw JSON into labeled CSV, cleans/segments the text, and
    emits FastText-format training files (word-level and char-level).
    """

    def __init__(self, stopword_file=None):
        """
        Initialize the preprocessor (Simplified Chinese only).

        :param stopword_file: optional path to a stopword file (one word
            per line, UTF-8). When ``None`` or missing, a built-in
            default list is used.
        """
        self.stopwords = self._load_stopwords(stopword_file)

    def _load_stopwords(self, file_path):
        """Load Chinese stopwords from *file_path*, or return the default set."""
        if file_path and os.path.exists(file_path):
            with open(file_path, 'r', encoding='utf-8') as f:
                return set(line.strip() for line in f if line.strip())
        # Built-in fallback: common Chinese function words / particles.
        return {'的', '了', '在', '上', '吗', '呢', '吧', '啊', '呀', '哦', '嗯',
                '就', '都', '也', '还', '又', '再', '不', '没', '无', '非',
                '很', '太', '真', '好', '这', '那', '哪', '谁', '什么',
                '和', '与', '及', '或', '但', '却', '而', '以', '为',
                '是', '有', '会', '可以', '能够', '应该', '想', '要',
                '给', '把', '让', '叫', '看', '听', '玩', '用', ' '}

    def json_to_csv(self, json_path, csv_path, intents_output=None):
        """
        Convert a JSON dataset into a labeled CSV.

        Expects the JSON file to be a dict whose values each carry
        ``text``, ``intent`` and ``slots`` keys.

        :param json_path: input JSON file path
        :param csv_path: output CSV file path (columns: text, intent,
            slots, label)
        :param intents_output: optional path; when given, the intent
            class names are written there, one per line, in label order
        """
        # intent → label mapping (adjust for the actual project).
        intent2label = {
            "Travel-Query": 0,
            "Music-Play": 1,
            "FilmTele-Play": 2,
            "Video-Play": 3,
            "Radio-Listen": 4,
            "HomeAppliance-Control": 5,
            "Weather-Query": 6,
            "Alarm-Update": 7,
            "Calendar-Query": 8,
            "TVProgram-Play": 9,
            "Audio-Play": 10
        }

        with open(json_path, 'r', encoding='utf-8') as f:
            data = json.load(f)

        rows = []
        for item in data.values():
            text = item["text"]
            intent = item["intent"]
            slots = item["slots"]
            # Unknown intents get label -1 rather than raising.
            label = intent2label.get(intent, -1)

            rows.append({
                "text": text,
                "intent": intent,
                # Slots are stored as a JSON string so the CSV stays flat.
                "slots": json.dumps(slots, ensure_ascii=False),
                "label": label
            })

        df = pd.DataFrame(rows)
        # utf-8-sig so Excel opens the Chinese text correctly.
        df.to_csv(csv_path, index=False, encoding='utf-8-sig')
        # Fixed message typo: "SON" → "JSON".
        print(f"JSON 已转换为 CSV：{csv_path}")

        # Persist the intent class order (matches the label indices above).
        if intents_output:
            with open(intents_output, 'w', encoding='utf-8') as f:
                for intent in intent2label.keys():
                    f.write(f"{intent}\n")
            print(f"Intent 类别已保存：{intents_output}")

    def clean_text(self, text):
        """
        Clean one Simplified-Chinese text string.

        Pipeline: strip symbols (keeping CJK, digits, common Chinese
        punctuation) → collapse whitespace → ASR denormalization →
        jieba segmentation → map digit tokens to the ``<NUM>``
        placeholder → stopword removal.

        :param text: raw input (coerced to ``str`` if needed)
        :return: space-separated cleaned tokens
        """
        if not isinstance(text, str):
            text = str(text)

        # 1. Symbol cleanup: keep CJK ideographs, digits, common Chinese
        #    punctuation and whitespace; everything else becomes a space.
        #    Digits are intentionally kept here: the old order replaced
        #    digit runs with "<NUM>" first and this regex then stripped
        #    the placeholder characters away, so numbers were lost.
        text = re.sub(r'[^\u4e00-\u9fa50-9，。！？；：、\s]', ' ', text)

        # 2. Collapse whitespace runs.
        text = re.sub(r'\s+', ' ', text).strip()

        # 3. ASR denormalization (project-local; presumably normalizes
        #    spoken-form artifacts — confirm against ASRDenormalizer).
        text = denoise.denormalize(text)

        # 4. Word segmentation (required for Chinese).
        words = jieba.lcut(text)

        # 5. Collapse numeric tokens into a single shared "<NUM>" symbol
        #    after segmentation, so it can no longer be stripped away.
        words = ['<NUM>' if w.isdigit() else w for w in words]

        # 6. Remove stopwords and empty tokens.
        filtered = [w for w in words if w not in self.stopwords and w.strip() != '']

        return ' '.join(filtered)

    def csv_to_fasttext(self, csv_path, output_path, char_level=False):
        """
        Convert a labeled CSV into FastText training format
        (``__label__<intent> token token ...``).

        :param csv_path: input CSV with 'intent' and 'text' columns
        :param output_path: output .txt path
        :param char_level: True = one token per character,
            False = jieba word tokens
        """
        df = pd.read_csv(csv_path, encoding='utf-8')

        with open(output_path, 'w', encoding='utf-8') as f:
            for _, row in df.iterrows():
                intent = row['intent']
                text = row['text']

                cleaned = self.clean_text(text)

                if char_level:
                    # Character level: split per character, dropping the
                    # word-separator spaces.
                    tokens = ' '.join([c for c in cleaned if c != ' '])
                else:
                    # Word level: keep the space-separated segmentation.
                    tokens = cleaned

                f.write(f"__label__{intent} {tokens}\n")

        print(f" 已生成 FastText 文件：{output_path} (char_level={char_level})")

    def run_pipeline(self, input_json, output_csv, fasttext_word, fasttext_char, intents_file=None):
        """
        Full pipeline: JSON → CSV → FastText (word-level + char-level).

        :param input_json: source JSON dataset path
        :param output_csv: intermediate labeled CSV path
        :param fasttext_word: output path for the word-level file
        :param fasttext_char: output path for the char-level file
        :param intents_file: optional path for the intent class list
        """
        print("数据管道处理json文件JSON → CSV → FastText")
        self.json_to_csv(input_json, output_csv, intents_output=intents_file)
        self.csv_to_fasttext(output_csv, fasttext_word, char_level=False)
        self.csv_to_fasttext(output_csv, fasttext_char, char_level=True)


# ======================================
# 测试函数
# ======================================

def demo_test_pipeline():
    """Demo run of the full pipeline on the noisy *test* split.

    NOTE: only the test split is processed here; run the same calls
    with the train/dev paths under ../data/noise_data/ to process them.
    """
    test_json_path = "../data/noise_data/test_noise.json"
    test_csv_path = "../data/noise_data/test.csv"

    test_fasttext_char = "../data/noise_data/test_fasttext_char.txt"
    test_fasttext_words = "../data/noise_data/test_fasttext_words.txt"

    processor = DataPreprocessing(stopword_file_path)
    # Fixed: the word/char output paths were swapped, so the word-level
    # file was written to *_char.txt and vice versa.
    processor.run_pipeline(
        input_json=test_json_path,
        output_csv=test_csv_path,
        fasttext_word=test_fasttext_words,
        fasttext_char=test_fasttext_char,
        intents_file="../data/train_intents.txt"
    )

    # Verify every expected output file exists.
    for f in [test_csv_path, test_fasttext_words, test_fasttext_char]:
        assert os.path.exists(f), f" 文件缺失：{f}"
        print(f"{f} 已生成")


# Script entry point: run the demo pipeline on the noisy test split.
if __name__ == "__main__":
    demo_test_pipeline()
