import os
import json
import jieba
from typing import List, Dict, Set


class DataProcessor:
    """Load medical SPO (subject-predicate-object) data and derive entity/relation sets.

    Reads label mappings and a vocabulary from ``data_dir``, parses SPO
    triples into ``self.entities`` / ``self.relations``, and can tokenize
    raw text against the vocabulary for downstream model prediction.
    """

    def __init__(self, data_dir: str):
        """Resolve the data directory and eagerly load the mapping files.

        Args:
            data_dir: Path (relative or absolute) to the directory holding
                ``tag2id.json``, ``labels.json`` and ``vocab.txt``.
        """
        # Initialize all state up front: the original returned early when
        # the directory was missing and left these attributes undefined,
        # causing AttributeError in every later method call.
        self.tag2id: Dict = {}            # entity tag mapping
        self.labels: Dict = {}            # relation label mapping
        self.vocab: Set[str] = set()      # vocabulary for text filtering
        self.entities: Set[str] = set()   # collected entity strings
        self.relations: List[Dict] = []   # collected relation dicts

        # Normalize to an absolute path so later joins are unambiguous.
        self.data_dir = os.path.abspath(data_dir)
        print(f"设置数据目录为: {self.data_dir}")

        # Bail out (with defaults in place) if the directory does not exist.
        if not os.path.exists(self.data_dir):
            print(f"错误: 数据目录 {self.data_dir} 不存在")
            return

        # List the directory contents as a loading diagnostic.
        data_files = os.listdir(self.data_dir)
        print(f"数据目录中的文件: {data_files}")

        self.tag2id = self._load_json("tag2id.json")   # entity tag mapping
        self.labels = self._load_json("labels.json")   # relation label mapping
        self.vocab = self._load_vocab("vocab.txt")     # vocabulary

    def _load_json(self, filename: str) -> Dict:
        """Load a JSON file from the data directory; return ``{}`` on any failure."""
        path = os.path.join(self.data_dir, filename)
        if not os.path.exists(path):
            print(f"警告: 文件 {path} 不存在")
            return {}
        try:
            with open(path, "r", encoding="utf-8") as f:
                return json.load(f)
        except Exception as e:
            # Fix: the message used to print a literal "(unknown)" instead
            # of naming the file that failed to load.
            print(f"加载 {filename} 时出错: {e}")
            return {}

    def _load_vocab(self, filename: str) -> Set[str]:
        """Load a newline-delimited vocabulary file; return an empty set on failure."""
        path = os.path.join(self.data_dir, filename)
        if not os.path.exists(path):
            print(f"警告: 文件 {path} 不存在")
            return set()
        try:
            with open(path, "r", encoding="utf-8") as f:
                # Iterate the file directly; skip blank lines.
                return {line.strip() for line in f if line.strip()}
        except Exception as e:
            # Fix: same "(unknown)" placeholder bug as in _load_json.
            print(f"加载 {filename} 时出错: {e}")
            return set()

    def process_spo_data(self, filename: str = "medical_spo_data.json") -> None:
        """Parse SPO triples into ``self.entities`` and ``self.relations``.

        Expects a JSON list of records, each with an optional ``text`` field
        and a ``spo_list`` of dicts holding ``subject``/``predicate``/``object``.
        Prints a summary of valid/invalid triples and relation-type counts.
        """
        raw_data = self._load_json(filename)
        if not raw_data:
            print("没有SPO数据可处理")
            return

        print(f"开始处理 {len(raw_data)} 个医疗记录...")

        total_spo_count = 0
        valid_spo_count = 0
        invalid_spo_count = 0

        for record_index, record in enumerate(raw_data):
            if "spo_list" not in record:
                print(f"警告: 第 {record_index + 1} 条记录缺少 spo_list")
                continue

            spo_list = record["spo_list"]
            total_spo_count += len(spo_list)

            # Truncate the source text once per record; only append an
            # ellipsis when something was actually cut off (previously
            # "..." was added unconditionally, even to empty text).
            text = record.get("text", "")
            snippet = text[:100] + "..." if len(text) > 100 else text

            for spo_index, spo_item in enumerate(spo_list):
                try:
                    # A triple is usable only if all three parts are present.
                    required_fields = ["predicate", "object", "subject"]
                    missing_fields = [field for field in required_fields if field not in spo_item]

                    if missing_fields:
                        print(f"警告: 记录 {record_index + 1} 的SPO {spo_index + 1} 缺少字段: {missing_fields}")
                        invalid_spo_count += 1
                        continue

                    # Both subject and object are entities.
                    self.entities.add(spo_item["subject"])
                    self.entities.add(spo_item["object"])

                    self.relations.append({
                        "source": spo_item["subject"],
                        "target": spo_item["object"],
                        "type": spo_item["predicate"],
                        "text": snippet,
                    })

                    valid_spo_count += 1

                except Exception as e:
                    # Defensive: malformed items (e.g. non-dict entries) are
                    # counted as invalid instead of aborting the whole run.
                    print(f"处理记录 {record_index + 1} 的SPO {spo_index + 1} 时出错: {e}")
                    invalid_spo_count += 1

        print(f"解析完成统计:")
        print(f"  - 总医疗记录: {len(raw_data)}")
        print(f"  - 总SPO三元组: {total_spo_count}")
        print(f"  - 有效SPO: {valid_spo_count}")
        print(f"  - 无效SPO: {invalid_spo_count}")
        print(f"  - 实体数: {len(self.entities)}")
        print(f"  - 关系数: {len(self.relations)}")

        # Show a frequency breakdown of the relation types found.
        if self.relations:
            print(f"\n关系类型统计:")
            relation_types = {}
            for rel in self.relations:
                relation_types[rel["type"]] = relation_types.get(rel["type"], 0) + 1

            for rel_type, count in sorted(relation_types.items(), key=lambda x: x[1], reverse=True):
                print(f"  {rel_type}: {count} 个")

    def process_raw_text(self, filename: str = "data.txt") -> List[str]:
        """Tokenize raw text lines and keep only in-vocabulary words.

        Returns:
            One space-joined token string per non-blank input line; an empty
            list when the file is missing, empty, or unreadable.
        """
        path = os.path.join(self.data_dir, filename)
        if not os.path.exists(path):
            print(f"信息: 文件 {path} 不存在，跳过文本处理")
            return []

        try:
            with open(path, "r", encoding="utf-8") as f:
                texts = [line.strip() for line in f if line.strip()]

            if not texts:
                print("信息: 文本文件为空")
                return []

            print(f"开始处理 {len(texts)} 条文本...")
            processed_texts = []
            for i, text in enumerate(texts, 1):
                # Segment with jieba, then drop out-of-vocabulary tokens.
                words = jieba.lcut(text)
                filtered_words = [word for word in words if word in self.vocab]
                processed_texts.append(" ".join(filtered_words))

                if i % 1000 == 0:  # progress report every 1000 lines
                    print(f"已处理 {i}/{len(texts)} 条文本")

            print(f"文本处理完成 - 共处理 {len(processed_texts)} 条文本")
            return processed_texts
        except Exception as e:
            print(f"处理文本数据时出错: {e}")
            return []

    def save_processed_data(self, output_dir: str = "../processed") -> None:
        """Write ``entities.json`` and ``relations.json`` into ``output_dir``.

        The directory is created if necessary; entities are serialized as a
        list because sets are not JSON-encodable.
        """
        # Resolve to an absolute path so the report below is unambiguous.
        output_dir = os.path.abspath(output_dir)
        os.makedirs(output_dir, exist_ok=True)

        # Save entities.
        entities_path = os.path.join(output_dir, "entities.json")
        with open(entities_path, "w", encoding="utf-8") as f:
            json.dump(list(self.entities), f, ensure_ascii=False, indent=2)

        # Save relations.
        relations_path = os.path.join(output_dir, "relations.json")
        with open(relations_path, "w", encoding="utf-8") as f:
            json.dump(self.relations, f, ensure_ascii=False, indent=2)

        print(f"处理后的数据已保存至 {output_dir}")
        print(f"- 实体文件: {entities_path}")
        print(f"- 关系文件: {relations_path}")

    def create_sample_data(self):
        """Populate ``entities``/``relations`` with a small fixed AI-themed sample."""
        print("创建示例数据...")
        # Sample entities.
        self.entities = {
            "人工智能", "机器学习", "深度学习", "神经网络",
            "自然语言处理", "计算机视觉", "数据挖掘"
        }

        # Sample relations.
        self.relations = [
            {"source": "人工智能", "target": "机器学习", "type": "包含", "text": "人工智能包含机器学习"},
            {"source": "人工智能", "target": "深度学习", "type": "包含", "text": "人工智能包含深度学习"},
            {"source": "机器学习", "target": "深度学习", "type": "包含", "text": "机器学习包含深度学习"},
            {"source": "深度学习", "target": "神经网络", "type": "基于", "text": "深度学习基于神经网络"},
            {"source": "人工智能", "target": "自然语言处理", "type": "包含", "text": "人工智能包含自然语言处理"},
            {"source": "人工智能", "target": "计算机视觉", "type": "包含", "text": "人工智能包含计算机视觉"},
            {"source": "机器学习", "target": "数据挖掘", "type": "相关", "text": "机器学习与数据挖掘相关"}
        ]

        print(f"示例数据创建完成 - 实体数: {len(self.entities)}, 关系数: {len(self.relations)}")

# Usage example: load SPO data when the data directory exists,
# otherwise fall back to built-in sample data.
if __name__ == "__main__":
    # Resolve paths relative to the project root (parent of this script's dir).
    PROJECT_ROOT = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    DATA_DIR = os.path.join(PROJECT_ROOT, "data")

    print(f"项目根目录: {PROJECT_ROOT}")
    print(f"数据目录: {DATA_DIR}")

    if not os.path.exists(DATA_DIR):
        print(f"错误: 数据目录 {DATA_DIR} 不存在")
        print("将创建示例数据...")
        # Point the processor at the script's own directory just so the
        # constructor succeeds, then install the sample fixtures.
        processor = DataProcessor(data_dir=os.path.dirname(__file__))
        processor.create_sample_data()
    else:
        print("找到数据目录，开始处理数据...")
        processor = DataProcessor(data_dir=DATA_DIR)

        # Try the real SPO data first.
        processor.process_spo_data()

        # Fall back to sample data when no entities were extracted.
        if len(processor.entities) == 0:
            print("SPO数据为空，使用示例数据")
            processor.create_sample_data()

    # Optional raw-text preprocessing (no-op if data.txt is absent).
    processed_texts = processor.process_raw_text()
    if processed_texts:
        print(f"成功处理 {len(processed_texts)} 条文本数据")

    # Fix: "../processed" resolved against the current working directory,
    # not the project root — anchor it to PROJECT_ROOT explicitly so the
    # output lands in <project>/processed regardless of where the script
    # is launched from.
    processor.save_processed_data(os.path.join(PROJECT_ROOT, "processed"))

    # Show a few examples of what was extracted.
    print("\n实体示例:")
    entities_list = list(processor.entities)
    for i, entity in enumerate(entities_list[:5]):
        print(f"  {i + 1}. {entity}")

    print("\n关系示例:")
    for i, relation in enumerate(processor.relations[:3]):
        print(f"  {i + 1}. {relation['source']} -> {relation['target']} ({relation['type']})")