# -*- coding: utf-8 -*-
"""
数据处理模块
处理佛山大学的问答数据，转换为训练格式
"""

import json
import random
from collections import Counter
from pathlib import Path
from typing import List, Dict, Any

import pandas as pd
from datasets import Dataset, DatasetDict

class UniversityDataProcessor:
    """Processor for the cleaned Foshan University QA data.

    Loads the cleaned question/answer JSON files, reformats them into
    chat-style (ChatML) training samples, splits them into
    train/validation/test sets and saves them as a HuggingFace
    ``DatasetDict``.
    """

    def __init__(self, data_path: str):
        """
        Args:
            data_path: Directory containing the cleaned JSON data files.
        """
        self.data_path = Path(data_path)
        # Raw QA dicts (keys: input / output / source_id / category / type / title).
        self.training_data: List[Dict[str, Any]] = []
        # RAG context chunks; loaded here but not otherwise used by this class.
        self.rag_data: List[Dict[str, Any]] = []

    def load_cleaned_data(self) -> None:
        """Load the cleaned training and RAG JSON files, if present.

        Missing files are silently skipped so either dataset is optional.
        """
        train_file = self.data_path / "foshan_university_training_go.json"
        rag_file = self.data_path / "foshan_university_rag_go.json"

        if train_file.exists():
            with open(train_file, 'r', encoding='utf-8') as f:
                self.training_data = json.load(f)
            print(f"加载训练数据: {len(self.training_data)} 条问答对")

        if rag_file.exists():
            with open(rag_file, 'r', encoding='utf-8') as f:
                self.rag_data = json.load(f)
            print(f"加载RAG数据: {len(self.rag_data)} 个数据块")

    def format_training_data(self) -> List[Dict[str, str]]:
        """Convert raw QA items into chat-message training records.

        Items with an empty (or whitespace-only) question or answer are
        skipped.

        Returns:
            A list of dicts, each with a ``messages`` list
            (system/user/assistant) plus pass-through metadata fields
            (``source_id``, ``category``, ``type``, ``title``).
        """
        formatted_data = []

        # Shared system prompt — loop-invariant, so built once (was rebuilt per item).
        system_prompt = "你是佛山大学的智能问答助手。请根据提供的信息准确、友好地回答用户关于佛山大学的问题。回答要简洁明了，重点突出。"

        for item in self.training_data:
            user_question = item.get('input', '').strip()
            assistant_answer = item.get('output', '').strip()
            # Skip incomplete samples (missing question or answer).
            if not user_question or not assistant_answer:
                continue

            formatted_data.append({
                "messages": [
                    {"role": "system", "content": system_prompt},
                    {"role": "user", "content": user_question},
                    {"role": "assistant", "content": assistant_answer}
                ],
                "source_id": item.get('source_id', ''),
                "category": item.get('category', ''),
                "type": item.get('type', ''),
                "title": item.get('title', '')
            })

        print(f"格式化完成: {len(formatted_data)} 条训练数据")
        return formatted_data

    def create_conversation_format(self, messages: List[Dict]) -> str:
        """Render chat messages as a ChatML-style conversation string.

        Messages with a role other than system/user/assistant are dropped,
        matching the original if/elif behavior.
        """
        parts = []
        for msg in messages:
            role = msg["role"]
            if role in ("system", "user", "assistant"):
                parts.append(f"<|im_start|>{role}\n{msg['content']}<|im_end|>\n")
        # "".join avoids quadratic string concatenation.
        return "".join(parts)

    def split_data(self, data: List[Dict], train_ratio=0.8, val_ratio=0.1, test_ratio=0.1):
        """Shuffle and split ``data`` into train/validation/test lists.

        The caller's list is NOT modified: a shuffled copy is split
        (the previous implementation shuffled in place). ``test_ratio``
        is implied by the remainder and kept only for signature
        compatibility.

        Returns:
            Tuple of (train_data, val_data, test_data).
        """
        # Copy before shuffling so the caller's list is left untouched (bug fix).
        shuffled = list(data)
        random.shuffle(shuffled)
        total = len(shuffled)

        train_end = int(total * train_ratio)
        val_end = train_end + int(total * val_ratio)

        train_data = shuffled[:train_end]
        val_data = shuffled[train_end:val_end]
        test_data = shuffled[val_end:]

        print(f"数据分割: 训练集 {len(train_data)}, 验证集 {len(val_data)}, 测试集 {len(test_data)}")

        return train_data, val_data, test_data

    def create_datasets(self) -> "DatasetDict":
        """Build a HuggingFace ``DatasetDict`` with train/validation/test splits.

        Each record carries the rendered conversation under ``text`` plus
        the metadata fields.
        """
        formatted_data = self.format_training_data()

        processed_data = [
            {
                "text": self.create_conversation_format(item["messages"]),
                "source_id": item["source_id"],
                "category": item["category"],
                "type": item["type"],
                "title": item["title"],
            }
            for item in formatted_data
        ]

        train_data, val_data, test_data = self.split_data(processed_data)

        return DatasetDict({
            "train": Dataset.from_list(train_data),
            "validation": Dataset.from_list(val_data),
            "test": Dataset.from_list(test_data)
        })

    def analyze_data(self) -> None:
        """Print the distribution of question types and categories.

        Requires :meth:`load_cleaned_data` to have been called first.
        """
        if not self.training_data:
            print("请先加载数据")
            return

        # Counter.most_common sorts by count descending with stable tie order,
        # identical to the previous manual dict + sorted(reverse=True).
        type_counts = Counter(item.get('type', 'unknown') for item in self.training_data)
        category_counts = Counter(item.get('category', 'unknown') for item in self.training_data)

        print("\n=== 数据分析 ===")
        print(f"总训练样本数: {len(self.training_data)}")

        print("\n问题类型分布(Top 10):")
        for q_type, count in type_counts.most_common(10):
            print(f"  {q_type}: {count}")

        print("\n类别分布:")
        for category, count in category_counts.most_common():
            print(f"  {category}: {count}")

    def save_processed_data(self, output_dir: str = "./processed_data") -> "DatasetDict":
        """Create the datasets, save them to disk and dump a small sample file.

        Args:
            output_dir: Target directory; created (with parents) if missing.

        Returns:
            The saved ``DatasetDict``.
        """
        output_path = Path(output_dir)
        # parents=True so nested output directories are created as needed (bug fix).
        output_path.mkdir(parents=True, exist_ok=True)

        dataset_dict = self.create_datasets()

        dataset_dict.save_to_disk(str(output_path))
        print(f"数据集已保存到: {output_path}")

        # Dump a few samples per split for manual inspection.
        sample_file = output_path / "samples.json"
        samples = {
            "train_sample": dataset_dict["train"][:3],
            "val_sample": dataset_dict["validation"][:2],
            "test_sample": dataset_dict["test"][:2]
        }

        with open(sample_file, 'w', encoding='utf-8') as f:
            json.dump(samples, f, ensure_ascii=False, indent=2)

        print(f"样本文件已保存到: {sample_file}")

        return dataset_dict

def main():
    """Script entry point: load, analyze, process and persist the QA data."""
    # Location of the cleaned data produced by the Go cleaner.
    processor = UniversityDataProcessor("./go-llm-cleaner/go_cleaned_data")

    processor.load_cleaned_data()
    processor.analyze_data()
    dataset_dict = processor.save_processed_data()

    print("\n=== 数据处理完成 ===")
    # Report the final split sizes.
    for label, split in (("训练集大小", "train"),
                         ("验证集大小", "validation"),
                         ("测试集大小", "test")):
        print(f"{label}: {len(dataset_dict[split])}")


if __name__ == "__main__":
    main()