"""数据集准备（增量预训练）"""
"""
数据集准备工具
用于处理和准备增量预训练所需的金融领域语料
"""
import json
import os
import random
import re
from typing import List, Dict, Any, Optional, Tuple

import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split

from fin_senti_entity_platform.utils.logger import Logger
from fin_senti_entity_platform.utils.config_loader import ConfigLoader
from fin_senti_entity_platform.data_collection.storage.mongo_storage import MongoStorage
from fin_senti_entity_platform.data_collection.storage.parquet_storage import ParquetStorage


class DatasetPreparer:
    """数据集准备器"""
    
    def __init__(self, config: Optional[Dict[str, Any]] = None):
        """
        Initialize the dataset preparer.

        Args:
            config: Configuration mapping; when None, settings are read from
                the 'dataset_preparer' section of the configuration file.
        """
        # Fall back to the configuration file when no explicit config is given.
        self.config = config if config is not None else ConfigLoader().get('dataset_preparer', {})
        self.logger = Logger().get_logger(__name__)

        # Storage backends for MongoDB and Parquet sources.
        self.mongo_storage = MongoStorage()
        self.parquet_storage = ParquetStorage()

        # Paths and train/val/test split ratios (with defaults).
        cfg = self.config
        self.raw_data_dir = cfg.get('raw_data_dir', './data/raw')
        self.processed_data_dir = cfg.get('processed_data_dir', './data/processed')
        self.train_ratio = cfg.get('train_ratio', 0.8)
        self.val_ratio = cfg.get('val_ratio', 0.1)
        self.test_ratio = cfg.get('test_ratio', 0.1)

        # Make sure both working directories exist.
        for directory in (self.raw_data_dir, self.processed_data_dir):
            os.makedirs(directory, exist_ok=True)
    
    def load_from_mongo(self, collection_name: str, query: Optional[Dict[str, Any]] = None, limit: Optional[int] = None) -> List[Dict[str, Any]]:
        """
        Load records from a MongoDB collection.

        Args:
            collection_name: Name of the collection to read from.
            query: Mongo filter document; None matches every document.
            limit: Maximum number of records to return (None for no limit).

        Returns:
            List[Dict[str, Any]]: The matching records.

        Raises:
            Exception: Re-raised from the storage layer after logging.
        """
        try:
            self.logger.info(f"从MongoDB加载数据，集合: {collection_name}")
            # An absent query means "match everything".
            documents = self.mongo_storage.find(collection_name, query if query is not None else {}, limit=limit)
            self.logger.info(f"成功加载 {len(documents)} 条数据")
            return documents
        except Exception as e:
            self.logger.error(f"从MongoDB加载数据失败: {str(e)}")
            raise
    
    def load_from_parquet(self, file_path: str) -> List[Dict[str, Any]]:
        """
        Load records from a Parquet file.

        Numpy values are converted to native Python types so the records can
        be serialized (e.g. to JSON) downstream.

        Args:
            file_path: Path of the Parquet file.

        Returns:
            List[Dict[str, Any]]: Records with native Python values.

        Raises:
            Exception: Re-raised from the storage layer after logging.
        """
        try:
            self.logger.info(f"从Parquet文件加载数据: {file_path}")

            # Load data from the storage backend.
            data = self.parquet_storage.read(file_path)

            # Convert numpy values to native Python types.
            result = []
            for record in data:
                processed_record = {}
                for key, value in record.items():
                    if isinstance(value, np.ndarray):
                        processed_record[key] = value.tolist()
                    elif isinstance(value, np.generic):
                        # np.generic covers every numpy scalar type (signed and
                        # unsigned ints, floats, bool_, ...); .item() yields the
                        # equivalent native Python value. The previous explicit
                        # type list missed e.g. np.uint64 and np.bool_.
                        processed_record[key] = value.item()
                    else:
                        processed_record[key] = value
                result.append(processed_record)

            self.logger.info(f"成功加载 {len(result)} 条数据")

            return result

        except Exception as e:
            self.logger.error(f"从Parquet文件加载数据失败: {str(e)}")
            raise
    
    def load_from_json(self, file_path: str) -> List[Dict[str, Any]]:
        """
        Load records from a JSON file.

        Args:
            file_path: Path of the JSON file (UTF-8 encoded).

        Returns:
            List[Dict[str, Any]]: The parsed records.

        Raises:
            Exception: Re-raised (e.g. OSError, JSONDecodeError) after logging.
        """
        try:
            self.logger.info(f"从JSON文件加载数据: {file_path}")
            with open(file_path, 'r', encoding='utf-8') as fh:
                records = json.load(fh)
            self.logger.info(f"成功加载 {len(records)} 条数据")
            return records
        except Exception as e:
            self.logger.error(f"从JSON文件加载数据失败: {str(e)}")
            raise
    
    def load_silc_efsa_dataset(self, file_path: str) -> List[Dict[str, Any]]:
        """
        Load the SILC-EFSA dataset from a CSV file.

        Expects (at least) 'text', 'entity' and 'sentiment' columns; a missing
        column or a NaN cell falls back to the default value ('' for text,
        [] for entities/sentiments).

        Args:
            file_path: Path of the CSV file.

        Returns:
            List[Dict[str, Any]]: Records with keys 'text', 'entities' and
                'sentiments'.

        Raises:
            Exception: Re-raised after logging on any failure.
        """
        def _cell(row, column, default):
            # Series.get handles a missing column; NaN cells (pandas' marker
            # for empty CSV fields) previously leaked float('nan') into the
            # output, so map those to the default as well.
            value = row.get(column, default)
            if isinstance(value, float) and pd.isna(value):
                return default
            return value

        try:
            self.logger.info(f"加载SILC-EFSA数据集: {file_path}")

            # Read the CSV file.
            df = pd.read_csv(file_path)

            # Convert each row to the platform's record layout.
            data = []
            for _, row in df.iterrows():
                data.append({
                    'text': _cell(row, 'text', ''),
                    'entities': _cell(row, 'entity', []),
                    'sentiments': _cell(row, 'sentiment', []),
                })

            self.logger.info(f"成功加载并处理 {len(data)} 条数据")

            return data

        except Exception as e:
            self.logger.error(f"加载SILC-EFSA数据集失败: {str(e)}")
            raise
    
    def clean_text(self, text: str) -> str:
        """
        Clean a piece of raw text.

        Collapses whitespace runs into single spaces and replaces characters
        outside the allowed set (CJK unified ideographs, ASCII letters/digits,
        and common Chinese/English punctuation) with spaces.

        Args:
            text: Raw input text.

        Returns:
            str: Cleaned, single-spaced text (possibly empty).
        """
        # Normalize all runs of whitespace to single spaces.
        text = ' '.join(text.strip().split())

        # Replace disallowed characters with spaces. The module-level `re`
        # import replaces the previous per-call function-level import; `re`
        # caches compiled patterns, so re.sub here is not a hot-spot.
        text = re.sub(r'[^\u4e00-\u9fa5a-zA-Z0-9，。！？,.!?;:：；"\'"\(\)（）]', ' ', text)

        # Collapse any whitespace introduced by the substitution.
        return ' '.join(text.strip().split())
    
    def prepare_pretrain_data(self, data: List[Dict[str, Any]], output_file: str) -> str:
        """
        Build a pretraining dataset as a JSON-lines file.

        Each output line is a JSON object {'text': cleaned_text}; records
        without usable text are dropped.

        Args:
            data: Raw records; the text is taken from the 'text' field,
                falling back to 'content'.
            output_file: Destination path for the JSON-lines file.

        Returns:
            str: The output file path.

        Raises:
            Exception: Re-raised after logging on any failure.
        """
        try:
            self.logger.info(f"准备预训练数据，输出文件: {output_file}")

            # Extract, clean, and keep only non-empty texts.
            pretrain_data = []
            for record in data:
                raw_text = record.get('text', '') or record.get('content', '')
                if not raw_text:
                    continue
                cleaned = self.clean_text(raw_text)
                if cleaned:
                    pretrain_data.append({'text': cleaned})

            # Create the parent directory when a directory part is present.
            parent = os.path.dirname(output_file)
            if parent:
                os.makedirs(parent, exist_ok=True)

            # One JSON object per line (JSON-lines format).
            with open(output_file, 'w', encoding='utf-8') as fh:
                for entry in pretrain_data:
                    fh.write(json.dumps(entry, ensure_ascii=False) + '\n')

            self.logger.info(f"成功准备预训练数据，共 {len(pretrain_data)} 条")

            return output_file

        except Exception as e:
            self.logger.error(f"准备预训练数据失败: {str(e)}")
            raise
    
    def split_dataset(self, data: List[Dict[str, Any]], output_prefix: str) -> Tuple[str, str, str]:
        """
        Split a dataset into train / validation / test JSON-lines files.

        Uses the configured ratios with a fixed random_state of 42 so the
        split is reproducible.

        Args:
            data: Records to split.
            output_prefix: Prefix for the three output files
                (suffixes: _train.json, _val.json, _test.json).

        Returns:
            Tuple[str, str, str]: Paths of the train, validation and test
                files, in that order.

        Raises:
            Exception: Re-raised after logging on any failure.
        """
        try:
            self.logger.info(f"分割数据集，输出前缀: {output_prefix}")

            # Create the parent directory when a directory part is present.
            parent = os.path.dirname(output_prefix)
            if parent:
                os.makedirs(parent, exist_ok=True)

            # Carve off the test set first, then split the remainder into
            # train/val; val_ratio is rescaled because it now applies to the
            # remainder rather than the full dataset.
            remainder, test_data = train_test_split(data, test_size=self.test_ratio, random_state=42)
            train_data, val_data = train_test_split(remainder, test_size=self.val_ratio / (self.train_ratio + self.val_ratio), random_state=42)

            def _write_jsonl(records, path):
                # JSON-lines: one object per line.
                with open(path, 'w', encoding='utf-8') as fh:
                    for record in records:
                        json.dump(record, fh, ensure_ascii=False)
                        fh.write('\n')

            train_file = f"{output_prefix}_train.json"
            val_file = f"{output_prefix}_val.json"
            test_file = f"{output_prefix}_test.json"
            _write_jsonl(train_data, train_file)
            _write_jsonl(val_data, val_file)
            _write_jsonl(test_data, test_file)

            self.logger.info(f"成功分割数据集: 训练集 {len(train_data)} 条, 验证集 {len(val_data)} 条, 测试集 {len(test_data)} 条")

            return train_file, val_file, test_file

        except Exception as e:
            self.logger.error(f"分割数据集失败: {str(e)}")
            raise
    
    def create_pretrain_corpus(self, sources: List[Dict[str, Any]], output_file: str) -> str:
        """
        Build a deduplicated, shuffled pretraining corpus from several sources.

        Args:
            sources: Source descriptors. Each has a 'type' ('mongo', 'parquet',
                'json' or 'silc_efsa'); file sources carry a 'path', mongo
                sources carry 'collection' (and optionally 'query'/'limit').
            output_file: Destination JSON-lines file.

        Returns:
            str: The output file path.

        Raises:
            Exception: Re-raised after logging on any failure.
        """
        try:
            self.logger.info(f"从多个数据源创建预训练语料，输出文件: {output_file}")

            all_data = []

            # Load records from every configured source.
            for source in sources:
                source_type = source.get('type', 'json')
                source_path = source.get('path', '')

                if source_type == 'mongo':
                    # Mongo sources are addressed by collection name, not by a
                    # filesystem path, so no path-existence check applies here.
                    # (The previous up-front check on the empty 'path' skipped
                    # every mongo source unconditionally.)
                    collection_name = source.get('collection', '')
                    query = source.get('query', {})
                    limit = source.get('limit', None)
                    data = self.load_from_mongo(collection_name, query, limit)
                elif source_type in ('parquet', 'json', 'silc_efsa'):
                    # File-based sources: skip (with a warning) when the file
                    # does not exist.
                    if not os.path.exists(source_path):
                        self.logger.warning(f"数据源路径不存在: {source_path}")
                        continue
                    if source_type == 'parquet':
                        data = self.load_from_parquet(source_path)
                    elif source_type == 'json':
                        data = self.load_from_json(source_path)
                    else:
                        data = self.load_silc_efsa_dataset(source_path)
                else:
                    self.logger.warning(f"不支持的数据源类型: {source_type}")
                    continue

                all_data.extend(data)

            # Deduplicate on the cleaned text.
            unique_data = []
            seen_texts = set()
            for item in all_data:
                text = item.get('text', '') or item.get('content', '')
                cleaned_text = self.clean_text(text)
                if cleaned_text and cleaned_text not in seen_texts:
                    seen_texts.add(cleaned_text)
                    unique_data.append({'text': cleaned_text})

            # Shuffle so the corpus is not ordered by source.
            random.shuffle(unique_data)

            # Create the parent directory when a directory part is present.
            output_dir = os.path.dirname(output_file)
            if output_dir:
                os.makedirs(output_dir, exist_ok=True)

            # Write as JSON-lines.
            with open(output_file, 'w', encoding='utf-8') as f:
                for item in unique_data:
                    json.dump(item, f, ensure_ascii=False)
                    f.write('\n')

            self.logger.info(f"成功创建预训练语料，共 {len(unique_data)} 条数据")

            return output_file

        except Exception as e:
            self.logger.error(f"创建预训练语料失败: {str(e)}")
            raise