"""
数据收集器模块
用于收集LLM的输入输出数据并保存为训练用的parquet文件

author: Assistant
date: 2025-01-XX
"""

import json
from datetime import datetime
from pathlib import Path
from typing import Any, Dict, List, Optional

import numpy as np
import pandas as pd

from verl import DataProto


class TrainingDataCollector:
    """Collects LLM input/output conversations and saves them as parquet
    train/test splits (plus a one-off ``info.json`` metadata file) for training.
    """

    def __init__(self, config):
        """Initialize the collector.

        Args:
            config: Configuration object. Its optional ``data_saving`` attribute
                (a dict-like with ``.get``) provides ``enabled``, ``save_path``
                and ``train_ratio``.
        """
        self.config = config
        # Each entry is {"messages": [{"role": ..., "content": ...}, ...]}.
        self.collected_data: List[Dict[str, Any]] = []

        # Data-saving configuration; defaults keep collection disabled.
        data_config = getattr(config, "data_saving", {})
        self.enabled = data_config.get("enabled", False)
        self.save_path = Path(data_config.get("save_path", "training_data"))
        self.train_ratio = data_config.get("train_ratio", 0.8)

    def is_enabled(self) -> bool:
        """Return True when data collection is enabled via config."""
        return self.enabled

    @staticmethod
    def _as_list(obj):
        """Convert numpy arrays (anything exposing ``tolist``) to a plain list.

        A single ``hasattr`` check suffices: ``np.ndarray`` has ``tolist``, so
        the original's separate ``isinstance(np.ndarray)`` branch was unreachable.
        """
        return obj.tolist() if hasattr(obj, "tolist") else obj

    def collect_conversation_data(self, lm_inputs: "DataProto", lm_outputs: "DataProto"):
        """Collect single-turn conversation data.

        Args:
            lm_inputs: LLM inputs; ``non_tensor_batch["messages_list"]`` holds
                one message list per sample.
            lm_outputs: LLM outputs; ``non_tensor_batch["response_texts"]``
                holds one response string per sample.
        """
        if not self.enabled:
            return

        messages_list = lm_inputs.non_tensor_batch.get("messages_list", [])
        response_texts = lm_outputs.non_tensor_batch.get("response_texts", [])

        for messages, response in zip(messages_list, response_texts):
            messages = self._as_list(messages)

            # Copy the prompt messages, then append the assistant reply (if any).
            conversation = [
                {"role": msg["role"], "content": msg["content"]} for msg in messages
            ]
            reply = response.strip()
            if reply:
                conversation.append({"role": "assistant", "content": reply})

            self.collected_data.append({"messages": conversation})

    def collect_final_conversations(self, rollout_states: List[Dict]):
        """Collect complete multi-turn rollout conversation histories.

        Args:
            rollout_states: Rollout state dicts, each with a ``history`` list
                whose items may carry ``state`` (user turn) and ``llm_response``
                (assistant turn).
        """
        if not self.enabled:
            return

        for rollout_state in rollout_states:
            # The system prompt is a runtime string and is kept verbatim.
            messages = [
                {"role": "system", "content": "你是一个非常厉害的微服务专业的专家。"}
            ]

            for turn_idx, history_item in enumerate(rollout_state["history"]):
                if "state" in history_item:
                    user_content = (
                        f"当前回合数 {turn_idx + 1}:\nState:\n{history_item['state']}"
                    )
                    messages.append({"role": "user", "content": user_content})

                if "llm_response" in history_item:
                    messages.append(
                        {"role": "assistant", "content": history_item["llm_response"]}
                    )

            # Keep only conversations with at least system + user + assistant.
            if len(messages) >= 3:
                self.collected_data.append({"messages": messages})

    def collect_from_messages_list(self, messages_list):
        """Collect complete conversations from a messages list.

        ``messages_list`` is a list (or numpy array) of conversations, each of
        the form ``[{"role": "system"|"user"|"assistant", "content": str}, ...]``.
        Entries that are not dicts, lack a role, or lack content are dropped.
        """
        if not self.enabled:
            return

        for messages in self._as_list(messages_list):
            messages = self._as_list(messages)

            # Basic validation and normalization.
            normalized = []
            for msg in messages:
                if not isinstance(msg, dict):
                    continue
                # Coerce role safely (non-str roles no longer raise on .strip()).
                role = str(msg.get("role") or "").strip()
                content = msg.get("content")
                # Fix: the original coerced content with str() *before* the
                # "is None" check, making the missing-content check dead code.
                if not role or content is None:
                    continue
                normalized.append({"role": role, "content": str(content)})

            if normalized:
                self.collected_data.append({"messages": normalized})

    @staticmethod
    def _json_safe(obj):
        """Recursively convert *obj* into JSON-serializable Python primitives.

        Fix vs. the original nested helper: numpy scalars (np.int64, np.float32,
        ...) are not Python int/float instances and were stringified ("5"
        instead of 5); they now become native numbers via ``.item()``, and
        ndarrays become lists. Unknown objects fall back to ``str(obj)``.
        """
        if obj is None or isinstance(obj, (str, int, float, bool)):
            return obj
        if isinstance(obj, np.generic):
            return obj.item()
        if isinstance(obj, np.ndarray):
            return TrainingDataCollector._json_safe(obj.tolist())
        if isinstance(obj, dict):
            return {
                str(k): TrainingDataCollector._json_safe(v) for k, v in obj.items()
            }
        if isinstance(obj, (list, tuple)):
            return [TrainingDataCollector._json_safe(item) for item in obj]
        # Complex/opaque objects: fall back to their string representation.
        return str(obj)

    def save_training_data(
        self,
        metadata: Optional[Dict[str, Any]] = None,
        date_time_dir: Optional[str] = None,
        model_name: Optional[str] = None,
    ):
        """Save collected data as train.parquet/test.parquet plus info.json.

        Args:
            metadata: Extra metadata, e.g. ``{"sample_count": int,
                "metrics": {...}, "model_info": {...}}``.
            date_time_dir: Custom timestamp directory name (e.g.
                "20250101_120000"); defaults to the current time.
            model_name: Model name used in the directory name and info.json.
        """
        if not self.enabled:
            print("数据保存未启用，跳过保存")
            return

        if not self.collected_data:
            print("没有收集到数据，跳过保存")
            return

        # Build the target directory name, optionally prefixed by the model name.
        timestamp_str = date_time_dir or datetime.now().strftime("%Y%m%d_%H%M%S")
        if model_name:
            # Strip characters that are unsafe in directory names.
            clean_model_name = "".join(
                c for c in model_name if c.isalnum() or c in "-_."
            )
            dir_name = f"{clean_model_name}_{timestamp_str}"
        else:
            dir_name = timestamp_str
        save_root = self.save_path / dir_name
        save_root.mkdir(parents=True, exist_ok=True)

        # Shuffle deterministically (fixed seed), then split into train/test.
        df = pd.DataFrame(self.collected_data)
        total_samples = len(df)
        train_size = int(total_samples * self.train_ratio)
        shuffled_df = df.sample(frac=1, random_state=42).reset_index(drop=True)
        train_df = shuffled_df[:train_size]
        test_df = shuffled_df[train_size:]

        train_path = save_root / "train.parquet"
        test_path = save_root / "test.parquet"
        train_df.to_parquet(train_path, index=False)
        test_df.to_parquet(test_path, index=False)

        # Assemble the one-off metadata json; prefer an explicit sample_count,
        # falling back to the number of collected conversations.
        meta = metadata.copy() if isinstance(metadata, dict) else {}
        sample_count = int(meta.get("sample_count", total_samples))

        info = {
            "timestamp": timestamp_str,
            "directory_name": dir_name,
            "model_name": model_name or "unknown",
            "total_samples": sample_count,
            "train_samples": int(len(train_df)),
            "test_samples": int(len(test_df)),
            "metrics": self._json_safe(meta.get("metrics", {})),
            "model_info": self._json_safe(meta.get("model_info", {})),
        }

        info_path = save_root / "info.json"
        try:
            with info_path.open("w", encoding="utf-8") as f:
                json.dump(info, f, ensure_ascii=False, indent=2)
            print(f"✅ JSON元信息写入完成: {info_path}")
        except Exception as e:
            print(f"❌ JSON写入失败: {e}")
            # Retry with a simplified payload that is guaranteed serializable.
            simplified_info = {
                "timestamp": timestamp_str,
                "directory_name": dir_name,
                "model_name": str(model_name or "unknown"),
                "total_samples": int(sample_count),
                "train_samples": int(len(train_df)),
                "test_samples": int(len(test_df)),
                "error": f"原始metadata序列化失败: {str(e)}",
            }
            try:
                with info_path.open("w", encoding="utf-8") as f:
                    json.dump(simplified_info, f, ensure_ascii=False, indent=2)
                print(f"⚠️  已保存简化版JSON: {info_path}")
            except Exception as e2:
                print(f"❌ 简化版JSON也写入失败: {e2}")
                raise

        print(f"📁 保存目录: {save_root}")
        print(f"🚂 训练数据已保存: {train_path} ({len(train_df)} 条)")
        print(f"🧪 测试数据已保存: {test_path} ({len(test_df)} 条)")
        print(f"📋 元信息已保存: {info_path}")
        if model_name:
            print(f"🤖 模型名称: {model_name}")

        # Reset the buffer so the next collection round starts clean.
        self.collected_data.clear()

    def get_stats(self) -> Dict[str, Any]:
        """Return statistics about the collected data and collector config."""
        return {
            "total_conversations": len(self.collected_data),
            "enabled": self.enabled,
            "save_path": str(self.save_path),
            "train_ratio": self.train_ratio,
        }
