#!/usr/bin/env python3
"""
LocalFileWrapperWg - 专门处理本地专家数据文件的包装类
"""

import pandas as pd
import random
from typing import List, Dict

# Try to import DataProto; fall back to a mock version when verl is absent.
try:
    from verl import DataProto
except ImportError:
    class DataProto:
        """Minimal stand-in for verl's DataProto (used for testing without verl)."""

        def __init__(self):
            # Mirror the two attributes this module actually reads/writes.
            self.non_tensor_batch = {}
            self.meta_info = {}


class LocalFileWrapperWg:
    """Rollout-worker stand-in that replays expert demonstrations from local parquet files.

    Instead of generating model responses, this wrapper loads pre-recorded
    expert conversations (the ``messages`` column of one or more parquet
    files) and serves them as rollout caches for CHORD-style training.
    """

    def __init__(self, config, tokenizer, expert_dataset_path=None, enable_message_expansion=False):
        """Initialize the wrapper and eagerly load the expert dataset.

        Args:
            config: Training config. When ``expert_dataset_path`` is not given,
                the path is read from the ``chord`` section of this config.
            tokenizer: Stored for interface compatibility; not used directly here.
            expert_dataset_path: A single ``.parquet`` file, or a directory that
                is searched recursively for parquet files.
            enable_message_expansion: When True, every n-turn conversation is
                expanded into its n prefix conversations (see ``_expand_messages``).

        Raises:
            ValueError: If no dataset path can be determined, or loading fails.
        """
        self.config = config
        self.tokenizer = tokenizer

        # Message-expansion feature flag.
        self.enable_message_expansion = enable_message_expansion

        # Prefer the explicit argument; otherwise fall back to the chord config.
        # NOTE(review): assumes config.chord is dict-like (supports .get) — confirm.
        if expert_dataset_path is None:
            chord_config = getattr(config, 'chord', {})
            expert_dataset_path = chord_config.get('expert_dataset_path', None)

        self.expert_dataset_path = expert_dataset_path

        print(f'🎓 CHORD LocalFileWrapper初始化，专家数据集路径: {expert_dataset_path}')
        print(f'🔧 消息扩展功能: {"启用" if enable_message_expansion else "禁用"}')
        if expert_dataset_path is None:
            raise ValueError("LocalFileWrapperWg必须提供expert_dataset_path参数，请在配置中设置chord.expert_dataset_path")
        self._load_expert_dataset()

    def _load_expert_dataset(self):
        """Load the expert dataset from a single parquet file or a directory tree.

        Populates ``self.expert_data`` (merged DataFrame) and
        ``self.expert_messages`` (list of validated message lists, optionally
        expanded into prefix conversations).

        Raises:
            ValueError: If no parquet files are found, none load successfully,
                or no valid conversations remain after filtering.
        """
        import numpy as np
        import os

        print(f"正在加载专家数据集: {self.expert_dataset_path}")

        try:
            # Collect every parquet file that should be processed.
            parquet_files = self._find_parquet_files(self.expert_dataset_path)

            if not parquet_files:
                raise ValueError(f"在路径 {self.expert_dataset_path} 下没有找到任何parquet文件")

            print(f"🔍 找到 {len(parquet_files)} 个parquet文件")

            # Read and merge all parquet files; unreadable files are skipped.
            all_dataframes = []
            total_records = 0

            for file_path in parquet_files:
                try:
                    df = pd.read_parquet(file_path)
                    all_dataframes.append(df)
                    total_records += len(df)
                    print(f"  ✅ 加载文件: {os.path.basename(file_path)} ({len(df)} 条记录)")
                except Exception as e:
                    print(f"  ⚠️ 跳过文件 {file_path}: {e}")
                    continue

            if not all_dataframes:
                raise ValueError("没有成功加载任何parquet文件")

            # Concatenate everything into a single frame.
            self.expert_data = pd.concat(all_dataframes, ignore_index=True)
            print(f"📚 成功合并 {len(all_dataframes)} 个文件，共 {len(self.expert_data)} 条专家对话数据")

            # Pre-process: keep only rows whose 'messages' column is well-formed.
            valid_data = []
            invalid_count = 0
            original_count = 0
            expanded_count = 0

            for idx, row in self.expert_data.iterrows():
                messages = row['messages']

                # Parquet round-trips lists as ndarrays; normalize back to list.
                if isinstance(messages, np.ndarray):
                    messages = messages.tolist()

                if self._validate_messages(messages):
                    original_count += 1

                    if self.enable_message_expansion:
                        # Expand into prefix conversations and re-validate each.
                        expanded_messages_list = self._expand_messages(messages)
                        for expanded_messages in expanded_messages_list:
                            if self._validate_messages(expanded_messages, allow_short=True):
                                valid_data.append(expanded_messages)
                                expanded_count += 1
                    else:
                        # No expansion: keep the original conversation as-is.
                        valid_data.append(messages)
                        expanded_count += 1
                else:
                    invalid_count += 1
                    # NOTE(review): self.verbose is never set in __init__, so this
                    # debug print only fires if the attribute is set externally.
                    if hasattr(self, 'verbose') and self.verbose:
                        print(f"⚠️ 跳过无效数据 {idx}: {type(messages)} - {str(messages)[:100]}...")

            self.expert_messages = valid_data

            # Detailed processing statistics.
            print(f"📊 数据处理统计:")
            print(f"   原始对话数量: {original_count}")
            if self.enable_message_expansion:
                expansion_ratio = expanded_count / original_count if original_count > 0 else 0
                print(f"   扩展后对话数量: {expanded_count}")
                print(f"   扩展倍数: {expansion_ratio:.2f}x")
            print(f"   ✅ 有效的专家对话数据: {len(self.expert_messages)} 条")
            if invalid_count > 0:
                print(f"   ⚠️ 跳过无效数据: {invalid_count} 条")

            if len(self.expert_messages) == 0:
                raise ValueError("没有找到有效的专家对话数据，请检查数据格式")

        except Exception as e:
            # Chain the cause so the original traceback is preserved.
            raise ValueError(f"加载专家数据集失败: {e}") from e

    def _find_parquet_files(self, path):
        """Return a sorted list of parquet file paths under ``path``.

        Args:
            path: Either a single ``.parquet`` file, or a directory that is
                searched recursively for ``*.parquet`` files.

        Raises:
            ValueError: If ``path`` is neither an existing file nor a directory.
        """
        import os
        import glob

        parquet_files = []

        if os.path.isfile(path):
            # Single file: accept only if it is a parquet file.
            if path.endswith('.parquet'):
                parquet_files.append(path)
            else:
                print(f"⚠️ 指定的文件 {path} 不是parquet格式")
        elif os.path.isdir(path):
            # Directory: search recursively for parquet files.
            pattern = os.path.join(path, "**", "*.parquet")
            parquet_files = glob.glob(pattern, recursive=True)
        else:
            raise ValueError(f"路径 {path} 不存在或不是有效的文件/文件夹")

        # Sort by name so the loading order is deterministic across runs.
        parquet_files.sort()
        return parquet_files

    def _validate_messages(self, messages, allow_short=False):
        """Check whether ``messages`` looks like a valid chat transcript.

        Args:
            messages: Candidate message list (list of dicts with ``role`` keys;
                ndarrays are converted to lists first).
            allow_short: Relax the requirements for expanded sub-dialogs: no
                leading system message required, and a single user/assistant
                pair suffices as long as the transcript ends with an
                assistant reply.

        Returns:
            True if the structure is acceptable, False otherwise.
        """
        import numpy as np

        # Normalize ndarray payloads (parquet round-trip) to plain lists.
        if isinstance(messages, np.ndarray):
            messages = messages.tolist()

        if not isinstance(messages, list):
            return False

        # BUG FIX: previously `3 if not allow_short else 3` — both branches were
        # 3, so the allow_short relaxation never applied and valid two-message
        # [user, assistant] sub-dialogs were rejected. Short dialogs need only
        # user + assistant; full dialogs need system + user + assistant.
        min_length = 2 if allow_short else 3
        if len(messages) < min_length:
            return False

        # Collect the roles we can actually read.
        roles = []
        for msg in messages:
            if isinstance(msg, dict):
                roles.append(msg.get('role', ''))
            elif hasattr(msg, 'get'):  # other dict-like objects
                roles.append(msg.get('role', ''))
            else:
                # Skip message entries we cannot interpret.
                continue

        if len(roles) == 0:
            return False

        # Basic structure: optionally system first, at least one user and one assistant.
        has_system = len(roles) > 0 and roles[0] == 'system'
        has_user = 'user' in roles
        has_assistant = 'assistant' in roles

        if allow_short:
            # Relaxed check for expanded sub-dialogs: at least one user and one
            # assistant, and the transcript must end with an assistant reply.
            if has_user and has_assistant and len(roles) >= 2:
                if roles[-1] == 'assistant':
                    return True
        else:
            # Strict check: system first, plus at least one user and one assistant.
            if (len(roles) >= 3 and
                has_system and
                has_user and
                has_assistant):
                return True

        return False

    def _messages_to_history(self, messages):
        """Convert a chat transcript into a list of (state, response) steps.

        Each user message immediately followed by an assistant message becomes
        one history entry; the system message is consumed but not emitted.
        Expert responses get a fixed reward of 1.0.

        Returns:
            List of dicts with ``state``, ``llm_response`` and ``reward`` keys.
        """
        import numpy as np

        # Normalize ndarray payloads to plain lists.
        if isinstance(messages, np.ndarray):
            messages = messages.tolist()

        history = []
        system_content = ""

        i = 0
        while i < len(messages):
            msg = messages[i]

            # Read role/content from dicts or dict-like objects; skip anything else.
            if isinstance(msg, dict):
                role = msg.get('role', '')
                content = msg.get('content', '')
            elif hasattr(msg, 'get'):
                role = msg.get('role', '')
                content = msg.get('content', '')
            else:
                i += 1
                continue

            if role == 'system':
                # Captured but currently unused downstream.
                system_content = content
                i += 1
                continue

            elif role == 'user':
                # The user message content is the state.
                state = content

                # Pair it with the immediately following assistant reply.
                if i + 1 < len(messages):
                    next_msg = messages[i + 1]
                    if isinstance(next_msg, dict):
                        next_role = next_msg.get('role', '')
                        next_content = next_msg.get('content', '')
                    elif hasattr(next_msg, 'get'):
                        next_role = next_msg.get('role', '')
                        next_content = next_msg.get('content', '')
                    else:
                        next_role = ''
                        next_content = ''

                    if next_role == 'assistant':
                        llm_response = next_content
                        history.append({
                            'state': state,
                            'llm_response': llm_response,
                            'reward': 1.0  # expert data gets a fixed positive reward
                        })
                        i += 2
                    else:
                        # No matching assistant reply; skip this user turn.
                        i += 1
                else:
                    # Trailing user message without a reply; skip it.
                    i += 1
            else:
                i += 1

        return history

    def _expand_messages(self, messages):
        """Expand a full conversation into all of its prefix conversations.

        Example: [system, user1, assistant1, user2, assistant2, user3, assistant3]
        expands to:
        1. [system, user1, assistant1]
        2. [system, user1, assistant1, user2, assistant2]
        3. [system, user1, assistant1, user2, assistant2, user3, assistant3]

        Returns:
            A list of message lists; falls back to ``[messages]`` when the
            input cannot be expanded.
        """
        import numpy as np

        # Normalize ndarray payloads to plain lists.
        if isinstance(messages, np.ndarray):
            messages = messages.tolist()

        if not isinstance(messages, list) or len(messages) < 3:
            return [messages]  # too short to expand; return unchanged

        expanded_list = []
        system_msg = None
        dialog_pairs = []
        current_pair = []

        # Parse the transcript into a system message plus user/assistant pairs.
        for msg in messages:
            # Only dicts / dict-like objects carry a readable role.
            if isinstance(msg, dict) or hasattr(msg, 'get'):
                role = msg.get('role', '')
            else:
                continue

            if role == 'system':
                system_msg = msg
            elif role == 'user':
                # A new user turn closes any dangling (reply-less) pair first.
                if current_pair:
                    dialog_pairs.append(current_pair)
                # Start a new pair with this user message.
                current_pair = [msg]
            elif role == 'assistant' and current_pair:
                # Assistant reply completes the current pair.
                current_pair.append(msg)
                dialog_pairs.append(current_pair)
                current_pair = []

        # A trailing user message without a reply still forms a (partial) pair.
        if current_pair:
            dialog_pairs.append(current_pair)

        if len(dialog_pairs) == 0:
            return [messages]  # no usable pairs; return unchanged

        # Build one prefix conversation per pair count.
        for i in range(1, len(dialog_pairs) + 1):
            expanded_msg = []

            # Prepend the system message when present.
            if system_msg:
                expanded_msg.append(system_msg)

            # Append the first i dialog pairs.
            for j in range(i):
                expanded_msg.extend(dialog_pairs[j])

            # Keep only prefixes that end on an assistant reply (complete pairs).
            if len(expanded_msg) >= 3 and expanded_msg[-1].get('role') == 'assistant':
                expanded_list.append(expanded_msg)

        # If nothing valid was produced, fall back to the original conversation.
        if not expanded_list:
            expanded_list = [messages]

        return expanded_list

    def get_expert_rollout_cache(self, num_envs, step=0):
        """Deterministically sample expert conversations and build rollout caches.

        Args:
            num_envs: Number of environments (samples) required.
            step: Current training step; drives deterministic sampling.

        Sampling strategy:
        1. The same step always yields the same ``num_envs`` samples.
        2. Different steps overlap as little as possible (rotating window).
        3. When data is scarce, sampling is with replacement but still
           deterministic per (step, slot).

        Returns:
            List of rollout-cache dicts, one per environment.

        Raises:
            ValueError: If the expert dataset has not been loaded yet.
        """
        if not hasattr(self, 'expert_messages'):
            raise ValueError("专家数据未加载，请先调用_load_expert_dataset()")

        total_samples = len(self.expert_messages)

        # Seed the RNG so a given step always samples identically.
        # NOTE(review): this mutates random's *global* state; it is reset below,
        # but concurrent users of the random module would still be affected.
        random.seed(step + 42)  # offset by 42 so step 0 does not seed with 0

        if total_samples >= num_envs:
            # Enough data: rotate a contiguous window across the dataset so
            # consecutive steps see (mostly) disjoint samples.
            offset = (step * num_envs) % total_samples

            # Build the window of (wrapping) indices.
            indices = []
            for i in range(num_envs):
                idx = (offset + i) % total_samples
                indices.append(idx)

            # Shuffle deterministically (seeded above) to vary ordering.
            random.shuffle(indices)

            selected_messages = [self.expert_messages[idx] for idx in indices]

            print(f"🎓 Step {step}: 从专家数据集({total_samples}个样本)中选择了 {len(selected_messages)} 个对话样本")
            print(f"    采样索引: {sorted(indices)[:5]}{'...' if len(indices) > 5 else ''}")

        else:
            # Not enough data: sample with replacement, deterministically per
            # (step, slot) pair so different steps still differ.
            selected_messages = []
            for i in range(num_envs):
                sample_seed = step * 1000 + i
                random.seed(sample_seed)
                idx = random.randint(0, total_samples - 1)
                selected_messages.append(self.expert_messages[idx])

            print(f"🎓 Step {step}: 从专家数据集({total_samples}个样本)中重复采样了 {len(selected_messages)} 个对话样本")

        # Re-seed from system entropy so later random use is unaffected.
        random.seed()

        rollout_cache = []
        for i, messages in enumerate(selected_messages):
            history = self._messages_to_history(messages)

            cache_entry = {
                "env_id": i,
                "history": history,
                "group_id": i,
                "tag": "expert_data",
                "penalty": 0,
                "environment_id": f"expert_{i}",
                "metrics": {},
                "env_info": {"source": "expert_dataset", "num_turns": len(history)}
            }
            rollout_cache.append(cache_entry)

        print(f"🎓 从专家数据集中选择了 {len(rollout_cache)} 个对话样本")
        return rollout_cache

    def generate_sequences(self, lm_inputs: 'DataProto') -> 'DataProto':
        """Expert mode: return an empty response batch.

        The actual expert data is injected by the rollout logic via
        ``get_expert_rollout_cache``; this method only echoes env/group ids and
        meta info so the caller's bookkeeping stays intact.
        """
        lm_outputs = DataProto()
        lm_outputs.non_tensor_batch = {
            'response_texts': [],
            'env_ids': lm_inputs.non_tensor_batch['env_ids'],
            'group_ids': lm_inputs.non_tensor_batch['group_ids']
        }
        lm_outputs.meta_info = lm_inputs.meta_info
        return lm_outputs