"""
数据集处理模块
支持预训练、SFT、RLHF等不同阶段的数据处理
"""

import json
import torch
from torch.utils.data import Dataset, DataLoader
from typing import List, Dict, Any, Optional, Union
import random
from tokenizer import LLMTokenizer


class PretrainDataset(Dataset):
    """Pretraining dataset.

    Reads a JSONL file where each line is ``{"text": ...}`` and yields
    fixed-length next-token-prediction samples. All returned tensors have
    length ``max_length - 1`` so batches can be stacked directly.
    """

    def __init__(
        self,
        data_path: str,
        tokenizer: "LLMTokenizer",
        max_length: int = 512,
        stride: int = 256
    ):
        """
        Args:
            data_path: path to a JSONL file with a 'text' field per line.
            tokenizer: tokenizer exposing encode(), eos_token_id and
                pad_token_id.
            max_length: fixed token length of each sample (before the shift).
            stride: sliding-window stride. NOTE(review): stored but not used
                by __getitem__ (a random window is taken instead); kept for
                backward compatibility.
        """
        self.tokenizer = tokenizer
        self.max_length = max_length
        self.stride = stride
        self.data: List[str] = []

        # Load the raw texts eagerly; samples are tokenized lazily.
        self._load_data(data_path)

    def _load_data(self, data_path: str):
        """Load pretraining texts from a JSONL file, skipping blank lines."""
        with open(data_path, 'r', encoding='utf-8') as f:
            for line in f:
                if line.strip():
                    data = json.loads(line)
                    text = data.get('text', '')
                    if text:
                        self.data.append(text)

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        """Return one fixed-length next-token-prediction sample.

        Fixes vs. the original: ``labels`` was built one token longer than
        ``input_ids`` (shape mismatch), and short texts were not padded, so
        samples of different lengths could not be stacked into a batch.
        """
        text = self.data[idx]

        input_ids = self.tokenizer.encode(text, add_special_tokens=True)

        # Long texts: take a random max_length window (cheap augmentation).
        if len(input_ids) > self.max_length:
            start_idx = random.randint(0, len(input_ids) - self.max_length)
            input_ids = input_ids[start_idx:start_idx + self.max_length]

        # Pad short texts to a fixed length so the default collate can stack.
        real_len = len(input_ids)
        input_ids = input_ids + [self.tokenizer.pad_token_id] * (self.max_length - real_len)

        # Shift for next-token prediction: labels[i] is the token after
        # input_ids[i]; both now have length max_length - 1.
        labels = input_ids[1:]
        input_ids = input_ids[:-1]

        # Only positions whose target is a real token contribute to the loss.
        loss_mask = torch.zeros(len(input_ids), dtype=torch.float)
        loss_mask[:max(real_len - 1, 0)] = 1.0

        return {
            'input_ids': torch.tensor(input_ids, dtype=torch.long),
            'labels': torch.tensor(labels, dtype=torch.long),
            'loss_mask': loss_mask
        }


class SFTDataset(Dataset):
    """Supervised fine-tuning dataset.

    Reads a JSONL file where each line is ``{"conversations": [...]}`` and
    yields fixed-length samples in which only the assistant reply
    contributes to the loss.
    """

    def __init__(
        self,
        data_path: str,
        tokenizer: "LLMTokenizer",
        max_length: int = 512,
        chat_template: Optional[str] = None
    ):
        """
        Args:
            data_path: path to a JSONL file with a 'conversations' field.
            tokenizer: tokenizer exposing encode(), eos_token_id and
                pad_token_id.
            max_length: fixed token length of each sample (before the shift).
            chat_template: optional template string. NOTE(review): stored but
                not used by _format_conversation, which hard-codes the format.
        """
        self.tokenizer = tokenizer
        self.max_length = max_length
        self.chat_template = chat_template or self._default_chat_template()
        self.data: List[List[Dict[str, str]]] = []

        self._load_data(data_path)

    def _default_chat_template(self) -> str:
        """Default chat template."""
        return "用户：{user}\n助手：{assistant}"

    def _load_data(self, data_path: str):
        """Load SFT conversations from a JSONL file, skipping blank lines."""
        with open(data_path, 'r', encoding='utf-8') as f:
            for line in f:
                if line.strip():
                    data = json.loads(line)
                    conversations = data.get('conversations', [])
                    if conversations:
                        self.data.append(conversations)

    def _format_conversation(self, conversations: List[Dict[str, str]]) -> str:
        """Render a conversation as plain text; unknown roles are dropped."""
        formatted_text = ""
        for conv in conversations:
            role = conv.get('role', '')
            content = conv.get('content', '')
            if role == 'user':
                formatted_text += f"用户：{content}\n"
            elif role == 'assistant':
                formatted_text += f"助手：{content}\n"
        return formatted_text.strip()

    def _assistant_token_offset(self, conversations: List[Dict[str, str]]) -> int:
        """Return the token index where the first assistant reply starts, or -1.

        NOTE(review): heuristic — assumes the token count of
        encode(prefix) aligns with a prefix of encode(full text) and that
        the "助手：" marker costs one token; verify against the actual
        tokenizer (special tokens added by encode() may shift offsets).
        """
        for i, conv in enumerate(conversations):
            if conv.get('role') == 'assistant':
                prefix_text = self._format_conversation(conversations[:i])
                prefix_tokens = self.tokenizer.encode(prefix_text, add_special_tokens=False)
                return len(prefix_tokens) + 1  # +1 for "助手："
        return -1

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        """Return a fixed-length sample; loss covers only the assistant reply.

        Fixes vs. the original: ``labels`` was built one token longer than
        ``input_ids`` (shape mismatch), and the loss mask extended over
        padding positions. All tensors now have length ``max_length - 1``
        and padding is excluded from the loss.
        """
        conversations = self.data[idx]
        text = self._format_conversation(conversations)

        input_ids = self.tokenizer.encode(text, add_special_tokens=True)

        # Truncate, then pad to a fixed length so batches can be stacked.
        input_ids = input_ids[:self.max_length]
        real_len = len(input_ids)
        input_ids = input_ids + [self.tokenizer.pad_token_id] * (self.max_length - real_len)

        # Shift for next-token prediction: labels[i] follows input_ids[i].
        labels = input_ids[1:]
        input_ids = input_ids[:-1]

        # Loss only on assistant-reply positions whose target is a real token.
        loss_mask = torch.zeros(len(input_ids), dtype=torch.float)
        valid_end = max(real_len - 1, 0)  # positions >= valid_end predict padding
        assistant_start = self._assistant_token_offset(conversations)
        if assistant_start >= 0:
            loss_mask[min(assistant_start, valid_end):valid_end] = 1.0

        return {
            'input_ids': torch.tensor(input_ids, dtype=torch.long),
            'labels': torch.tensor(labels, dtype=torch.long),
            'loss_mask': loss_mask
        }


class DPODataset(Dataset):
    """DPO (Direct Preference Optimization) dataset.

    Each JSONL line holds a 'chosen' and a 'rejected' conversation; samples
    are returned as a paired dict of fixed-length next-token-prediction
    tensors, all of length ``max_length - 1`` so they can be stacked.
    """

    def __init__(
        self,
        data_path: str,
        tokenizer: "LLMTokenizer",
        max_length: int = 512
    ):
        """
        Args:
            data_path: path to a JSONL file with 'chosen'/'rejected' fields.
            tokenizer: tokenizer exposing encode(), eos_token_id and
                pad_token_id.
            max_length: fixed token length of each side (before the shift).
        """
        self.tokenizer = tokenizer
        self.max_length = max_length
        self.data: List[Dict[str, Any]] = []

        self._load_data(data_path)

    def _load_data(self, data_path: str):
        """Load preference pairs; lines missing either side are skipped."""
        with open(data_path, 'r', encoding='utf-8') as f:
            for line in f:
                if line.strip():
                    data = json.loads(line)
                    chosen = data.get('chosen', [])
                    rejected = data.get('rejected', [])
                    if chosen and rejected:
                        self.data.append({
                            'chosen': chosen,
                            'rejected': rejected
                        })

    def _format_conversation(self, conversations: List[Dict[str, str]]) -> str:
        """Render a conversation as plain text; unknown roles are dropped."""
        formatted_text = ""
        for conv in conversations:
            role = conv.get('role', '')
            content = conv.get('content', '')
            if role == 'user':
                formatted_text += f"用户：{content}\n"
            elif role == 'assistant':
                formatted_text += f"助手：{content}\n"
        return formatted_text.strip()

    def _encode_fixed(self, text: str):
        """Encode, truncate/pad to max_length, and shift for next-token prediction.

        Returns (input_ids, labels, loss_mask), each of length
        ``max_length - 1``; padding positions carry a zero loss mask.
        Fixes vs. the original: labels were built one token longer than the
        inputs, and the two sides were never padded, so they could not be
        stacked into a batch.
        """
        ids = self.tokenizer.encode(text, add_special_tokens=True)[:self.max_length]
        real_len = len(ids)
        ids = ids + [self.tokenizer.pad_token_id] * (self.max_length - real_len)
        labels = ids[1:]
        inputs = ids[:-1]
        mask = torch.zeros(len(inputs), dtype=torch.float)
        mask[:max(real_len - 1, 0)] = 1.0
        return inputs, labels, mask

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        """Return the chosen/rejected pair as stackable fixed-length tensors."""
        data = self.data[idx]

        chosen_inputs, chosen_labels, chosen_mask = self._encode_fixed(
            self._format_conversation(data['chosen']))
        rejected_inputs, rejected_labels, rejected_mask = self._encode_fixed(
            self._format_conversation(data['rejected']))

        return {
            'x_chosen': torch.tensor(chosen_inputs, dtype=torch.long),
            'y_chosen': torch.tensor(chosen_labels, dtype=torch.long),
            'mask_chosen': chosen_mask,
            'x_rejected': torch.tensor(rejected_inputs, dtype=torch.long),
            'y_rejected': torch.tensor(rejected_labels, dtype=torch.long),
            'mask_rejected': rejected_mask
        }


def create_dataloader(
    dataset: Dataset,
    batch_size: int = 8,
    shuffle: bool = True,
    num_workers: int = 0,
    collate_fn: Optional[callable] = None
) -> DataLoader:
    """Wrap a dataset in a DataLoader with a dict-aware default collate.

    If no ``collate_fn`` is given, samples that are dicts are stacked
    key-wise with ``torch.stack``; anything else falls back to PyTorch's
    ``default_collate``. ``pin_memory`` is always enabled.
    """

    def _stack_dicts(batch):
        """Key-wise stack for dict samples; defer to default_collate otherwise."""
        first = batch[0]
        if not isinstance(first, dict):
            return torch.utils.data.default_collate(batch)
        stacked = {}
        for key in first.keys():
            stacked[key] = torch.stack([sample[key] for sample in batch])
        return stacked

    return DataLoader(
        dataset,
        batch_size=batch_size,
        shuffle=shuffle,
        num_workers=num_workers,
        collate_fn=collate_fn or _stack_dicts,
        pin_memory=True,
    )


def create_pretrain_dataloader(
    data_path: str,
    tokenizer: LLMTokenizer,
    batch_size: int = 8,
    max_length: int = 512,
    **kwargs
) -> DataLoader:
    """Convenience factory: build a PretrainDataset and wrap it in a DataLoader.

    Extra keyword arguments are forwarded to ``create_dataloader``.
    """
    pretrain_ds = PretrainDataset(data_path, tokenizer, max_length)
    return create_dataloader(pretrain_ds, batch_size, **kwargs)


def create_sft_dataloader(
    data_path: str,
    tokenizer: LLMTokenizer,
    batch_size: int = 8,
    max_length: int = 512,
    **kwargs
) -> DataLoader:
    """Convenience factory: build an SFTDataset and wrap it in a DataLoader.

    Extra keyword arguments are forwarded to ``create_dataloader``.
    """
    sft_ds = SFTDataset(data_path, tokenizer, max_length)
    return create_dataloader(sft_ds, batch_size, **kwargs)


def create_dpo_dataloader(
    data_path: str,
    tokenizer: LLMTokenizer,
    batch_size: int = 8,
    max_length: int = 512,
    **kwargs
) -> DataLoader:
    """Convenience factory: build a DPODataset and wrap it in a DataLoader.

    Extra keyword arguments are forwarded to ``create_dataloader``.
    """
    dpo_ds = DPODataset(data_path, tokenizer, max_length)
    return create_dataloader(dpo_ds, batch_size, **kwargs)