import json
import os
import torch
from torch.utils.data import Dataset, DataLoader, DistributedSampler
from transformers import AutoTokenizer

from .base_dataset import BaseDatasetPlugin

# Set this env var to suppress the HF tokenizers fork/parallelism warning
os.environ["TOKENIZERS_PARALLELISM"] = "false"


class SFTDataset(Dataset):
    """Supervised fine-tuning (SFT) dataset backed by a JSONL conversation file.

    Each line of the file is a JSON object containing a ``conversations`` list
    of ``{'role': ..., 'content': ...}`` turns. Pretraining-style records that
    only carry a ``text`` field are wrapped into a single-turn conversation at
    load time, so both formats can be mixed in one file.
    """

    def __init__(self, data_path, tokenizer, max_length=1024):
        """
        Args:
            data_path (str): path to the JSONL data file.
            tokenizer: a HuggingFace-style tokenizer instance.
            max_length (int): fixed sequence length; every sample is padded or
                truncated to this many tokens.
        """
        self.data_path = data_path
        self.tokenizer = tokenizer
        self.max_length = max_length
        self.samples = self.load_data(data_path)
        # Fixed-length padding in __getitem__ requires a pad token; fall back
        # to EOS when the tokenizer does not define one.
        if self.tokenizer.pad_token is None:
            self.tokenizer.pad_token = self.tokenizer.eos_token

        # Resolve BOS/EOS ids, falling back to the project's custom sentence
        # markers when the standard ids are unset.
        # NOTE(review): convert_tokens_to_ids may return the unk id if these
        # markers are absent from the vocab — confirm against the tokenizer.
        self.bos_token_id = (
            self.tokenizer.bos_token_id
            if self.tokenizer.bos_token_id is not None
            else self.tokenizer.convert_tokens_to_ids("<|begin_of_sentence|>")
        )
        self.eos_token_id = (
            self.tokenizer.eos_token_id
            if self.tokenizer.eos_token_id is not None
            else self.tokenizer.convert_tokens_to_ids("<|end_of_sentence|>")
        )

    def load_data(self, path):
        """Read JSONL records from *path*, normalizing them to SFT format.

        Blank or whitespace-only lines (e.g. a trailing newline at EOF) are
        skipped instead of crashing ``json.loads``. Records that have a
        ``text`` field but no ``conversations`` are converted to a one-turn
        user/assistant dialogue.

        Returns:
            list[dict]: the loaded samples.
        """
        samples = []
        with open(path, 'r', encoding='utf-8') as f:
            for line in f:
                line = line.strip()
                if not line:
                    # Tolerate empty lines rather than raising JSONDecodeError.
                    continue
                data = json.loads(line)
                # Pretraining format: wrap the raw text as an SFT conversation.
                if 'text' in data and 'conversations' not in data:
                    data = {
                        'conversations': [
                            {'content': '请继续以下内容', 'role': 'user'},
                            {'content': data['text'], 'role': 'assistant'}
                        ]
                    }
                samples.append(data)
        return samples

    def __len__(self):
        return len(self.samples)

    def _create_chat_prompt(self, conversations):
        """Render *conversations* into a single prompt string.

        Uses the tokenizer's chat template when one is configured; otherwise
        falls back to a simple ``<|role|>``-delimited format. Turns that lack
        an explicit role alternate user/assistant starting with user.
        """
        messages = []
        for i, turn in enumerate(conversations):
            role = turn.get('role', 'user' if i % 2 == 0 else 'assistant')
            messages.append({"role": role, "content": turn['content']})

        if hasattr(self.tokenizer, 'chat_template') and self.tokenizer.chat_template is not None:
            return self.tokenizer.apply_chat_template(
                messages,
                tokenize=False,
                add_generation_prompt=False
            )
        else:
            # Simple fallback format when no chat template is configured.
            prompt = ""
            for msg in messages:
                prompt += f"<|{msg['role']}|>\n{msg['content']}\n"
            prompt += "<|assistant|>\n"
            return prompt

    def _generate_loss_mask(self, input_ids):
        """Build a 0/1 mask so loss is computed only on assistant replies.

        Scans for the token sequence of the literal marker ``<|assistant|>``
        and marks everything from the end of that marker up to (but excluding)
        the next pad/EOS token.

        NOTE(review): this assumes the rendered prompt contains the literal
        ``<|assistant|>`` marker — true for the fallback format above, but a
        tokenizer chat template may emit a different delimiter; verify when a
        chat template is in use.

        Args:
            input_ids: token id sequence (list or 1-D tensor).

        Returns:
            list[int]: mask of the same length as *input_ids*.
        """
        loss_mask = [0] * len(input_ids)

        input_ids_list = input_ids if isinstance(input_ids, list) else input_ids.tolist()

        # Token ids of the assistant marker, without added special tokens.
        assistant_tokens = self.tokenizer("<|assistant|>", add_special_tokens=False).input_ids
        i = 0
        while i < len(input_ids_list):
            if (i <= len(input_ids_list) - len(assistant_tokens) and
                input_ids_list[i:i + len(assistant_tokens)] == assistant_tokens):
                # Reply spans from just after the marker to the next pad/EOS.
                start_pos = i + len(assistant_tokens)
                end_pos = start_pos
                while (end_pos < len(input_ids_list) and
                       input_ids_list[end_pos] != self.tokenizer.pad_token_id and
                       input_ids_list[end_pos] != self.tokenizer.eos_token_id):
                    end_pos += 1

                for j in range(start_pos, min(end_pos, len(input_ids_list))):
                    loss_mask[j] = 1
                i = end_pos
            else:
                i += 1

        # Guard against an all-zero mask (would yield zero gradient): fall
        # back to supervising the final tokens of the sequence.
        if sum(loss_mask) == 0:
            loss_mask[-10:] = [1] * min(10, len(loss_mask))

        return loss_mask

    def __getitem__(self, index):
        """Return one training sample as next-token-prediction tensors.

        Returns:
            dict with 'input_ids' (X), 'labels' (Y, shifted by one), and
            'loss_mask' (aligned with the labels).
        """
        sample = self.samples[index]
        prompt = self._create_chat_prompt(sample['conversations'])
        encoding = self.tokenizer(
            prompt,
            max_length=self.max_length,
            padding='max_length',
            truncation=True,
            return_tensors='pt'
        )

        # squeeze(0) drops only the batch dim added by return_tensors='pt'
        # (plain squeeze() would also collapse the sequence dim if it were 1).
        input_ids = encoding.input_ids.squeeze(0)
        attention_mask = encoding.attention_mask.squeeze(0)

        loss_mask = self._generate_loss_mask(input_ids)
        loss_mask = torch.tensor(loss_mask, dtype=torch.long)

        # Shift for next-token prediction: predict token t+1 from prefix t.
        X = input_ids[:-1]
        Y = input_ids[1:]
        loss_mask = loss_mask[1:]  # align mask with the prediction targets

        return {
            'input_ids': X,
            'labels': Y,
            'loss_mask': loss_mask
        }


class SFTDatasetPlugin(BaseDatasetPlugin):
    """Plugin that wires SFTDataset into the training pipeline."""

    def create_dataset(self, config):
        """Build an SFTDataset from *config*.

        Reads 'tokenizer_path', 'data_path' and 'max_seq_len' from the config,
        with the same defaults as the rest of the project.

        Raises:
            FileNotFoundError: if the data file does not exist.
        """
        tokenizer_dir = config.get('tokenizer_path', '../model')

        # Prefer the project tokenizer; fall back to gpt2 when the path is
        # missing (e.g. running outside the repo layout).
        source = tokenizer_dir if os.path.exists(tokenizer_dir) else 'gpt2'
        tokenizer = AutoTokenizer.from_pretrained(source)

        # Fixed-length batching needs a pad token.
        if tokenizer.pad_token is None:
            tokenizer.pad_token = tokenizer.eos_token

        corpus_path = config.get('data_path', '../dataset/sft.jsonl')
        if not os.path.exists(corpus_path):
            raise FileNotFoundError(f"数据文件不存在: {corpus_path}")

        return SFTDataset(
            data_path=corpus_path,
            tokenizer=tokenizer,
            max_length=config.get('max_seq_len', 512),
        )

    def create_dataloader(self, dataset, config):
        """Wrap *dataset* in a DataLoader, DDP-aware unless accelerate manages it."""
        is_distributed = int(os.environ.get("RANK", -1)) != -1
        accelerate_managed = config.get('use_accelerate', False)

        # Only shard via DistributedSampler under raw DDP; accelerate installs
        # its own sampler when it prepares the dataloader.
        sampler = (
            DistributedSampler(dataset)
            if is_distributed and not accelerate_managed
            else None
        )

        return DataLoader(
            dataset,
            batch_size=config.get('batch_size', 32),
            # Matches MiniMind: pinned host memory except under accelerate.
            pin_memory=not accelerate_managed,
            drop_last=False,
            shuffle=sampler is None,
            num_workers=config.get('num_workers', 1),
            sampler=sampler,
        )