import json
import os
import torch
from torch.utils.data import Dataset, DataLoader, DistributedSampler
from transformers import AutoTokenizer

from .base_dataset import BaseDatasetPlugin

# Silence the HuggingFace tokenizers fork/parallelism warning emitted when
# tokenizers are used together with multi-worker DataLoaders.
os.environ["TOKENIZERS_PARALLELISM"] = "false"


class PretrainDataset(Dataset):
    """Dataset for language-model pretraining.

    Each line of the data file is a JSON object expected to carry a ``text``
    field. ``__getitem__`` tokenizes that text to a fixed length and returns
    the one-token-shifted input/target pair used for next-token prediction,
    plus a mask that zeroes the loss on padding positions.
    """

    def __init__(self, data_path, tokenizer, max_length=512):
        """
        Args:
            data_path: Path to a JSONL file, one ``{"text": ...}`` object per line.
            tokenizer: HuggingFace-style tokenizer; must expose ``pad_token_id``
                and support ``return_tensors='pt'``.
            max_length: Fixed sequence length after padding/truncation.
        """
        self.data_path = data_path
        self.tokenizer = tokenizer
        self.max_length = max_length
        self.samples = self.load_data(data_path)

    def load_data(self, path):
        """Read the JSONL file and return a list of parsed records.

        Blank lines (e.g. a trailing newline at EOF) are skipped instead of
        crashing ``json.loads`` on an empty string.
        """
        samples = []
        with open(path, 'r', encoding='utf-8') as f:
            for line in f:
                line = line.strip()
                if not line:
                    continue
                samples.append(json.loads(line))
        return samples

    def __len__(self):
        return len(self.samples)

    def __getitem__(self, index):
        sample = self.samples[index]

        # Tokenize to a fixed length (pad/truncate to max_length).
        encoding = self.tokenizer(
            str(sample['text']),
            max_length=self.max_length,
            padding='max_length',
            truncation=True,
            return_tensors='pt'
        )
        # squeeze(0) drops only the batch dim added by return_tensors='pt'
        # (a bare squeeze() would also collapse the sequence dim if it were 1).
        input_ids = encoding.input_ids.squeeze(0)
        # True for real tokens, False for padding — padding contributes no loss.
        loss_mask = (input_ids != self.tokenizer.pad_token_id)

        # Shift by one for next-token prediction. Slice + clone instead of
        # torch.tensor(tensor), which copy-constructs with a UserWarning.
        X = input_ids[:-1].clone().to(torch.long)
        Y = input_ids[1:].clone().to(torch.long)
        loss_mask = loss_mask[1:].to(torch.long)

        # Dict format expected by the training loop.
        return {
            'input_ids': X,
            'labels': Y,
            'loss_mask': loss_mask
        }


class PretrainDatasetPlugin(BaseDatasetPlugin):
    """Plugin that wires up a PretrainDataset and its DataLoader from a config dict."""

    def _load_tokenizer(self, config):
        """Resolve a tokenizer with fallbacks: configured path -> project default dir -> hub GPT2."""
        tokenizer_path = config.get('tokenizer_path', './model/tokenizer_default')
        print(f"[INFO] 尝试加载分词器，路径: {tokenizer_path}")

        if os.path.exists(tokenizer_path):
            print(f"[INFO] 从指定路径加载分词器: {tokenizer_path}")
            return AutoTokenizer.from_pretrained(tokenizer_path)

        print(f"[WARNING] 指定分词器路径不存在: {tokenizer_path}")
        # Fall back to the project's bundled tokenizer directory, if present.
        project_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
        default_tokenizer_path = os.path.join(project_root, 'model')
        if os.path.exists(default_tokenizer_path):
            print(f"[INFO] 使用默认分词器路径: {default_tokenizer_path}")
            return AutoTokenizer.from_pretrained(default_tokenizer_path)

        print("[ERROR] 无法找到本地分词器，尝试使用默认GPT2分词器")
        try:
            return AutoTokenizer.from_pretrained('gpt2')
        except Exception as e:
            print(f"[ERROR] 无法从HuggingFace加载GPT2分词器: {e}")
            print("[INFO] 创建简单的默认分词器")
            from transformers import GPT2Tokenizer
            # Last resort: the slow GPT2 tokenizer (a different code path than
            # AutoTokenizer's fast-tokenizer default).
            return GPT2Tokenizer.from_pretrained('gpt2')

    def create_dataset(self, config):
        """Build the PretrainDataset described by ``config``."""
        tokenizer = self._load_tokenizer(config)

        # Guarantee a pad token so fixed-length padding works.
        if tokenizer.pad_token is None:
            tokenizer.pad_token = tokenizer.eos_token

        data_path = config.get('data_path', '../dataset/pretrain_hq.jsonl')
        print(f"[INFO] 检查数据集文件是否存在: {data_path}")
        if not os.path.exists(data_path):
            raise FileNotFoundError(f"数据文件不存在: {data_path}")
        print("[INFO] 数据集文件存在，准备加载")

        return PretrainDataset(
            data_path=data_path,
            tokenizer=tokenizer,
            max_length=config.get('max_seq_len', 512),
        )

    def create_dataloader(self, dataset, config):
        """Build a DataLoader for ``dataset``, distributed-aware via RANK / use_accelerate."""
        print("[INFO] 创建数据加载器")
        # RANK is set by torchrun/launch utilities when running under DDP.
        is_ddp = int(os.environ.get("RANK", -1)) != -1
        with_accelerate = config.get('use_accelerate', False)
        print(f"[INFO] 分布式训练状态 - DDP: {is_ddp}, Accelerate: {with_accelerate}")

        # pin_memory is unsupported on MPS and handled by Accelerate itself,
        # so disable it in both cases (mirrors MiniMind's setting).
        device = config.get('device', 'cpu')
        is_mps = 'mps' in str(device).lower() or (
            torch.backends.mps.is_available() and torch.backends.mps.is_built()
        )
        pin_memory = not (with_accelerate or is_mps)
        print(f"[INFO] 设备类型: {device}, MPS设备: {is_mps}, 启用pin_memory: {pin_memory}")

        # Under plain DDP we shard via DistributedSampler; Accelerate does its
        # own sharding, so it gets the default sampler.
        if is_ddp and not with_accelerate:
            print("[INFO] 使用分布式采样器")
            sampler = DistributedSampler(dataset)
        else:
            print("[INFO] 使用标准采样器")
            sampler = None

        batch_size = config.get('batch_size', 32)
        loader = DataLoader(
            dataset,
            batch_size=batch_size,
            pin_memory=pin_memory,
            drop_last=False,
            shuffle=sampler is None,
            num_workers=config.get('num_workers', 1),
            sampler=sampler,
        )
        print(f"[INFO] 数据加载器创建完成，批次大小: {batch_size}")
        return loader