import json
import torch
from torch.utils.data import Dataset, DataLoader
from transformers import AutoTokenizer

class ChineseMedicalDataset(Dataset):
    """Extractive-QA dataset built from a JSON annotation file.

    The file must contain a JSON list; each element is a dict with keys
    'id', 'text' and 'annotations', where 'annotations' is a list of
    dicts with 'Q' (question) and 'A' (answer, expected to occur as a
    substring of 'text').  Each valid annotation becomes one example
    holding input_ids, attention_mask, offset_mapping and token-level
    start/end positions.
    """

    def __init__(self, file_path, tokenizer, max_length=512):
        """
        Args:
            file_path: path to the JSON annotation file described above.
            tokenizer: a HuggingFace *fast* tokenizer (it must support
                ``return_offsets_mapping=True``).
            max_length: sequences are padded/truncated to this length.
        """
        self.tokenizer = tokenizer
        self.max_length = max_length
        self.data = []

        try:
            self._load(file_path)
        except Exception as e:
            # Best-effort loading, as before: report the failure and keep
            # whatever examples were built up to that point.
            print(f"Error loading or processing data: {e}")

    def _load(self, file_path):
        """Parse *file_path* and append one encoded example per valid annotation."""
        with open(file_path, 'r', encoding='utf-8') as f:
            data = json.load(f)

        if not isinstance(data, list):
            print("Error: Data is not a list of items.")
            return

        for item in data:
            if 'id' not in item or 'text' not in item or 'annotations' not in item:
                print(f"Skipping item with missing fields: {item}")
                continue

            annotations = item['annotations']
            if not isinstance(annotations, list):
                print(f"Annotations is not a list: {annotations}")
                continue

            for annotation in annotations:
                if 'Q' not in annotation or 'A' not in annotation:
                    print(f"Skipping annotation with missing 'Q' or 'A': {annotation}")
                    continue
                example = self._build_example(
                    item['text'], annotation['Q'], annotation['A']
                )
                if example is not None:
                    self.data.append(example)

    def _build_example(self, text, question, answer):
        """Encode one (text, question, answer) triple; return None if unusable."""
        # Combine question and context into a single model input.
        prefix = f"问题：{question} 文本："
        input_text = prefix + text

        # Single tokenization pass; offsets map each token back to its
        # character span in input_text.  (The original code tokenized the
        # same string twice and discarded the first result.)
        encoding_obj = self.tokenizer.encode_plus(
            input_text,
            max_length=self.max_length,
            padding='max_length',
            truncation=True,
            return_tensors='pt',
            return_offsets_mapping=True
        )
        encoding = {
            'input_ids': encoding_obj['input_ids'].squeeze(),
            'attention_mask': encoding_obj['attention_mask'].squeeze(),
            'offset_mapping': encoding_obj['offset_mapping'].squeeze()
        }

        # Locate the answer in the context portion only: starting the
        # search at len(prefix) avoids false matches inside the question.
        answer_start = input_text.find(answer, len(prefix))
        if answer_start == -1:
            print(f"Answer not found in input text: {answer}")
            return None
        answer_end = answer_start + len(answer) - 1  # inclusive last char

        # Map character positions to token indices.  Fast-tokenizer
        # offsets are half-open [start, end) spans, so the membership
        # test is `start <= pos < end`; the original `<= end` test could
        # match an adjacent token at a boundary.  Special/padding tokens
        # carry (0, 0) offsets and never match a real position.
        start_positions = None
        end_positions = None
        for i, (start, end) in enumerate(encoding['offset_mapping'].tolist()):
            if start <= answer_start < end:
                start_positions = i
            if start <= answer_end < end:
                end_positions = i

        if start_positions is None or end_positions is None:
            # Answer was truncated away or not covered by any token span.
            print(f"Start or end positions are None for answer: {answer}")
            return None

        encoding['start_positions'] = torch.tensor(start_positions)
        encoding['end_positions'] = torch.tensor(end_positions)
        return encoding

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        return self.data[idx]

def get_dataloader(file_path, tokenizer, batch_size=32, max_length=512, shuffle=True):
    """Build a DataLoader over a ChineseMedicalDataset.

    Args:
        file_path: path to the JSON annotation file.
        tokenizer: a HuggingFace fast tokenizer passed through to the dataset.
        batch_size: number of examples per batch.
        max_length: maximum token sequence length for the dataset.
        shuffle: whether to reshuffle the data every epoch.

    Returns:
        A DataLoader yielding dict batches with each tensor field stacked
        along a new batch dimension.

    Raises:
        ValueError: if the dataset loads zero usable examples.
    """
    dataset = ChineseMedicalDataset(file_path, tokenizer, max_length)
    if len(dataset) == 0:
        raise ValueError("Dataset is empty. Please check the data file and processing logic.")

    # The previous lambda collate_fn re-implemented PyTorch's default
    # collation for dicts of tensors (stack each field across the batch)
    # and, being a lambda, could not be pickled for num_workers > 0 on
    # spawn-start platforms.  default_collate produces the same batches,
    # so no custom collate_fn is needed.
    return DataLoader(
        dataset,
        batch_size=batch_size,
        shuffle=shuffle,
    )