# backend/models/dataset.py
import json
from itertools import islice
from pathlib import Path

import pandas as pd
import torch
from sklearn.model_selection import train_test_split
from transformers import BertTokenizer


class ArxivDataset:
    """Torch-style dataset over an arXiv metadata JSONL file.

    Loads the leading records of a JSONL dump, keeps ``title``, ``abstract``
    and ``categories``, and encodes each category string as an integer label
    via pandas category codes. ``__getitem__`` yields BERT-tokenized
    "title abstract" tensors plus the label, suitable for a DataLoader.
    """

    def __init__(self, data_path, tokenizer_path, max_length=128, max_samples=1000):
        """Build the dataset.

        Args:
            data_path: JSONL file path, resolved relative to the package's
                parent directory (``backend/``).
            tokenizer_path: Hugging Face model name or local directory for
                ``BertTokenizer.from_pretrained``.
            max_length: fixed token length for padding/truncation.
            max_samples: number of leading JSONL records to load
                (default 1000, matching the original sampling behavior).
        """
        self.data = self._load_data(data_path, max_samples)
        self.tokenizer = BertTokenizer.from_pretrained(tokenizer_path)
        self.max_length = max_length

    def _load_data(self, path, max_samples=1000):
        """Read the first ``max_samples`` JSONL records into a DataFrame.

        Returns a DataFrame with ``title``/``abstract``/``categories``
        (NaN rows dropped) plus an integer ``label`` column.
        """
        data_path = Path(__file__).parent.parent / path
        # Stream only the first max_samples lines. The previous
        # readlines()[:N] read the WHOLE file into memory first, which is
        # wasteful for the multi-GB arXiv dump.
        with open(data_path, 'r', encoding='utf-8') as f:
            records = [json.loads(line) for line in islice(f, max_samples)]

        df = pd.DataFrame(records)
        df = df[['title', 'abstract', 'categories']].dropna()

        # Encode categories once up front; both __getitem__ and
        # get_dataset reuse this 'label' column so the code->category
        # mapping is guaranteed consistent everywhere.
        df['categories'] = df['categories'].astype('category')
        df['label'] = df['categories'].cat.codes
        print(f"成功加载数据 {len(df)} 条，类别数：{len(df['categories'].cat.categories)}")
        print(f"类别映射示例：{dict(enumerate(df['categories'].cat.categories))}")
        return df

    def get_dataset(self, test_size=0.2, random_state=None):
        """Split texts/labels for training.

        Args:
            test_size: fraction held out for the test split.
            random_state: optional seed for a reproducible split
                (``None`` keeps the original non-deterministic behavior).

        Returns:
            (train_texts, test_texts, train_labels, test_labels).
        """
        texts = (self.data['title'] + " " + self.data['abstract']).tolist()
        # Reuse the precomputed label column instead of re-deriving
        # category codes — avoids redundant work and any risk of the two
        # encodings drifting apart.
        labels = self.data['label'].tolist()
        return train_test_split(texts, labels, test_size=test_size,
                                random_state=random_state)

    def _tokenize(self, text):
        """Tokenize ``text`` to fixed-length ('pt') tensors of max_length."""
        return self.tokenizer(
            text,
            max_length=self.max_length,
            padding='max_length',
            truncation=True,
            return_tensors='pt'
        )

    def __getitem__(self, idx):
        """Return one example: squeezed input tensors plus a long label."""
        row = self.data.iloc[idx]
        # Delegate to _tokenize rather than duplicating the tokenizer
        # keyword arguments (the original repeated them verbatim here).
        tokenized = self._tokenize(row['title'] + " " + row['abstract'])
        return {
            'input_ids': tokenized['input_ids'].squeeze(0),
            'attention_mask': tokenized['attention_mask'].squeeze(0),
            # Use the label precomputed in _load_data.
            'labels': torch.tensor(row['label'], dtype=torch.long)
        }

    def __len__(self):
        """Number of loaded (non-NaN) records."""
        return len(self.data)