import os
import pandas as pd
import numpy as np
from torch.utils.data import Dataset, DataLoader
import torch
from sklearn.model_selection import train_test_split

class TextDataset(Dataset):
    """Dataset of fixed-length integer token sequences.

    Each item is truncated or zero-padded to exactly ``max_len`` tokens.
    When ``labels`` is given, items are ``(text, label)`` tensor pairs;
    otherwise just the ``text`` tensor (inference mode).
    """

    def __init__(self, texts, labels=None, max_len=100):
        self.texts = texts          # sequence of token-id lists
        self.labels = labels        # optional sequence of int labels
        self.max_len = max_len      # fixed output length per item

    def __len__(self):
        return len(self.texts)

    def __getitem__(self, idx):
        # Truncate to max_len, then right-pad with zeros up to max_len.
        seq = list(self.texts[idx])[: self.max_len]
        seq += [0] * (self.max_len - len(seq))
        sample = torch.tensor(seq, dtype=torch.long)

        if self.labels is None:
            return sample
        return sample, torch.tensor(self.labels[idx], dtype=torch.long)

def create_sample_data(n_train_samples=1000, n_test_samples=200, vocab_size=70,
                       max_seq_len=100, n_classes=14, data_dir='data'):
    """Create a small synthetic text-classification dataset on disk.

    Writes tab-separated ``train_set.csv`` (columns: text, label) and
    ``test_a.csv`` (column: text) under ``data_dir``. Each 'text' value is
    a space-separated string of integer token ids in ``[0, vocab_size)``.

    Args:
        n_train_samples: number of training rows (real dataset: 200k).
        n_test_samples: number of test rows (real dataset: 50k).
        vocab_size: token ids are drawn from ``[0, vocab_size)``.
        max_seq_len: exclusive upper bound on sequence length; must be > 20
            because lengths are drawn from ``[20, max_seq_len)``.
        n_classes: labels are drawn uniformly from ``[0, n_classes)``.
        data_dir: output directory, created if missing.
    """
    print("正在创建示例数据集...")

    # Make sure the output directory exists.
    os.makedirs(data_dir, exist_ok=True)

    def generate_sequence():
        # One random-length run of token ids, rendered space-joined.
        length = np.random.randint(20, max_seq_len)
        return ' '.join(map(str, np.random.randint(0, vocab_size, size=length)))

    # Training set: texts plus random class labels.
    train_df = pd.DataFrame({
        'text': [generate_sequence() for _ in range(n_train_samples)],
        'label': np.random.randint(0, n_classes, size=n_train_samples),
    })

    # Test set: texts only (labels unknown, as in the real competition split).
    test_df = pd.DataFrame({
        'text': [generate_sequence() for _ in range(n_test_samples)],
    })

    # Tab-separated to match the real data files' format.
    train_df.to_csv(os.path.join(data_dir, 'train_set.csv'), sep='\t', index=False)
    test_df.to_csv(os.path.join(data_dir, 'test_a.csv'), sep='\t', index=False)

    print(f"已创建示例数据：")
    print(f"- 训练集：{n_train_samples}条样本")
    print(f"- 测试集：{n_test_samples}条样本")

def load_and_process_data(train_path, test_path):
    """Load the tab-separated train/test files and tokenize their text.

    Each 'text' field is a space-separated string of integer tokens and is
    converted to a list of ints.

    Args:
        train_path: TSV file with 'text' and 'label' columns.
        test_path: TSV file with a 'text' column.

    Returns:
        Tuple of (train_texts, train_labels, test_texts) where the text
        entries are lists of ints and labels is an integer array.
    """
    print("正在读取训练集...")
    train_frame = pd.read_csv(train_path, sep='\t')
    print("正在读取测试集...")
    test_frame = pd.read_csv(test_path, sep='\t')

    def _to_ids(raw):
        # "1 2 3" -> [1, 2, 3]
        return list(map(int, raw.split()))

    return (
        train_frame['text'].apply(_to_ids).values,
        train_frame['label'].values,
        test_frame['text'].apply(_to_ids).values,
    )

def create_data_loaders(train_texts, train_labels, test_texts, batch_size=64):
    """Build train/validation/test DataLoaders.

    Holds out 20% of the training data as a validation split with a fixed
    random seed so runs are reproducible. Only the training loader shuffles.

    Args:
        train_texts: tokenized training sequences.
        train_labels: corresponding integer labels.
        test_texts: tokenized test sequences (unlabeled).
        batch_size: batch size for all three loaders.

    Returns:
        Tuple of (train_loader, val_loader, test_loader).
    """
    # 80/20 split, seeded for reproducibility across runs.
    tr_texts, va_texts, tr_labels, va_labels = train_test_split(
        train_texts, train_labels, test_size=0.2, random_state=42
    )

    return (
        DataLoader(TextDataset(tr_texts, tr_labels), batch_size=batch_size, shuffle=True),
        DataLoader(TextDataset(va_texts, va_labels), batch_size=batch_size),
        DataLoader(TextDataset(test_texts), batch_size=batch_size),
    )

