import pandas as pd
import torch
from torch.utils.data import Dataset
import pickle
import os


import jieba

def tokenize(text):
    """Segment *text* into a list of tokens using jieba's default cut mode."""
    return [token for token in jieba.cut(text)]

class TextDataset(Dataset):
    """Torch dataset yielding (token-id tensor, label tensor) pairs.

    Parameters
    ----------
    data : sequence of (text, label) pairs (e.g. the lists returned by
        ``load_data``).
    vocab : dict mapping token -> integer id; must contain '<unk>'.
    """

    def __init__(self, data, vocab):
        self.data = data
        self.vocab = vocab

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        text, label = self.data[idx]
        # BUG FIX: the vocabulary is built from jieba tokens (see
        # build_vocab), but encoding previously used text.split(). For
        # Chinese text (no whitespace) that mapped almost every sentence
        # to a single '<unk>'. Tokenize the same way the vocab was built.
        unk = self.vocab['<unk>']
        encoded = [self.vocab.get(token, unk) for token in tokenize(text)]
        return (torch.tensor(encoded, dtype=torch.long),
                torch.tensor(label, dtype=torch.long))

def build_vocab(texts, min_freq=1):
    """Build a token -> id vocabulary from an iterable of raw texts.

    Ids 0 and 1 are reserved for '<pad>' and '<unk>'. Remaining tokens
    are numbered in first-seen order, keeping only those whose total
    frequency across *texts* is at least *min_freq*.
    """
    freqs = {}
    for text in texts:
        for token in tokenize(text):
            freqs[token] = freqs.get(token, 0) + 1

    vocab = {'<pad>': 0, '<unk>': 1}
    for token, count in freqs.items():
        if count >= min_freq:
            # len(vocab) is the next free id, starting at 2.
            vocab[token] = len(vocab)
    return vocab

def load_data(data_dir='dataset'):
    """Load the train/val splits, build the vocabulary, and pickle it.

    Parameters
    ----------
    data_dir : directory containing ``train.csv`` and ``val.csv`` (each
        with 'text' and 'label' columns); the vocabulary is pickled to
        ``<data_dir>/vocab.pkl``. Defaults to 'dataset' so existing
        callers keep working.

    Returns
    -------
    (train_dataset, val_dataset, vocab) where each dataset is a list of
    (text, label) tuples and vocab maps token -> integer id.
    """
    train_data = pd.read_csv(os.path.join(data_dir, 'train.csv'))
    val_data = pd.read_csv(os.path.join(data_dir, 'val.csv'))

    # NOTE(review): the vocab is built over train + val together, so
    # validation tokens are never <unk>; confirm this leakage is intended.
    all_texts = list(train_data['text']) + list(val_data['text'])
    vocab = build_vocab(all_texts)
    print(f"Vocab size: {len(vocab)}")

    with open(os.path.join(data_dir, 'vocab.pkl'), 'wb') as f:
        pickle.dump(vocab, f)

    train_dataset = list(zip(train_data['text'], train_data['label']))
    val_dataset = list(zip(val_data['text'], val_data['label']))
    return train_dataset, val_dataset, vocab
