import dataclasses
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer


@dataclasses.dataclass
class Config:
    """Central registry of model checkpoint names and the compute device.

    BUG FIX: `@dataclass` only treats *annotated* class attributes as
    fields; the originals had no annotations, so the generated dataclass
    had zero fields. Annotating each name as `str` (with the same default)
    makes them real fields while `Config.attr` class-level access keeps
    working for existing callers.
    """

    # Instruction-tuned Qwen3 4B checkpoint (Unsloth mirror).
    unsloth_qwen3: str = "unsloth/Qwen3-4B"
    # Chinese sentiment model fine-tuned on Dianping reviews.
    roberta_base_finetuned_dianping_chinese: str = "uer/roberta-base-finetuned-dianping-chinese"
    # Target device for training/inference.
    device: str = "cuda"
    # Compact 3-layer Chinese RoBERTa (HFL).
    hfl_rbt3: str = "hfl/rbt3"
    # Chinese MacBERT, large variant (HFL).
    hfl_chinese_macbert_large: str = "hfl/chinese-macbert-large"
    # Chinese MacBERT, base variant (HFL).
    hfl_chinese_macbert_base: str = "hfl/chinese-macbert-base"


def init_memory():
    """Print the GPU name, its total memory, and memory already reserved.

    Returns:
        tuple[float, float]: (reserved_gb, total_gb) in GiB, rounded to
        3 decimal places. Intended to be captured before training and
        passed to `memory_info` afterwards.
    """
    props = torch.cuda.get_device_properties(0)
    # Peak reserved bytes so far, converted to GiB.
    reserved_gb = round(torch.cuda.max_memory_reserved() / 1024 / 1024 / 1024, 3)
    total_gb = round(props.total_memory / 1024 / 1024 / 1024, 3)
    print(f"GPU = {props.name}. Max memory = {total_gb} GB.")
    print(f"{reserved_gb} GB of memory reserved.")
    return reserved_gb, total_gb


def memory_info(start_gpu_memory, max_memory, trainer_stats):
    """Print final training-time and peak-GPU-memory statistics.

    Args:
        start_gpu_memory: Reserved GiB captured by `init_memory` before training.
        max_memory: Total GPU memory in GiB from `init_memory`.
        trainer_stats: Trainer output exposing `metrics['train_runtime']`
            in seconds (e.g. the return value of `Trainer.train()`).
    """
    gib = 1024 / 1024 / 1024  # kept as chained divisions below for parity
    peak_gb = round(torch.cuda.max_memory_reserved() / 1024 / 1024 / 1024, 3)
    # Memory attributable to training itself = peak minus baseline.
    training_gb = round(peak_gb - start_gpu_memory, 3)
    peak_pct = round(peak_gb / max_memory * 100, 3)
    training_pct = round(training_gb / max_memory * 100, 3)

    runtime_s = trainer_stats.metrics['train_runtime']
    print(f"{runtime_s} seconds used for training.")
    print(
        f"{round(runtime_s/60, 2)} minutes used for training."
    )
    print(f"Peak reserved memory = {peak_gb} GB.")
    print(f"Peak reserved memory for training = {training_gb} GB.")
    print(f"Peak reserved memory % of max memory = {peak_pct} %.")
    print(f"Peak reserved memory for training % of max memory = {training_pct} %.")

def init_dataset(tokenizer: AutoTokenizer):
    """Load the ChnSentiCorp hotel-review CSV, tokenize it, and split it.

    Args:
        tokenizer: A HuggingFace tokenizer used to encode the reviews.

    Returns:
        tuple: (train_split, test_split) of tokenized `datasets.Dataset`
        objects; 10% of the rows go to the test split.
    """
    from datasets import Dataset

    raw = Dataset.from_csv("data/ChnSentiCorp_htl_all.csv")
    # Drop rows whose review text is missing before tokenization.
    raw = raw.filter(lambda row: row["review"] is not None)
    splits = raw.train_test_split(test_size=0.1)

    def tokenize_batch(batch, tokenizer=tokenizer):
        # Fixed-length encoding: truncate/pad every review to 128 tokens.
        encoded = tokenizer(batch["review"], truncation=True, max_length=128, padding="max_length")
        encoded["labels"] = batch["label"]
        return encoded

    tokenized = splits.map(tokenize_batch, batched=True, num_proc=4)
    return tokenized["train"], tokenized["test"]

def init_model_and_tokenizer(model_name: str = Config.hfl_rbt3):
    """Load a pretrained checkpoint for binary sequence classification.

    Args:
        model_name: HuggingFace model id; defaults to the compact
            Chinese rbt3 checkpoint from `Config`.

    Returns:
        tuple: (model, tokenizer) loaded from the same checkpoint, with
        the classification head sized for 2 labels.
    """
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, num_labels=2)
    return model, tokenizer


if __name__ == '__main__':
    # AutoTokenizer is already imported at module level, so the redundant
    # local `from transformers import AutoTokenizer` was removed.
    tokenizer = AutoTokenizer.from_pretrained(Config.hfl_rbt3)
    # init_dataset returns (train_split, test_split); unpack explicitly
    # instead of the opaque ds[0][0] / ds[1][0] tuple indexing.
    train_ds, test_ds = init_dataset(tokenizer)
    # Sanity check: show one tokenized example from each split.
    print(train_ds[0])
    print(test_ds[0])






