import os
import json
import numpy as np
from typing import List, Dict, Tuple
from scipy.special import softmax
from datasets import Dataset
from evaluate import load
from transformers import (
    AutoTokenizer, 
    AutoModelForSequenceClassification, 
    DataCollatorWithPadding, 
    TrainingArguments, 
    Trainer
)


def data_split(data_path: str, split_ratio: float = 0.8):
    """Load a JSON list of records and split it into train/test Datasets.

    Args:
        data_path: Path to a UTF-8 JSON file holding a list of flat
            ``{column: value}`` records (e.g. ``{'query': ..., 'label': ...}``).
        split_ratio: Fraction of examples assigned to the train split.

    Returns:
        Tuple ``(train_data, test_data)`` of ``datasets.Dataset`` objects.
    """
    with open(data_path, 'r', encoding='utf-8') as data_file:
        json_data: List[Dict[str, str]] = json.load(data_file)

    # Dataset.from_list consumes the list-of-records layout directly; unlike
    # the manual column pivot keyed on json_data[0], it does not crash on an
    # empty list and does not assume every record shares record 0's keys.
    dataset = Dataset.from_list(json_data)
    # Fixed seed keeps the split reproducible across runs.
    split_dataset = dataset.train_test_split(train_size=split_ratio, seed=10)

    return split_dataset['train'], split_dataset['test']


def text_encoding(dataset: Dataset):
    """Tokenize one example's 'query' text and attach its integer label.

    Relies on the module-level ``tokenizer`` and ``label2id`` defined in the
    main script. NOTE(review): assumes it is used via non-batched
    ``Dataset.map`` so ``dataset['label']`` is a single string — confirm if
    callers ever pass ``batched=True``.
    """
    features = tokenizer(
        text=dataset['query'],
        max_length=512,
        padding=True,
        truncation=True,
    )
    features['labels'] = label2id[dataset['label']]
    return features


def compute_metrics(eval_pred: Tuple):
    """Compute accuracy, F1 and ROC-AUC for an eval step.

    Uses the module-level ``accuracy``/``f1``/``roc_auc`` metrics loaded in
    the main script. ``eval_pred`` is a ``(logits, labels)`` pair as supplied
    by the Trainer.
    """
    logits, labels = eval_pred
    # Positive-class (index 1) probabilities feed the ROC-AUC metric.
    pos_scores = softmax(logits, axis=1)[:, 1]
    predicted = np.argmax(logits, axis=1)

    # Merge all metric dicts into a single result mapping.
    metrics = {}
    metrics.update(accuracy.compute(predictions=predicted, references=labels))
    metrics.update(f1.compute(predictions=predicted, references=labels))
    metrics.update(roc_auc.compute(prediction_scores=pos_scores, references=labels))
    return metrics


if __name__ == '__main__':
    corpus_path = 'your_data_path'
    checkpoint = 'your_model_path'

    # Prefer a local fine-tuned checkpoint; fall back to the public base model.
    if not os.path.exists(checkpoint):
        checkpoint = 'hfl/chinese-roberta-wwm-ext'

    train_split, test_split = data_split(corpus_path)

    # Module-level name: text_encoding reads this tokenizer as a global.
    tokenizer = AutoTokenizer.from_pretrained(
        checkpoint,
        clean_up_tokenization_spaces=True
    )

    # Binary label space; label2id is also read by text_encoding as a global.
    label2id = {'nonmed': 0, 'med': 1}
    id2label = {0: 'nonmed', 1: 'med'}
    classifier = AutoModelForSequenceClassification.from_pretrained(
        checkpoint,
        num_labels=len(label2id),
        label2id=label2id,
        id2label=id2label
    )

    # Tokenize once up front; per-batch padding is handled by the collator.
    train_features = train_split.map(text_encoding, remove_columns=train_split.column_names)
    test_features = test_split.map(text_encoding, remove_columns=test_split.column_names)
    collator = DataCollatorWithPadding(tokenizer=tokenizer)

    # Evaluation metrics, kept module-level so compute_metrics can see them.
    accuracy = load("accuracy")
    f1 = load("f1", "binary")
    roc_auc = load("roc_auc", "binary")

    training_args = TrainingArguments(
        output_dir="output",
        num_train_epochs=2,
        per_device_train_batch_size=16,
        per_device_eval_batch_size=16,
        learning_rate=3e-5,
        lr_scheduler_type="cosine",
        warmup_ratio=0.02,
        weight_decay=0.01,
        fp16=True,
        logging_strategy="steps",
        logging_steps=0.01,
        eval_strategy="steps",
        eval_steps=0.1,
        save_strategy='epoch'
    )

    trainer = Trainer(
        model=classifier,
        args=training_args,
        train_dataset=train_features,
        eval_dataset=test_features,
        tokenizer=tokenizer,
        data_collator=collator,
        compute_metrics=compute_metrics
    )

    # Fine-tune, then run a final evaluation on the held-out split.
    trainer.train()
    trainer.evaluate()