import os
import platform
import numpy as np
import torch
from sklearn.metrics import accuracy_score
from torch.utils.data import Dataset
from datasets import load_from_disk
from classification.bert_fc.bert_fc_predictor import BertFCPredictor
from classification.bert_fc.bert_fc_trainer import BertFCTrainer

# Resolve model and dataset paths for the current operating system.
# Each entry: (model_dir, data_dir, startup message).
_ENV_PATHS = {
    "Windows": (
        r'C:\Users\23248\PycharmProjects\stance\StanceDetectionLab\lab1\model\mengzi-t5-base',
        r'C:\Users\23248\PycharmProjects\stance\StanceDetectionLab\lab1\data\tot_data',
        "当前执行环境是 Windows...",
    ),
    "Linux": (
        r'/root/autodl-fs/models/meng_zi_t5',
        r'/root/autodl-fs/data/nlp_ai/nlp_seq2seq/nlpcc_2017',
        "当前执行环境是 Linux...",
    ),
}

os_name = platform.system()
if os_name not in _ENV_PATHS:
    raise ValueError("当前执行环境不是 Windows 也不是 Linux")
model_dir, data_dir, _env_msg = _ENV_PATHS[os_name]
print(_env_msg)

class CustomDataset(Dataset):
    """Dataset of (content, stance) pairs loaded from a locally saved HF dataset.

    The on-disk dataset is deterministically split 85%/15% (seed 42) into
    train/test portions; *split* selects which portion this instance serves.
    """

    def __init__(self, split, data_dir, num=None):
        # NOTE: `num` is currently unused; kept for interface compatibility.
        # Load the dataset saved on disk at `data_dir`.
        raw = load_from_disk(data_dir)

        # Deterministic 85/15 train/test split (fixed seed for reproducibility).
        parts = raw.train_test_split(test_size=0.15, seed=42)

        if split not in ('train', 'test'):
            raise ValueError("split 必须是 'train' 或 'test'")
        self.split_data = parts[split]

    def __len__(self):
        # Number of examples in the selected split.
        return len(self.split_data)

    def __getitem__(self, idx):
        # Return the raw text and its stance label as a (content, stance) tuple.
        record = self.split_data[idx]
        return record['content'], record['stance']

class StanceDetection:
    """Train and evaluate a BERT-based stance classifier.

    Seeds all RNG sources for reproducibility, loads the train/test splits
    from ``data_dir``, then fine-tunes the model (``train``) and reports
    test-set accuracy (``evaluate``).
    """

    def __init__(self, seed=0):
        # Seed every RNG source so runs are reproducible.
        self.seed = seed
        torch.manual_seed(seed)           # torch CPU RNG
        torch.cuda.manual_seed_all(seed)  # torch GPU RNG(s)
        np.random.seed(seed)              # numpy RNG

        # Load the dataset splits. The dev and test sets are the exact same
        # deterministic 15% split (same seed), so reuse one object instead
        # of re-reading the dataset from disk a third time.
        self.train_dataset = CustomDataset('train', data_dir=data_dir)
        self.dev_dataset = CustomDataset('test', data_dir=data_dir)
        self.test_dataset = self.dev_dataset

        # Extract texts and labels as plain lists for the trainer/predictor.
        self.train_texts, self.train_labels = self._extract_data(self.train_dataset)
        self.dev_texts, self.dev_labels = self._extract_data(self.dev_dataset)
        self.test_texts, self.test_labels = self._extract_data(self.test_dataset)

    def _extract_data(self, dataset, max_len=400):
        """Collect (text, label) pairs from *dataset* into two parallel lists.

        Each text is truncated to *max_len* characters (default 400).
        The original comment claimed 500, which did not match the code;
        the actual truncation length is now an explicit parameter.

        :param dataset: iterable yielding (text, label) pairs
        :param max_len: maximum number of characters kept per text
        :return: (texts, labels) lists of equal length
        """
        texts = []
        labels = []
        for text, label in dataset:
            texts.append(text[:max_len])
            labels.append(label)
        return texts, labels

    def train(self):
        """Fine-tune BERT with focal loss on the training split."""
        # Instantiate the trainer, set hyperparameters, and start training.
        self.trainer = BertFCTrainer(
            pretrained_model_dir='./model/bert-base-chinese',
            model_dir='./tmp/bertfc',
            learning_rate=5e-5,
            enable_parallel=True,
            loss_type='focal_loss',
        )
        self.trainer.train(
            self.train_texts, self.train_labels,
            validate_texts=self.dev_texts, validate_labels=self.dev_labels,
            batch_size=64, epoch=5
        )

    def evaluate(self):
        """Load the fine-tuned model and print test-set accuracy."""
        # Instantiate the predictor and load the trained model weights.
        self.predictor = BertFCPredictor(
            pretrained_model_dir='./model/bert-base-chinese',
            model_dir='./tmp/bertfc',
            enable_parallel=True
        )
        predict_labels = self.predictor.predict(self.test_texts, batch_size=64)

        # Element-wise comparison of gold vs. predicted labels.
        test_acc = accuracy_score(self.test_labels, predict_labels)
        print('Test Accuracy:', test_acc)

if __name__ == "__main__":
    # Run the full pipeline: fine-tune the classifier, then score it.
    pipeline = StanceDetection(seed=0)
    pipeline.train()
    pipeline.evaluate()