import torch
import datasets
import pandas as pd
from datasets import load_dataset
from torch.utils.data import Dataset
from torch.utils.data import DataLoader
import torch.nn as nn
from transformers import BertTokenizer, BertModel
import torch.optim as optim
from torch.nn.functional import one_hot
import pytorch_lightning as pl
from pytorch_lightning.callbacks.early_stopping import EarlyStopping
from pytorch_lightning.callbacks import ModelCheckpoint, TQDMProgressBar
from torchmetrics import Accuracy, Precision, Recall, F1Score
from pytorch_lightning import Trainer

# 自定义数据集类
class MydataSet(Dataset):
    """Dataset over a CSV file with 'text' and 'label' columns.

    Args:
        path: path to the CSV file; must contain 'text' and 'label' columns.
        split: unused; kept only for backward compatibility with callers.
    """

    def __init__(self, path, split):
        # Read the CSV once and keep plain Python lists for O(1) row access.
        # The previous conversion to a HuggingFace `datasets.Dataset` added
        # memory and startup overhead without any benefit for per-row lookup.
        self.df = pd.read_csv(path)
        self._texts = self.df['text'].tolist()
        self._labels = self.df['label'].tolist()

    def __getitem__(self, item):
        # Return one (text, label) pair, matching the collate_fn contract.
        return self._texts[item], self._labels[item]

    def __len__(self):
        return len(self._texts)

# 定义批次处理函数
def collate_fn(data):
    """Collate a list of (text, label) pairs into padded BERT input tensors.

    Args:
        data: list of (text, label) tuples as yielded by MydataSet.

    Returns:
        Tuple of (input_ids, attention_mask, token_type_ids, labels) tensors,
        with sequences truncated/padded to max_length=216.
    """
    sents = [pair[0] for pair in data]
    labels = [pair[1] for pair in data]

    # BUG FIX: the tokenizer was re-created from_pretrained() on EVERY batch,
    # paying the disk-cache (and potentially network) cost each time. Cache
    # it on the function object instead; with num_workers > 0 each worker
    # process builds its own copy once.
    tokenizer = getattr(collate_fn, '_tokenizer', None)
    if tokenizer is None:
        tokenizer = BertTokenizer.from_pretrained('bert-base-chinese')
        collate_fn._tokenizer = tokenizer

    encoded = tokenizer.batch_encode_plus(
        batch_text_or_text_pairs=sents,
        truncation=True,
        padding='max_length',
        max_length=216,
        return_tensors='pt',
        return_length=True,
    )

    return (
        encoded['input_ids'],
        encoded['attention_mask'],
        encoded['token_type_ids'],
        torch.tensor(labels),
    )

# 定义模型
class BiLSTMClassifier(nn.Module):
    """Frozen BERT embeddings -> 2-layer BiLSTM -> linear classifier.

    Args:
        drop: inter-layer dropout probability for the LSTM.
        hidden_dim: hidden size per LSTM direction.
        output_dim: number of output classes.
    """

    def __init__(self, drop, hidden_dim, output_dim):
        super(BiLSTMClassifier, self).__init__()
        self.drop = drop
        self.hidden_dim = hidden_dim
        self.output_dim = output_dim

        # BERT is used purely as a feature extractor: all of its
        # parameters are frozen and never updated.
        self.embedding = BertModel.from_pretrained('bert-base-chinese')
        for param in self.embedding.parameters():
            param.requires_grad_(False)
        # BERT hidden size is 768; bidirectional=True doubles the
        # feature dimension seen by the final linear layer.
        self.lstm = nn.LSTM(input_size=768, hidden_size=self.hidden_dim, num_layers=2, batch_first=True,
                           bidirectional=True, dropout=self.drop)
        self.fc = nn.Linear(self.hidden_dim * 2, self.output_dim)

    def forward(self, input_ids, attention_mask, token_type_ids):
        # BERT is fully frozen, so skip autograd bookkeeping for its forward
        # pass: this saves activation memory without changing the outputs or
        # the gradients of the trainable LSTM/linear layers. (If BERT is ever
        # unfrozen, remove this no_grad block.)
        with torch.no_grad():
            embedded = self.embedding(
                input_ids=input_ids,
                attention_mask=attention_mask,
                token_type_ids=token_type_ids,
            ).last_hidden_state
        out, (h_n, c_n) = self.lstm(embedded)
        # h_n is (num_layers * 2, batch, hidden); the last two entries are
        # the final forward/backward hidden states of the top layer —
        # concatenated to (batch, hidden_dim * 2).
        output = torch.cat((h_n[-2, :, :], h_n[-1, :, :]), dim=1)
        return self.fc(output)

# 定义PyTorch Lightning模块
class BiLSTMLighting(pl.LightningModule):
    """LightningModule wiring the BiLSTM classifier to data, loss and metrics.

    NOTE(review): this module reads the module-level globals `lr` and
    `batch_size` (set in the __main__ block) at optimizer/dataloader
    construction time — they must be defined before calling Trainer.fit.

    Args:
        drop: LSTM dropout probability.
        hidden_dim: hidden size per LSTM direction.
        output_dim: number of classifier outputs.
        class_num: number of classes for the metrics (default 3).
    """

    def __init__(self, drop, hidden_dim, output_dim, class_num=3):
        super(BiLSTMLighting, self).__init__()
        # Underlying classifier and loss.
        self.model = BiLSTMClassifier(drop, hidden_dim, output_dim)
        self.criterion = nn.CrossEntropyLoss()
        # Datasets are loaded eagerly from fixed Kaggle paths; the second
        # argument ('train') is ignored by MydataSet.
        self.train_dataset = MydataSet('/kaggle/working/train.csv', 'train')
        self.val_dataset = MydataSet('/kaggle/working/val.csv', 'train')
        self.test_dataset = MydataSet('/kaggle/working/test.csv', 'train')

        # Evaluation metrics (per-class and weighted averages). They are
        # instantiated here but not yet wired into the step methods.
        self.class_num = class_num
        self.accuracy = Accuracy(task="multiclass", num_classes=class_num)
        self.precision = Precision(task="multiclass", num_classes=class_num, average="none")
        self.recall = Recall(task="multiclass", num_classes=class_num, average="none")
        self.f1 = F1Score(task="multiclass", num_classes=class_num, average="none")
        self.avg_precision = Precision(task="multiclass", num_classes=class_num, average="weighted")
        self.avg_recall = Recall(task="multiclass", num_classes=class_num, average="weighted")
        self.avg_f1 = F1Score(task="multiclass", num_classes=class_num, average="weighted")

    def log_score(self, name, scores):
        # Log one scalar per class for a per-class metric tensor.
        for i, score_class in enumerate(scores):
            self.log(f"{name}_class{i}", score_class)

    def _shared_step(self, batch):
        """Forward pass + loss, shared by training and validation steps."""
        input_ids, attention_mask, token_type_ids, labels = batch
        # Labels are shifted by +1 before one-hot encoding, which implies raw
        # labels in {-1, 0, 1} — TODO confirm against the CSV data. The
        # one-hot float target relies on CrossEntropyLoss accepting class
        # probabilities (torch >= 1.10).
        y = one_hot(labels.long() + 1, num_classes=3).to(dtype=torch.float)
        y_hat = self.model(input_ids, attention_mask, token_type_ids)
        return self.criterion(y_hat, y)

    def training_step(self, batch, batch_idx):
        loss = self._shared_step(batch)
        self.log('train_loss', loss, prog_bar=True, logger=True, on_step=True, on_epoch=True)
        return loss

    def validation_step(self, batch, batch_idx):
        loss = self._shared_step(batch)
        self.log('val_loss', loss, prog_bar=False, logger=True, on_step=True, on_epoch=True)
        return loss

    def configure_optimizers(self):
        # `lr` is a module-level global set in the __main__ block.
        optimizer = optim.Adam(self.parameters(), lr=lr)
        return optimizer

    def train_dataloader(self):
        # `batch_size` is a module-level global set in the __main__ block.
        return DataLoader(self.train_dataset, batch_size=batch_size, shuffle=True, collate_fn=collate_fn, num_workers=4)

    def val_dataloader(self):
        return DataLoader(self.val_dataset, batch_size=batch_size, shuffle=False, collate_fn=collate_fn, num_workers=4)

    def test_dataloader(self):
        return DataLoader(self.test_dataset, batch_size=batch_size, shuffle=False, collate_fn=collate_fn, num_workers=4)

def train():
    """Configure callbacks and trainer, fit the model, save a checkpoint.

    Returns:
        The trained BiLSTMLighting model. Reads the module-level globals
        epochs, dropout, rnn_hidden and class_num set in the __main__ block.
    """
    # Stop training when val_loss has not improved for 4 validation epochs.
    early_stop_callback = EarlyStopping(
        monitor='val_loss',
        patience=4,
        min_delta=0.0,
        verbose=True,
    )

    # Keep only the single best checkpoint by validation loss.
    checkpoint_callback = ModelCheckpoint(
        monitor='val_loss',
        dirpath='/kaggle/working/checkpoints/',
        filename='model-{epoch:02d}-{val_loss:.2f}',
        save_top_k=1,
        mode='min'
    )

    trainer = Trainer(
        max_epochs=epochs,
        log_every_n_steps=10,
        accelerator='auto',
        devices='auto',
        fast_dev_run=False,
        # BUG FIX: early_stop_callback was created but never registered with
        # the Trainer, so early stopping silently did nothing.
        callbacks=[checkpoint_callback, early_stop_callback, TQDMProgressBar(refresh_rate=10)]
    )

    # Report which device will be used.
    if torch.cuda.is_available():
        print(f"当前使用的设备: GPU")
        print(f"GPU型号: {torch.cuda.get_device_name(0)}")

    model = BiLSTMLighting(drop=dropout, hidden_dim=rnn_hidden, output_dim=class_num)
    trainer.fit(model)
    trainer.save_checkpoint('/kaggle/working/model.ckpt')
    print("模型已保存到 /kaggle/working/model.ckpt")
    return model

if __name__ == '__main__':
    # Hyperparameters — read as module-level globals by BiLSTMLighting,
    # its dataloaders and train().
    batch_size = 32
    epochs = 4
    dropout = 0.2
    rnn_hidden = 512
    rnn_layer = 1  # NOTE(review): unused — LSTM layer count is hard-coded to 2 in BiLSTMClassifier
    class_num = 3
    lr = 0.002
    PATH = '/kaggle/working/checkpoints/'  # NOTE(review): unused — checkpoint dir is set inside train()

    # Removed the module-level BertTokenizer download that used to live
    # here: it was never referenced (collate_fn builds and caches its own
    # tokenizer), and only added startup latency.

    # Train the model.
    model = train()
    