# -*- encoding: utf-8 -*-
'''
@File    :   txt_classify.py
@Time    :   2024/10/04 10:40:35
@Author  :   zhangqidong
@email   :   835439833@qq.com
@info    :   Text classification with BERT (文本分类)
'''

import torch
from transformers import BertTokenizer, BertModel
from torch import nn, cuda
from torch.utils.data import Dataset, DataLoader, ConcatDataset
import os
from tqdm import tqdm


def get_data(data_root, limit=100):
    """Read up to *limit* plain-text files from *data_root*.

    Args:
        data_root: Directory containing one UTF-8 text file per sample.
        limit: Maximum number of files to read; ``None`` reads all of them.
            Defaults to 100 because the full corpus is too large for a demo.

    Returns:
        list[str]: The file contents, in ``os.listdir`` order.
    """
    names = os.listdir(data_root)[:limit]   # [:None] keeps the whole listing
    data_list = []
    for name in names:
        with open(os.path.join(data_root, name), "r", encoding='utf-8') as f:
            data_list.append(f.read())
    return data_list


class BertClassifier(nn.Module):
    """A BERT encoder followed by a single linear classification head."""

    def __init__(self, bert_model, num_classes):
        """Wrap *bert_model* and attach a linear head mapping its pooled
        output vector to *num_classes* logits."""
        super().__init__()
        self.bert = bert_model
        hidden = bert_model.config.hidden_size
        self.classifier = nn.Linear(hidden, num_classes)

    def forward(self, input):
        """Encode tokenizer output *input* (a mapping of tensors, expanded
        as keyword arguments) and return raw, unnormalized class logits."""
        encoded = self.bert(**input)
        # pooler_output is the encoder's pooled [CLS] representation.
        return self.classifier(encoded.pooler_output)


class BertDataset(Dataset):
    """Dataset over a list of texts that all share a single class label."""

    def __init__(self, data, label):
        """Store the raw texts and the one label applied to every item."""
        self.data = data
        self.label = label

    def __getitem__(self, index):
        """Return the ``(text, label)`` pair at *index*; the label is the
        same for every item in this dataset."""
        return self.data[index], self.label

    def __len__(self):
        """Number of texts held by the dataset."""
        return len(self.data)


class Trainer():
    """Fine-tunes a BERT sentiment classifier on the IMDb review corpus."""

    def __init__(self, batch_size=10) -> None:
        """Build tokenizer, model, optimizer and loss function.

        Args:
            batch_size: Mini-batch size for both training and evaluation.
        """
        self.device = "cuda" if cuda.is_available() else "cpu"
        self.tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
        self.bert_model = BertModel.from_pretrained('bert-base-uncased')
        self.model = BertClassifier(bert_model=self.bert_model, num_classes=2).to(self.device)
        self.optimizer = torch.optim.Adam(self.model.parameters(), lr=5e-5)
        self.loss_fn = nn.CrossEntropyLoss()
        self.batch_size = batch_size

    def _make_loader(self, split, shuffle, drop_last):
        """Build a DataLoader over the neg (label 0) and pos (label 1)
        folders of *split* ('train' or 'test')."""
        root = os.path.join("data", "aclImdb_v1", "aclImdb", split)
        neg_dataset = BertDataset(data=get_data(os.path.join(root, "neg")), label=0)
        pos_dataset = BertDataset(data=get_data(os.path.join(root, "pos")), label=1)
        return DataLoader(ConcatDataset([neg_dataset, pos_dataset]),
                          self.batch_size, shuffle=shuffle, drop_last=drop_last)

    def train(self, num_epochs):
        """Run *num_epochs* of training over the IMDb train split."""
        dataloader = self._make_loader("train", shuffle=True, drop_last=True)
        self.model.train()
        for epoch in range(num_epochs):
            t = tqdm(dataloader, desc=f'[epoch {epoch}]')
            for x, labels in t:
                labels = labels.to(self.device)
                # Tokenize and encode the raw strings into model inputs.
                inputs = self.tokenizer(x, return_tensors='pt', padding=True,
                                        truncation=True).to(self.device)

                self.optimizer.zero_grad()
                outputs = self.model(inputs)
                loss = self.loss_fn(outputs, labels)
                loss.backward()
                self.optimizer.step()

                t.set_postfix(loss=loss.item())

    def test(self):
        """Evaluate on the IMDb test split.

        Returns:
            float: Fraction of correctly classified test examples
            (0.0 when the test set is empty).
        """
        # No shuffling and no sample dropping: score every example once.
        dataloader = self._make_loader("test", shuffle=False, drop_last=False)
        t = tqdm(dataloader, desc='[test]')
        self.model.eval()
        correct = 0
        total = 0
        with torch.no_grad():   # inference only — skip autograd bookkeeping
            for x, labels in t:
                labels = labels.to(self.device)
                # Tokenize and encode the raw strings into model inputs.
                inputs = self.tokenizer(x, return_tensors='pt', padding=True,
                                        truncation=True).to(self.device)
                outputs = self.model(inputs)
                preds = outputs.argmax(dim=1)
                correct += (preds == labels).sum().item()
                total += labels.size(0)
                t.set_postfix(acc=correct / max(total, 1))
        return correct / total if total else 0.0

    def main(self, num_epochs):
        """Train for *num_epochs*, then evaluate; returns test accuracy."""
        self.train(num_epochs)
        return self.test()


if __name__ == '__main__':
    # Script entry point: train for 10 epochs, then evaluate on the test split.
    Trainer().main(10)
