#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
-------------------------------------------------
   File Name：     train
   Description :   
   Author :       lth
   date：          2023/1/30
-------------------------------------------------
   Change Activity:
                   2023/1/30 10:29: create this script
-------------------------------------------------
"""
__author__ = 'lth'

import csv

import torch
from torch import nn
from torch.backends import cudnn
from torch.utils.data import DataLoader
from tqdm import tqdm

from config import GetConfig
from datalist import TextClassifyDataset
from model import BertClassifier


class Train:
    """End-to-end trainer for the BBC-News BERT text classifier.

    Builds train/test dataloaders from the CSV files, places the model on
    the configured device (wrapping it in ``DataParallel`` when CUDA is
    available), and drives the per-epoch train/evaluate loop.
    """

    def __init__(self):
        self.args = GetConfig()
        print(f"-----------{self.args.project_name}-------------")
        use_cuda = self.args.use_cuda and torch.cuda.is_available()
        self.device = torch.device("cuda" if use_cuda else "cpu")

        # pin_memory only helps when batches are copied to a CUDA device.
        kwargs = {"num_workers": 4, "pin_memory": use_cuda}

        train_lines = self.parse_data_from_csv(text_path="data/BBC News Train.csv")
        test_lines = self.parse_data_from_csv_test(text_path="data/BBC News Test.csv")

        train_data = TextClassifyDataset(train_lines)
        test_data = TextClassifyDataset(test_lines)

        self.train_dataloader = DataLoader(train_data, batch_size=self.args.train_batch_size, shuffle=True, **kwargs)
        self.test_dataloader = DataLoader(test_data, batch_size=self.args.test_batch_size, **kwargs)

        # BUG FIX: the model must be on the target device *before* it is
        # wrapped in DataParallel and before the optimizer captures its
        # parameters; the original left it on the CPU.
        self.model = BertClassifier().to(self.device)

        if use_cuda:
            self.model = torch.nn.DataParallel(self.model, device_ids=range(torch.cuda.device_count()))
            cudnn.benchmark = True
            cudnn.enabled = True

        if self.args.resume:
            # map_location keeps a GPU-saved checkpoint loadable on CPU-only hosts.
            checkpoint = torch.load("weights/best.pth", map_location=self.device)
            self.model.load_state_dict(checkpoint["model_state_dict"])
        else:
            print("train from scratch")

        self.criterion = nn.CrossEntropyLoss()
        self.optim = torch.optim.Adam(self.model.parameters(), lr=self.args.lr, weight_decay=1e-5)

    def work(self):
        """Run the full training schedule: one train + one test pass per epoch."""
        for epoch in range(1, self.args.epochs + 1):
            self.train(epoch)
            self.test(epoch)

        torch.cuda.empty_cache()
        print("model finished training")

    def train(self, epoch):
        """Run one training epoch, reporting running loss and accuracy.

        Args:
            epoch: 1-based epoch index, used only for progress display.
        """
        self.model.train()
        total = 0
        total_acc = 0
        pbar = tqdm(self.train_dataloader, desc=f"Train Epoch :{epoch}/{self.args.epochs}")
        for data, label in pbar:
            label = label.to(self.device)
            mask = data["attention_mask"].to(self.device)
            input_id = data["input_ids"].squeeze(1).to(self.device)
            self.model.zero_grad()
            output = self.model(input_id, mask)

            loss = self.criterion(output, label)
            # .item() keeps the accumulator a plain int; the original summed
            # tensors, keeping the count on-device (consistent with test()).
            acc = (output.argmax(dim=1) == label).sum().item()
            total += label.shape[0]
            total_acc += acc

            loss.backward()
            self.optim.step()

            pbar.set_description(
                f"[Train Epoch]: {epoch}\t Loss: {loss.item()}\t Acc: {total_acc / total}"
            )

    @torch.no_grad()
    def test(self, epoch):
        """Evaluate the model on the test split, reporting running accuracy.

        Args:
            epoch: 1-based epoch index, used only for progress display.
        """
        self.model.eval()
        total = 0
        total_acc = 0
        pbar = tqdm(self.test_dataloader, desc=f"Test Epoch :{epoch}/{self.args.epochs}")
        for data, label in pbar:
            label = label.to(self.device)
            mask = data["attention_mask"].to(self.device)
            input_id = data["input_ids"].squeeze(1).to(self.device)
            output = self.model(input_id, mask)
            acc = (output.argmax(dim=1) == label).sum().item()
            total += label.shape[0]
            total_acc += acc

            pbar.set_description(
                f"[Test Epoch]: {epoch}\t Acc: {total_acc / total}"
            )

    @staticmethod
    def parse_data_from_csv(text_path):
        """Parse the labelled training CSV.

        Expects rows of (ArticleId, Text, Category) after a header line.

        Args:
            text_path: path to the training CSV file.

        Returns:
            dict with parallel lists under keys "category" and "text".
        """
        category = []
        text = []

        # `with` guarantees the handle is closed (the original leaked it);
        # newline="" is the csv-module-recommended open mode.
        with open(text_path, newline="") as f_text:
            f_csv = csv.reader(f_text)
            next(f_csv)  # skip header row
            for row in f_csv:
                category.append(row[2])
                text.append(row[1])

        return {"category": category, "text": text}

    @staticmethod
    def parse_data_from_csv_test(text_path):
        """Parse the unlabelled test CSV, joining labels from the sample solution.

        The test split carries no Category column, so labels are looked up
        by ArticleId in "data/BBC News Sample Solution.csv".

        Args:
            text_path: path to the test CSV file (ArticleId, Text rows).

        Returns:
            dict with parallel lists under keys "category" and "text".
        """
        category = []
        text = []

        # Both files are closed deterministically (the original leaked them).
        with open("data/BBC News Sample Solution.csv", newline="") as f_index:
            index_csv = csv.reader(f_index)
            next(index_csv)  # skip header row
            # ArticleId -> Category lookup table.
            category_index = {row[0]: row[1] for row in index_csv}

        with open(text_path, newline="") as f_text:
            f_csv = csv.reader(f_text)
            next(f_csv)  # skip header row
            for row in f_csv:
                category.append(category_index[row[0]])
                text.append(row[1])

        return {"category": category, "text": text}


if __name__ == "__main__":
    # Script entry point: build the trainer and run the full schedule.
    trainer = Train()
    trainer.work()
