import torch
from torch import nn
from torch.optim import Adam, AdamW
from torch.optim.lr_scheduler import ReduceLROnPlateau

from cnn_train.CnnStockConfig import CnnStockConfig


class CnnStockTrain:
    """Training/validation driver for a stock-classification CNN.

    Uses cross-entropy loss with an Adam optimizer and a plateau LR
    scheduler; checkpoints the best model (by validation accuracy) to
    'best_model.pth' and stops early when validation accuracy stalls.
    """

    def __init__(self, model, train_loader, test_loader):
        """
        :param model: the network to train (assumed to already be on
            CnnStockConfig.device — TODO confirm with caller)
        :param train_loader: DataLoader yielding (inputs, labels) batches for training
        :param test_loader: DataLoader yielding (inputs, labels) batches for validation
        """
        self.train_loader = train_loader
        self.test_loader = test_loader
        self.model = model
        self.criterion = nn.CrossEntropyLoss()
        self.optimizer = Adam(self.model.parameters(), lr=CnnStockConfig.learning_rate)
        # mode='max' because the metric we monitor is validation accuracy
        self.scheduler = ReduceLROnPlateau(self.optimizer, mode='max', patience=5)

    def train(self):
        """
        Run the full training loop with per-epoch validation.

        Saves the best-so-far weights to 'best_model.pth' and stops early
        after CnnStockConfig.early_stop_patience epochs without improvement.
        :return: None
        """
        best_val_acc = 0.0
        no_improve_epochs = 0

        for epoch in range(CnnStockConfig.num_epochs):
            # --- training phase ---
            self.model.train()
            train_loss, correct, total = 0.0, 0, 0

            for inputs, labels in self.train_loader:
                inputs = inputs.to(CnnStockConfig.device)
                # view(-1) flattens (B, 1) labels to (B,). Unlike squeeze(),
                # it does not collapse a size-1 last batch to a 0-d tensor,
                # which would break labels.size(0) and CrossEntropyLoss.
                labels = labels.to(CnnStockConfig.device).view(-1)

                outputs = self.model(inputs)
                loss = self.criterion(outputs, labels)

                self.optimizer.zero_grad()
                loss.backward()
                # clip_grad_norm(self.model.parameters(), 1)
                self.optimizer.step()

                train_loss += loss.item()
                _, predicted = outputs.max(1)
                total += labels.size(0)
                correct += predicted.eq(labels).sum().item()

            # --- validation phase ---
            self.model.eval()
            val_correct, val_total = 0, 0
            with torch.no_grad():
                for inputs, labels in self.test_loader:
                    inputs = inputs.to(CnnStockConfig.device)
                    labels = labels.to(CnnStockConfig.device).view(-1)
                    outputs = self.model(inputs)
                    _, predicted = outputs.max(1)
                    val_total += labels.size(0)
                    val_correct += (predicted == labels).sum().item()

            train_loss /= len(self.train_loader)
            train_acc = 100 * correct / total
            val_acc = 100 * val_correct / val_total

            # Reduce the LR when validation accuracy plateaus. The scheduler
            # was constructed with mode='max' but was previously never stepped.
            self.scheduler.step(val_acc)

            # Track the best model and the early-stopping counter.
            if val_acc > best_val_acc:
                best_val_acc = val_acc
                no_improve_epochs = 0
                torch.save(self.model.state_dict(), 'best_model.pth')
            else:
                no_improve_epochs += 1

            # Log every epoch. Previously this was over-indented inside the
            # else branch, so progress only printed on non-improving epochs.
            print(f'Epoch {epoch + 1:03d} | '
                  f'Train Loss: {train_loss:.4f} | '
                  f'Train Acc: {train_acc:.2f}% | '
                  f'Val Acc: {val_acc:.2f}% | '
                  f'LR: {self.optimizer.param_groups[0]["lr"]:.2e}')

            if no_improve_epochs >= CnnStockConfig.early_stop_patience:
                print(f'Early stopping at epoch {epoch + 1}')
                break

        print(f'Best Validation Accuracy: {best_val_acc:.2f}%')