import logging
import os

import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm

from config import config
from mydataloader import MyDataLoader
from mymodel import MyLSTMModel

# Module-level logger (not referenced elsewhere in this chunk — presumably kept for future use).
logger = logging.getLogger(__name__)
# Run on GPU when available, otherwise fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")


class Trainer:
    def __init__(self):

        self.best_loss = float('inf')
        self.train_loss = []
        self.val_loss = []
        self.max_patience = 10
        self.patience = 0
        self.loader = MyDataLoader()

        self.model = MyLSTMModel(num_cities=self.loader.get_cities_length(), num_attrs=self.loader.get_attributes_length()).to(device)
        self.optimizer = optim.AdamW(
            self.model.parameters(),
            lr=1e-3,
            weight_decay=1e-4
        )
        self.max_epoch = 300

        self.scheduler = optim.lr_scheduler.ReduceLROnPlateau(
            self.optimizer, mode='min', factor=0.5, patience=4
        )

    def custom_loss(self, outputs, targets):
        def direction_accuracy_loss(outputs, targets):
            sign_output = torch.sign(outputs)
            sign_target = torch.sign(targets)
            correct = (sign_output == sign_target).float()
            return 1.0 - correct.mean()

        mse = F.mse_loss(outputs, targets)
        mae = F.l1_loss(outputs, targets)
        direction_penalty = direction_accuracy_loss(outputs, targets)
        return mse + 0.5 * mae + 2.0 * direction_penalty

    def train(self, train_loader: DataLoader):
        self.model.train()
        loss_list = []
        mae_list = []

        for inputs, targets, city_ids, attr_ids in train_loader:
            inputs = inputs.to(device).float()
            targets = targets.to(device).float()
            city_ids = city_ids.to(device)
            attr_ids = attr_ids.to(device)

            self.optimizer.zero_grad()

            outputs = self.model(inputs, city_ids, attr_ids)

            loss = self.custom_loss(outputs, targets)
            loss.backward()
            nn.utils.clip_grad_norm_(self.model.parameters(), 1.0)
            self.optimizer.step()

            loss_list.append(loss.item())
            mae = F.l1_loss(outputs, targets, reduction='mean')
            mae_list.append(mae.item())

        avg_loss = np.mean(loss_list)
        avg_mae = np.mean(mae_list)

        return avg_loss, avg_mae

    def validate(self, val_loader: DataLoader):
        self.model.eval()
        loss_list = []
        mae_list = []

        with torch.no_grad():
            for inputs, targets, city_ids, attr_ids in val_loader:
                inputs = inputs.to(device).float()
                targets = targets.to(device).float()
                city_ids = city_ids.to(device)
                attr_ids = attr_ids.to(device)

                outputs = self.model(inputs, city_ids, attr_ids)

                loss = self.custom_loss(outputs, targets)
                loss_list.append(loss.item())

                mae = F.l1_loss(outputs, targets, reduction='mean')
                mae_list.append(mae.item())

        avg_loss = np.mean(loss_list)
        avg_mae = np.mean(mae_list)

        return avg_loss, avg_mae

    def fit(self, train_loader: DataLoader, val_loader: DataLoader):
        writer = SummaryWriter()
        best_path = os.path.join(config.MODELS_DIR, 'model.pth')

        best_epoch = 0
        self.patience = 0
        train_metrics = {'loss': [], 'mae': []}
        val_metrics = {'loss': [], 'mae': []}

        # 使用 tqdm 包裹 epoch 循环，添加 ncols 控制宽度
        with tqdm(range(self.max_epoch), desc="Training", unit="epoch", ncols=150, colour="#F29339") as t:
            for epoch in t:
                train_loss, train_mae = self.train(train_loader)
                val_loss, val_mae = self.validate(val_loader)

                self.scheduler.step(val_loss)

                train_metrics['loss'].append(train_loss)
                train_metrics['mae'].append(train_mae)
                val_metrics['loss'].append(val_loss)
                val_metrics['mae'].append(val_mae)

                writer.add_scalars('Loss', {'train': train_loss, 'val': val_loss}, epoch)
                writer.add_scalars('MAE', {'train': train_mae, 'val': val_mae}, epoch)
                writer.add_scalar('LearningRate/lr', self.optimizer.param_groups[0]['lr'], epoch)

                # 更新 tqdm 显示内容（动态显示 early stopping 状态）
                t.set_postfix({
                    "patience": f"{self.patience}"
                })

                # 保存模型
                if val_loss < self.best_loss:
                    self.best_loss = val_loss
                    best_epoch = epoch
                    self.patience = 0
                    torch.save({
                        'model': self.model.state_dict(),
                        'optimizer': self.optimizer.state_dict(),
                        'epoch': epoch,
                        'lookback': self.loader.lookback,
                        'feature_dim': MyLSTMModel.input_dim
                    }, best_path)
                else:
                    self.patience += 1
                    if self.patience >= self.max_patience:
                        print(f"Early stopping at epoch {epoch + 1}")
                        torch.save({
                            'model': self.model.state_dict(),
                            'optimizer': self.optimizer.state_dict(),
                            'epoch': epoch,
                            'lookback': self.loader.lookback,
                            'feature_dim': MyLSTMModel.input_dim
                        }, os.path.join(config.MODELS_DIR, 'last_epoch_model.pth'))
                        break

        checkpoint = torch.load(best_path, map_location=device, weights_only=False)
        self.model.load_state_dict(checkpoint['model'])
        print(f"Best validation loss: {self.best_loss:.4f} at epoch {best_epoch}")
        return train_metrics, val_metrics

    def start(self):
        train_dataloader, test_dataloader, train_size, test_X, test_y_true = self.loader.get_train_dataset()
        # 模型保存路径
        os.makedirs(config.MODELS_DIR, exist_ok=True)

        self.fit(train_dataloader, test_dataloader)


if __name__ == '__main__':
    # Report how many intra-op threads torch will use before training starts.
    print("torch.get_num_threads()", torch.get_num_threads())
    Trainer().start()
