import torch
import torch.nn as nn
from torch.utils.data import DataLoader, TensorDataset
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.utils.class_weight import compute_class_weight
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns


class ModelTrainer:
    """Train, evaluate, and report on a PyTorch classification model.

    Handles class-imbalance weighting, cosine LR scheduling, gradient
    clipping, early stopping on test accuracy, and restoration of the
    best checkpoint after training.
    """

    def __init__(self, model, device=None):
        """Wrap *model* and move it to *device* (auto-detects CUDA when None)."""
        self.model = model
        self.device = device if device else torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.model.to(self.device)

    def train(self, X_train, y_train, X_test, y_test, num_epochs=100, batch_size=64,
              learning_rate=0.001, weight_decay=1e-5, patience=10,
              checkpoint_path='best_model.pth'):
        """Train with Adam + cosine annealing and early stopping.

        Args:
            X_train, y_train: training features/labels as CPU tensors
                (labels are integer class indices; y_train must support
                ``.numpy()``, i.e. live on the CPU).
            X_test, y_test: held-out features/labels driving early stopping.
            num_epochs: maximum number of epochs.
            batch_size: mini-batch size for both loaders.
            learning_rate, weight_decay: Adam hyperparameters.
            patience: epochs without test-accuracy improvement before stopping.
            checkpoint_path: file used to save/restore the best weights.

        Returns:
            dict with per-epoch 'train_loss', 'train_acc', 'test_acc' lists.

        Side effects:
            Writes the best ``state_dict`` to *checkpoint_path* and reloads
            it into ``self.model`` before returning.
        """
        # Balanced class weights counteract label imbalance in the loss.
        class_weights = compute_class_weight('balanced',
                                             classes=np.unique(y_train.numpy()),
                                             y=y_train.numpy())
        class_weights = torch.FloatTensor(class_weights).to(self.device)
        criterion = nn.CrossEntropyLoss(weight=class_weights)

        optimizer = torch.optim.Adam(self.model.parameters(),
                                     lr=learning_rate,
                                     weight_decay=weight_decay)
        scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=num_epochs)

        train_loader = DataLoader(TensorDataset(X_train, y_train),
                                  batch_size=batch_size, shuffle=True)
        test_loader = DataLoader(TensorDataset(X_test, y_test),
                                 batch_size=batch_size, shuffle=False)

        # Start below any reachable accuracy so the first epoch always
        # writes a checkpoint (otherwise a model stuck at 0% accuracy
        # would never save, and the restore below would crash).
        best_acc = -1.0
        patience_counter = 0
        history = {'train_loss': [], 'train_acc': [], 'test_acc': []}

        for epoch in range(num_epochs):
            # --- training pass ---
            self.model.train()
            train_loss = 0
            train_correct = 0

            for batch_X, batch_y in train_loader:
                batch_X, batch_y = batch_X.to(self.device), batch_y.to(self.device)

                optimizer.zero_grad()
                outputs = self.model(batch_X)
                loss = criterion(outputs, batch_y)
                loss.backward()

                # Clip gradients to stabilize training.
                torch.nn.utils.clip_grad_norm_(self.model.parameters(), max_norm=1.0)
                optimizer.step()

                train_loss += loss.item()
                # detach() instead of the deprecated .data attribute.
                _, predicted = torch.max(outputs.detach(), 1)
                train_correct += (predicted == batch_y).sum().item()

            # --- validation pass ---
            test_correct, _, _ = self.evaluate(test_loader)

            train_acc = 100 * train_correct / len(y_train)
            test_acc = 100 * test_correct / len(y_test)

            history['train_loss'].append(train_loss / len(train_loader))
            history['train_acc'].append(train_acc)
            history['test_acc'].append(test_acc)

            scheduler.step()

            # Early stopping: checkpoint on improvement, otherwise count down.
            if test_acc > best_acc:
                best_acc = test_acc
                patience_counter = 0
                torch.save(self.model.state_dict(), checkpoint_path)
            else:
                patience_counter += 1

            if patience_counter >= patience:
                print(f"Early stopping at epoch {epoch + 1}")
                break

            if (epoch + 1) % 10 == 0:
                print(f'Epoch {epoch + 1}: Train Loss: {train_loss / len(train_loader):.4f} | '
                      f'Train Acc: {train_acc:.2f}% | Test Acc: {test_acc:.2f}%')

        # Restore the best checkpoint. map_location keeps this working even
        # if the checkpoint was written from a different device (e.g. saved
        # on GPU, restored on CPU). Skip when no epoch ran (nothing saved).
        if num_epochs > 0:
            self.model.load_state_dict(
                torch.load(checkpoint_path, map_location=self.device))
        return history

    def evaluate(self, test_loader):
        """Run inference over *test_loader* without gradient tracking.

        Returns:
            tuple of (number of correct predictions,
                      list of predicted labels,
                      list of true labels).
        """
        self.model.eval()
        test_correct = 0
        all_predictions = []
        all_true_labels = []

        with torch.no_grad():
            for batch_X, batch_y in test_loader:
                batch_X, batch_y = batch_X.to(self.device), batch_y.to(self.device)
                outputs = self.model(batch_X)
                _, predicted = torch.max(outputs, 1)

                test_correct += (predicted == batch_y).sum().item()
                all_predictions.extend(predicted.cpu().numpy())
                all_true_labels.extend(batch_y.cpu().numpy())

        return test_correct, all_predictions, all_true_labels

    def predict(self, X_test, y_test, batch_size=64):
        """Predict labels for *X_test*.

        Returns:
            (predictions, true_labels) as 1-D numpy arrays aligned by index.
        """
        test_loader = DataLoader(TensorDataset(X_test, y_test),
                                 batch_size=batch_size, shuffle=False)
        _, predictions, true_labels = self.evaluate(test_loader)
        return np.array(predictions), np.array(true_labels)

    def generate_report(self, y_true, y_pred, save_plot=True,
                        plot_path='confusion_matrix.png'):
        """Print a classification report and return the confusion matrix.

        Args:
            y_true, y_pred: label arrays as accepted by sklearn metrics.
            save_plot: when True, render a heatmap of the confusion matrix.
            plot_path: file the heatmap is written to.

        Returns:
            The confusion matrix as a 2-D numpy array.
        """
        print("\n分类报告:")
        print(classification_report(y_true, y_pred,
                                    target_names=['做空(0)', '观望(1)', '做多(2)']))

        cm = confusion_matrix(y_true, y_pred)
        if save_plot:
            plt.figure(figsize=(10, 8))
            sns.heatmap(cm, annot=True, fmt='d', cmap='Blues',
                        xticklabels=['Short(0)', 'Neutral(1)', 'Long(2)'],
                        yticklabels=['Short(0)', 'Neutral(1)', 'Long(2)'])
            plt.title('Confusion Matrix')
            plt.ylabel('True Label')
            plt.xlabel('Predicted Label')
            plt.savefig(plot_path)
            plt.close()

        return cm