import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import TensorDataset, DataLoader
import torch.cuda
import numpy as np
from 封装ST_GCN.model.st_gcn import STGCN


class STGCNTrainer:
    """Train an ST-GCN classifier on data/labels loaded from .npy files.

    Loads the training arrays, moves them to GPU when available, builds the
    model, and runs an epoch loop with early stopping on the training loss.
    The best model weights are checkpointed to 'stgcn_model.pth'.
    """

    def __init__(self, train_data_path, train_label_path, num_nodes, use_residual, batch_size=32, num_epochs=300, lr=0.001,
                 patience=30,
                 graph_args=None, edge_importance_weighting=True):
        """Set up data loaders, model, loss, optimizer and early-stopping state.

        Args:
            train_data_path: path to a .npy array of shape (N, C, ...) — the
                second axis is used as the model's input-channel count.
            train_label_path: path to a .npy array of integer class labels.
            num_nodes: number of graph nodes passed through to STGCN.
            use_residual: whether STGCN blocks use residual connections.
            batch_size: mini-batch size for the training DataLoader.
            num_epochs: maximum number of training epochs.
            lr: Adam learning rate.
            patience: epochs without a new best training loss before stopping.
            graph_args: dict of graph construction options for STGCN; defaults
                to {'layout': 'openpose', 'strategy': 'adaptive'}.
            edge_importance_weighting: enable learnable edge importance in STGCN.
        """
        # NOTE: a None sentinel avoids the mutable-default-argument pitfall —
        # a dict literal default would be shared across every instance.
        if graph_args is None:
            graph_args = {'layout': 'openpose', 'strategy': 'adaptive'}

        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        print(f"Using device: {self.device}")

        # Load the training data and labels from disk.
        self.train_data = np.load(train_data_path)
        self.train_label = np.load(train_label_path)

        # Convert to tensors and move to the selected device up front, so
        # batches yielded by the DataLoader are already device-resident.
        self.train_data = torch.tensor(self.train_data, dtype=torch.float32).to(self.device)
        self.train_label = torch.tensor(self.train_label, dtype=torch.long).to(self.device)

        # Build the training dataset and loader.
        train_dataset = TensorDataset(self.train_data, self.train_label)
        self.train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)

        # Derive the number of classes from the labels and the input-channel
        # count from the data's second axis.
        self.num_classes = len(np.unique(self.train_label.cpu().numpy()))
        self.in_channels = self.train_data.shape[1]
        self.graph_args = graph_args
        self.edge_importance_weighting = edge_importance_weighting
        self.num_nodes = num_nodes
        self.use_residual = use_residual

        # Create the ST-GCN model and move it to the selected device.
        self.model = STGCN(self.in_channels, self.num_classes, self.graph_args,
                           self.num_nodes,
                           self.edge_importance_weighting,
                           use_residual=self.use_residual).to(self.device)

        # Loss function and optimizer.
        self.criterion = nn.CrossEntropyLoss()
        self.optimizer = optim.Adam(self.model.parameters(), lr=lr)

        # Early-stopping bookkeeping.
        self.patience = patience
        self.best_epoch = 0
        self.counter = 0
        self.best_train_loss = float('inf')
        self.num_epochs = num_epochs

    def calculate_accuracy(self, data_loader):
        """Return classification accuracy (fraction correct) over a DataLoader.

        Puts the model in eval mode and disables gradient tracking; assumes
        the loader yields (inputs, labels) batches already on self.device.
        """
        self.model.eval()
        correct = 0
        total = 0
        with torch.no_grad():
            for inputs, labels in data_loader:
                outputs = self.model(inputs)
                # Predicted class is the argmax over the class dimension.
                _, predicted = torch.max(outputs.data, 1)
                total += labels.size(0)
                correct += (predicted == labels).sum().item()
        accuracy = correct / total
        return accuracy

    def train(self):
        """Run the training loop with early stopping on the training loss.

        Saves the model state dict to 'stgcn_model.pth' whenever the epoch's
        mean training loss improves; stops after `patience` epochs without
        improvement.
        """
        for epoch in range(self.num_epochs):
            self.model.train()
            running_loss = 0.0
            for i, (inputs, labels) in enumerate(self.train_loader):
                self.optimizer.zero_grad()
                outputs = self.model(inputs)
                loss = self.criterion(outputs, labels)
                loss.backward()
                self.optimizer.step()
                running_loss += loss.item()
            # Mean loss across batches for this epoch.
            train_loss = running_loss / len(self.train_loader)

            train_accuracy = self.calculate_accuracy(self.train_loader)
            print(f'Epoch {epoch + 1}/{self.num_epochs}, Loss: {train_loss}, Train Accuracy: {train_accuracy * 100:.2f}%')

            if train_loss < self.best_train_loss:
                # New best: checkpoint weights and reset the patience counter.
                self.best_train_loss = train_loss
                self.best_epoch = epoch
                self.counter = 0
                print('Saving')
                torch.save(self.model.state_dict(),'stgcn_model.pth')
            else:
                self.counter += 1
                if self.counter >= self.patience:
                    print(f'Early stopping at epoch {epoch + 1}. Best epoch was {self.best_epoch + 1}.')
                    break