import torch
torch.autograd.set_detect_anomaly(True)
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import DataLoader
from sklearn.metrics import confusion_matrix
from torchvision import datasets, transforms
import numpy as np
np.set_printoptions(suppress=True, precision=10)
import yaml
import os
import csv

from models import SimpleMLP


class WritableToTensor(transforms.ToTensor):
    """``ToTensor`` variant that copies numpy inputs before conversion.

    A fresh, writable ``float32`` copy is taken for ``np.ndarray`` inputs
    before delegating to the parent transform — presumably to avoid
    PyTorch's warning about tensors built from non-writable arrays
    (TODO confirm against the dataset producing these arrays).
    """

    def __call__(self, pic):
        # astype(..., copy=True) always yields a new writable float32 array,
        # equivalent to np.array(pic, copy=True, dtype=np.float32).
        if isinstance(pic, np.ndarray):
            pic = pic.astype(np.float32, copy=True)
        return super().__call__(pic)


class Exp:
    """Training / evaluation harness for MNIST classification experiments.

    Builds the model named in ``config``, loads MNIST, and writes logs,
    the resolved config, and a checkpoint into ``config['output_dir']``.

    Required config keys: ``model``, ``device``, ``init_weight_method``,
    ``batch_size``, ``epochs``, ``output_dir``, ``use_res_connect``,
    ``learning_rate`` (a missing key raises ``KeyError``).
    """

    def __init__(self, config):
        self.config = config
        self.model_name = self.config['model']
        self.device = self.config['device']
        self.init_weight_method = self.config['init_weight_method']
        self.batch_size = self.config['batch_size']
        self.num_epochs = self.config['epochs']
        self.output_dir = self.config['output_dir']
        self.use_res_connect = self.config['use_res_connect']

        self.model = self.__build_model()
        # _init_weight is assumed to be defined on the model class in models.py.
        self.model._init_weight(method=self.init_weight_method)

        self.criterion = nn.CrossEntropyLoss().to(self.device)
        self.optimizer = optim.Adam(self.model.parameters(), lr=self.config['learning_rate'])

        self.__load_data()

        # exist_ok avoids the separate existence check (and its race window).
        os.makedirs(self.output_dir, exist_ok=True)

        # Persist the exact config used, for reproducibility.
        with open(os.path.join(self.output_dir, 'config.yaml'), 'w') as f:
            yaml.dump(self.config, f)

    def __build_model(self):
        """Instantiate the configured model and move it to the target device.

        Raises:
            ValueError: if ``config['model']`` names an unknown architecture
                (previously this path crashed with an ``UnboundLocalError``).
        """
        if self.model_name == 'SimpleMLP':
            return SimpleMLP(res_connect=self.use_res_connect).to(self.device)
        raise ValueError(f"Unknown model name: {self.model_name!r}")

    def __load_data(self):
        """Create MNIST train/test datasets and loaders (downloads if absent)."""
        transform = transforms.Compose([
            WritableToTensor(),
            # Standard MNIST mean/std normalization constants.
            transforms.Normalize((0.1307,), (0.3081,))
        ])
        self.train_dataset = datasets.MNIST(root='./data', train=True, download=True, transform=transform)
        self.test_dataset = datasets.MNIST(root='./data', train=False, download=True, transform=transform)
        self.train_loader = DataLoader(dataset=self.train_dataset, batch_size=self.batch_size, shuffle=True)
        self.test_loader = DataLoader(dataset=self.test_dataset, batch_size=self.batch_size, shuffle=False)

    def train(self):
        """Train for ``config['epochs']`` epochs.

        Writes per-epoch mean loss to ``train_log.txt`` and saves the final
        state dict to ``<output_dir>/<model_name>.pth``.
        """
        num_epochs = self.num_epochs
        with open(os.path.join(self.output_dir, 'train_log.txt'), 'w') as log_file:
            for epoch in range(num_epochs):
                self.model.train()
                running_loss = 0.0
                for images, labels in self.train_loader:
                    self.optimizer.zero_grad()
                    images = images.to(self.device)
                    labels = labels.to(self.device)
                    outputs = self.model(images)
                    loss = self.criterion(outputs, labels)
                    loss.backward()
                    self.optimizer.step()
                    running_loss += loss.item()
                print(f'Epoch [{epoch+1}/{num_epochs}], Loss: {running_loss/len(self.train_loader):.4f}', file=log_file)

        # Checkpoint filename derives from the configured model name,
        # matching what test_with_ckpt() loads.
        torch.save(self.model.state_dict(), os.path.join(self.output_dir, f"{self.model_name}.pth"))

    def test(self):
        """Evaluate the in-memory model; write accuracy to ``test_log.txt``."""
        self.model.eval()
        correct = 0
        total = 0
        with torch.no_grad():
            for images, labels in self.test_loader:
                images = images.to(self.device)
                labels = labels.to(self.device)
                outputs = self.model(images)
                # Argmax over the class dimension; `.data` is unnecessary
                # (and discouraged) inside torch.no_grad().
                _, predicted = torch.max(outputs, 1)
                total += labels.size(0)
                correct += (predicted == labels).sum().item()
        with open(os.path.join(self.output_dir, 'test_log.txt'), 'w') as log_file:
            print(f'Accuracy of the model on the test images: {100 * correct / total:.2f}%', file=log_file)

    def test_with_ckpt(self):
        """Load the saved checkpoint and evaluate on the test set.

        Writes accuracy and a confusion matrix to ``test_ckpt_log.txt`` and
        keeps per-sample softmax probabilities in ``self.all_softmax``.

        Raises:
            FileNotFoundError: if the checkpoint produced by ``train`` is absent.
        """
        # Use the configured model name so this stays consistent with train()
        # (previously hard-coded to 'SimpleMLP.pth', breaking any other model).
        ckpt_path = os.path.join(self.output_dir, f"{self.model_name}.pth")

        if not os.path.exists(ckpt_path):
            raise FileNotFoundError(f"Checkpoint not found at '{ckpt_path}'")

        self.model.load_state_dict(torch.load(ckpt_path, map_location=self.device))
        self.model.to(self.device)
        self.model.eval()

        correct = 0
        total = 0
        all_preds = []
        all_labels = []
        self.all_softmax = []  # per-sample class probabilities, filled below

        with torch.no_grad():
            for images, labels in self.test_loader:
                images = images.to(self.device)
                labels = labels.to(self.device)

                # Forward pass
                outputs = self.model(images)

                # Softmax probabilities — previously computed but never stored.
                probabilities = F.softmax(outputs, dim=1)
                self.all_softmax.extend(probabilities.cpu().numpy())

                _, predicted = torch.max(outputs, 1)

                total += labels.size(0)
                correct += (predicted == labels).sum().item()

                all_preds.extend(predicted.cpu().numpy())
                all_labels.extend(labels.cpu().numpy())

        accuracy = 100 * correct / total

        cm = confusion_matrix(all_labels, all_preds)
        cm_str = '\n'.join('\t'.join(map(str, row)) for row in cm)

        log_path = os.path.join(self.output_dir, 'test_ckpt_log.txt')

        with open(log_path, 'w') as log_file:
            log_file.write(f'Accuracy of the model on the test images: {accuracy:.2f}%\n')
            log_file.write('Confusion Matrix:\n')
            log_file.write(cm_str)

        print(f'Test with checkpoint completed. Results saved to {log_path}')