import os
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from GenreFeatureData import GenreFeatureData
from torch.utils.data import DataLoader, TensorDataset
from sklearn.model_selection import train_test_split, GridSearchCV

# class definition
class CNN(nn.Module):
    """2-D CNN genre classifier.

    Expects input spectrograms of shape [batch_size, 1, 128, 33]
    (single channel, 128 frequency bins, 33 time frames).
    """

    def __init__(self, num_classes):
        """Build the network.

        Args:
            num_classes: number of genre classes (size of the output layer).
        """
        super(CNN, self).__init__()
        # Two 3x3 same-padding convolutions; each is followed by 2x2 max pooling.
        self.conv1 = nn.Conv2d(1, 64, kernel_size=3, padding=1)
        self.conv2 = nn.Conv2d(64, 128, kernel_size=3, padding=1)
        self.pool = nn.MaxPool2d(kernel_size=2, stride=2)

        # The 3x3 / stride-1 / padding-1 convolutions preserve spatial size,
        # so only the two 2x2 / stride-2 pooling layers shrink the feature maps.
        # MaxPool2d output size: (size - kernel_size) // stride + 1.
        def pooled_size_out(size, kernel_size=2, stride=2):
            return (size - kernel_size) // stride + 1

        # Spatial size after the two conv+pool stages: 128 -> 64 -> 32, 33 -> 16 -> 8.
        conv_height = pooled_size_out(pooled_size_out(128))
        conv_width = pooled_size_out(pooled_size_out(33))

        # 32 * 8 * 128 = 32768 inputs to the first dense layer.
        # NOTE: an earlier revision modeled a stride-2 convolution here, got
        # 36864, and fell back to hard-coding 32768; the corrected formula now
        # produces the same 32768, so the layer shape is unchanged.
        linear_input_size = conv_height * conv_width * 128
        self.fc1 = nn.Linear(linear_input_size, 1024)
        self.fc2 = nn.Linear(1024, num_classes)
        self.dropout = nn.Dropout(0.5)

    def forward(self, x):
        """Run the forward pass.

        Args:
            x: tensor of shape [batch_size, 1, 128, 33].

        Returns:
            Logits tensor of shape [batch_size, num_classes].

        Raises:
            ValueError: if x is not 4-D with a single channel.
        """
        if x.dim() != 4 or x.shape[1] != 1:
            raise ValueError(f"Expected input shape: [batch_size, 1, 128, 33], got: {x.shape}")
        # conv -> ReLU -> pool, twice.
        x = self.pool(F.relu(self.conv1(x)))
        x = self.pool(F.relu(self.conv2(x)))
        # Flatten all feature maps into one vector per sample for the dense layers.
        x = x.view(x.size(0), -1)
        x = self.dropout(F.relu(self.fc1(x)))
        x = self.fc2(x)
        return x

    def get_accuracy(self, logits, target):
        """Compute batch accuracy (percentage) for a training round.

        Args:
            logits: [batch_size, num_classes] raw scores.
            target: assumed one-hot encoded, [batch_size, num_classes]
                    — TODO confirm with callers.

        Returns:
            Accuracy in percent (0.0 - 100.0).
        """
        batch_size, _ = logits.size()
        _, predicted = torch.max(logits.data, 1)
        correct = (predicted == torch.argmax(target, dim=1)).sum().item()
        accuracy = 100.0 * correct / batch_size
        return accuracy

def train_step(model, loss_function, optimizer, X_batch, y_batch):
    """Run a single optimization step on one batch.

    Args:
        model: the network being trained.
        loss_function: criterion applied to (predictions, float targets).
        optimizer: optimizer holding the model's parameters.
        X_batch: input batch tensor.
        y_batch: target batch; cast to float before the loss because the
            targets are assumed to be one-hot / probability encoded
            — TODO confirm against the data pipeline.

    Returns:
        The loss tensor for this batch (still attached to the graph).
    """
    # One gradient reset is enough: optimizer.zero_grad() clears the grads
    # of every parameter registered with the optimizer.
    optimizer.zero_grad()

    # Ensure the model is in training mode (enables dropout etc.).
    model.train()
    y_pred = model(X_batch)

    y_batch = y_batch.float()
    loss = loss_function(y_pred, y_batch)
    loss.backward()
    optimizer.step()

    return loss
def validate(model, data_loader, loss_function, device):
    """Evaluate the model over an entire data loader.

    Args:
        model: network to evaluate (switched to eval mode).
        loss_function: criterion applied to (logits, class indices).
        data_loader: iterable of (inputs, one-hot targets) batches.
        device: device to move each batch onto.

    Returns:
        Tuple of (mean loss per sample, accuracy in percent).
    """
    model.eval()  # disable dropout / use eval-mode statistics
    running_loss = 0.0
    running_correct = 0
    seen = 0

    with torch.no_grad():
        for batch_x, batch_y in data_loader:
            batch_x = batch_x.to(device)
            batch_y = batch_y.to(device)

            logits = model(batch_x)

            # One-hot targets -> class indices for the criterion.
            labels = torch.argmax(batch_y, dim=1)
            batch_loss = loss_function(logits, labels)

            # Count correct top-1 predictions.
            _, preds = torch.max(logits.data, 1)
            n = batch_x.size(0)

            running_loss += batch_loss.item() * n
            running_correct += (preds == labels).sum().item()
            seen += n

    mean_loss = running_loss / seen
    accuracy_pct = (running_correct / seen) * 100
    return mean_loss, accuracy_pct
# def validate(model, data_loader, loss_function):
#     model.eval()  # Set the model to evaluation mode
#     total_loss = 0.0
#     total_accuracy = 0.0
#     num_samples = 0

#     with torch.no_grad():
#         for inputs, targets in data_loader:
#             inputs = inputs.cuda()
#             targets = targets.cuda()

#             # Forward pass
#             outputs, _ = model(inputs)
            
#             output = torch.argmax(outputs, dim=1).float()
#             target = torch.argmax(targets, dim=1).float()

#             loss = loss_function(output, target)

#             # Accumulate loss and accuracy
#             total_loss += loss.item() * inputs.size(0)
#             total_accuracy += model.get_accuracy(outputs, targets) * inputs.size(0)
#             num_samples += inputs.size(0)

#     avg_loss = total_loss / num_samples
#     avg_accuracy = total_accuracy / num_samples

#     return avg_loss, avg_accuracy

def main():
    """Train the CNN genre classifier and save the best-performing checkpoint."""
    # Hyperparameters
    output_dim = 6  # Number of genre classes; adjust as needed
    batch_size = 32
    learning_rate = 0.0001
    num_epochs = 50
    best_accuracy = 0.0
    best_model = None            # state_dict of the best model seen so far
    best_checkpoint_path = None  # filename for that checkpoint

    # Fall back to CPU when CUDA is unavailable so the script stays runnable.
    device = "cuda:0" if torch.cuda.is_available() else "cpu"
    genre_features = GenreFeatureData()

    # If any of the preprocessed files is missing, regenerate them all
    # together for self-consistency.
    preprocessed_files = (
        genre_features.train_X_preprocessed_data,
        genre_features.train_Y_preprocessed_data,
        genre_features.dev_X_preprocessed_data,
        genre_features.dev_Y_preprocessed_data,
        genre_features.test_X_preprocessed_data,
        genre_features.test_Y_preprocessed_data,
    )
    if all(os.path.isfile(path) for path in preprocessed_files):
        print("Preprocessed files exist, deserializing npy files")
        genre_features.load_deserialize_data()
    else:
        print("Preprocessing raw audio files")
        genre_features.load_preprocess_data()

    # Inputs: add the channel dimension expected by the CNN ([N, 1, H, W]).
    train_X = torch.from_numpy(genre_features.train_X).type(torch.Tensor).unsqueeze(1)
    dev_X = torch.from_numpy(genre_features.dev_X).type(torch.Tensor).unsqueeze(1)
    test_X = torch.from_numpy(genre_features.test_X).type(torch.Tensor).unsqueeze(1)

    # Targets: presumably one-hot encoded by GenreFeatureData (validate() and
    # get_accuracy() both argmax over dim 1) — TODO confirm.
    train_Y = torch.from_numpy(genre_features.train_Y).type(torch.LongTensor)
    dev_Y = torch.from_numpy(genre_features.dev_Y).type(torch.LongTensor)
    test_Y = torch.from_numpy(genre_features.test_Y).type(torch.LongTensor)

    # Pool every split, then re-split 80/20 with a fixed seed for reproducibility.
    X = torch.concatenate((train_X, test_X, dev_X), axis=0)
    y = torch.concatenate((train_Y, test_Y, dev_Y), axis=0)
    train_X, test_X, train_Y, test_Y = train_test_split(X, y, test_size=0.2, random_state=42)

    # Initialize model, loss, and optimizer.
    model = CNN(output_dim).to(device)
    loss_function = nn.CrossEntropyLoss().to(device)
    optimizer = optim.Adam(model.parameters(), lr=learning_rate)

    # Prepare DataLoaders; drop_last keeps every training batch the same size.
    train_data = TensorDataset(train_X, train_Y)
    test_data = TensorDataset(test_X, test_Y)
    train_loader = DataLoader(train_data, batch_size=batch_size, shuffle=True, drop_last=True)
    test_loader = DataLoader(test_data, batch_size=batch_size, shuffle=False)

    # Sanity check: every parameter must participate in backprop.
    # (Explicit raise instead of assert, which is stripped under -O.)
    for param in model.parameters():
        if not param.requires_grad:
            raise RuntimeError("Some model parameters do not require gradients.")

    # Training loop
    for epoch in range(num_epochs):
        model.train()
        epoch_loss = 0.0
        epoch_accuracy = 0.0

        for X_batch, y_batch in train_loader:
            X_batch, y_batch = X_batch.to(device), y_batch.to(device)

            loss = train_step(model, loss_function, optimizer, X_batch, y_batch)

            # Re-run the forward pass for accuracy only; no_grad avoids
            # building a second autograd graph. (The model is still in
            # training mode, so this is a training-mode accuracy estimate.)
            with torch.no_grad():
                y_pred = model(X_batch)
            accuracy = model.get_accuracy(y_pred, y_batch)

            epoch_loss += loss.item()
            epoch_accuracy += accuracy

        # Validation step
        val_loss, val_accuracy = validate(model, test_loader, loss_function, device)

        print(f'Epoch [{epoch+1}/{num_epochs}], Loss: {epoch_loss/len(train_loader)}, Accuracy: {epoch_accuracy/len(train_loader)}, Val Loss: {val_loss}, Val Accuracy: {val_accuracy}')

        # Track the best validation accuracy seen so far.
        if val_accuracy > best_accuracy:
            best_accuracy = val_accuracy
            best_model = model.state_dict()
            best_checkpoint_path = f"CNN_{output_dim}classes_{best_accuracy}.pth"

    # Save once at the end, and only if some epoch actually improved on 0.0
    # (previously this raised UnboundLocalError when no checkpoint was taken).
    if best_model is not None:
        torch.save(best_model, best_checkpoint_path)


if __name__ == "__main__":
    main()