import os
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader
import torch.nn.functional as F

# Prefer the first GPU when available, otherwise fall back to the CPU.
device = torch.device("cuda:0") if torch.cuda.is_available() else torch.device("cpu")

class CustomDataset(Dataset):
    """Dataset of index-paired ``{i}_data.npy`` / ``{i}_lab.npy`` files.

    The number of samples is taken from the file count in ``data_dir``;
    sample ``i`` pairs ``data_dir/{i}_data.npy`` with ``label_dir/{i}_lab.npy``.
    """

    def __init__(self, data_dir, label_dir):
        self.data_dir = data_dir
        self.label_dir = label_dir
        # One sample per file found in the data directory.
        self.list_len = len(os.listdir(data_dir))
        self.data_files = ['%d_data.npy' % i for i in range(self.list_len)]
        self.label_files = ['%d_lab.npy' % i for i in range(self.list_len)]

    def __len__(self):
        """Return the number of samples."""
        return self.list_len

    def __getitem__(self, idx):
        """Load sample ``idx``.

        Returns a ``(data, label)`` pair: the data as a float32 tensor with a
        leading channel dimension added, the label (first entry of the label
        file) as a long tensor.
        """
        sample = np.load(os.path.join(self.data_dir, self.data_files[idx]))
        target = np.load(os.path.join(self.label_dir, self.label_files[idx]))[0]
        sample_tensor = torch.tensor(sample, dtype=torch.float32).unsqueeze(0)
        return sample_tensor, torch.tensor(target, dtype=torch.long)

# Training split: data/label files live in sibling directories and are
# paired by index (see CustomDataset).
train_data_dir = 'Train_data'
train_label_dir = 'Train_lab'
train_dataset = CustomDataset(train_data_dir, train_label_dir)
train_loader = DataLoader(train_dataset, batch_size=5, shuffle=True)

# Test split, loaded the same way.
# NOTE(review): shuffle=True on the test loader does not affect the averaged
# metrics computed below, but it is usually unnecessary for evaluation.
test_data_dir = 'Test_data'
test_label_dir = 'Test_lab'
test_dataset = CustomDataset(test_data_dir, test_label_dir)
test_loader = DataLoader(test_dataset, batch_size=5, shuffle=True)

class Faildetect_CNN(nn.Module):
    """Two-stage convolutional classifier with a 5-way output head.

    The fully-connected head expects the conv features to flatten to
    32 * 45 * 4 values, i.e. inputs whose spatial size is 180 x 16
    (each 2x2 max-pool halves both spatial dimensions).
    """

    def __init__(self):
        super(Faildetect_CNN, self).__init__()
        # Stage 1: 1 -> 16 channels, spatial size halved by the pool.
        self.conv1 = nn.Sequential(
            nn.Conv2d(1, 16, kernel_size=3, stride=1, padding=1),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2),
        )
        # Stage 2: 16 -> 32 channels, spatial size halved again.
        self.conv2 = nn.Sequential(
            nn.Conv2d(16, 32, kernel_size=3, stride=1, padding=1),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2),
        )
        # Dense head: flattened 32*45*4 features -> 256 -> 5 class scores.
        self.FC1 = nn.Linear(32 * 45 * 4, 256)
        self.FC2 = nn.Linear(256, 5)

    def forward(self, x):
        """Return raw class logits of shape (batch, 5)."""
        features = self.conv2(self.conv1(x))
        flat = features.view(features.size(0), -1)  # (batch, 32*45*4)
        hidden = F.relu(self.FC1(flat))
        return self.FC2(hidden)

class CNN(nn.Module):
    """Plain (non-Sequential) 5-class convolutional classifier.

    The first linear layer expects the conv features to flatten to
    32 * 45 * 4 values, i.e. single-channel inputs of spatial size 180 x 16.
    """

    def __init__(self):
        super(CNN, self).__init__()
        self.conv1 = nn.Conv2d(1, 16, kernel_size=3, stride=1, padding=1)
        self.pool1 = nn.MaxPool2d(2)
        self.conv2 = nn.Conv2d(16, 32, kernel_size=3, stride=1, padding=1)
        self.pool2 = nn.MaxPool2d(2)
        # Dense head sized to the flattened conv output.
        self.fc1 = nn.Linear(32 * 45 * 4, 128)
        self.fc2 = nn.Linear(128, 5)

    def forward(self, x):
        """Return raw class logits of shape (batch, 5)."""
        # Two conv stages: ReLU then 2x2 downsample each time.
        x = self.pool1(torch.relu(self.conv1(x)))
        x = self.pool2(torch.relu(self.conv2(x)))
        # Flatten per-sample features for the dense head.
        x = torch.relu(self.fc1(x.view(x.size(0), -1)))
        return self.fc2(x)


# Build the model and move it onto the selected device.  The original code
# left the model on the CPU while the training loop sends every batch to
# `device`, which crashes on the first forward pass whenever CUDA is
# available (input and parameters on different devices).
model = Faildetect_CNN().to(device)
print(model)
print('# Model parameters:', sum(param.numel() for param in model.parameters()))

# Loss and optimizer for 5-class classification (CrossEntropyLoss expects
# raw logits, which is what the model's forward returns).
Criterion = nn.CrossEntropyLoss()
Optimizer = optim.Adam(model.parameters())

# Per-epoch history, appended to by the training loop below.
train_loss = []
train_acc = []
test_loss = []
test_acc  = []

for epoch in range(10):

    # Running sums for this epoch's loss / accuracy statistics.
    train_loss_sum, test_loss_sum = 0.0, 0.0
    train_acc_sum, test_acc_sum = 0, 0
    train_num, test_num = 0, 0
    train_i, test_i = 0, 0

    # ---- training pass ----
    # train() once per pass instead of once per batch (original called it
    # inside the loop, which is redundant).
    model.train()
    for inputs, labels in train_loader:
        inputs = inputs.float().to(device)
        labels = labels.long().to(device)

        pre_labs = model(inputs)
        Loss = Criterion(pre_labs, labels)

        Optimizer.zero_grad()
        Loss.backward()
        Optimizer.step()

        train_loss_sum += Loss.item()
        train_acc_sum += (pre_labs.argmax(dim=1) == labels).sum().item()
        train_num += labels.shape[0]
        train_i += 1

    # ---- evaluation pass (no gradients needed) ----
    model.eval()
    with torch.no_grad():
        for test_data, test_lab in test_loader:
            test_data = test_data.float().to(device)
            test_lab = test_lab.long().to(device)

            pre_test = model(test_data)
            t_loss = Criterion(pre_test, test_lab)

            # .item() so the history stores plain Python floats; the
            # original accumulated the loss *tensor* here, so the
            # `test_loss` list ended up holding tensors.
            test_loss_sum += t_loss.item()
            test_acc_sum += (pre_test.argmax(dim=1) == test_lab).sum().item()
            test_num += test_lab.shape[0]
            test_i += 1

    # Averages over batches (losses) and over samples (accuracies).
    Train_Loss = train_loss_sum / train_i
    Test_Loss = test_loss_sum / test_i
    Train_ACC = train_acc_sum / train_num
    Test_ACC = test_acc_sum / test_num

    print('Epoch:%d, train_loss:%.5f, train_acc:%.5f, test_loss:%.5f, test_acc:%.5f' %
          (epoch, Train_Loss, Train_ACC, Test_Loss, Test_ACC))
    print('-----------------------------------------------')

    train_loss.append(Train_Loss)
    train_acc.append(Train_ACC)
    test_loss.append(Test_Loss)
    test_acc.append(Test_ACC)

print("Finished Training")

# num_epochs = 10
# for epoch in range(num_epochs):
#     model.train()
#     running_loss = 0.0
#     for data, labels in train_loader:
#         optimizer.zero_grad()
#         outputs = model(data)
#         loss = criterion(outputs, labels)
#         loss.backward()
#         optimizer.step()
#         running_loss += loss.item()
#     print(f'Epoch {epoch + 1}, Loss: {running_loss / len(train_loader)}')
