import torch
# import numpy as np
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms
from torchvision import transforms
from torch.utils.data import DataLoader
from PIL import Image
from tqdm import tqdm
import os

# Per-split torchvision preprocessing pipelines, keyed by 'train' / 'valid'.
# The Normalize means/stds are the standard ImageNet channel statistics,
# matching the ImageNet-pretrained ResNet backbone used below.
data_transforms = {
    'train': transforms.Compose([
        transforms.Resize([224, 224]),
        transforms.RandomRotation(45),
        # NOTE(review): CenterCrop(224) directly after Resize([224, 224]) is a
        # no-op — the image is already 224x224. Confirm whether a larger
        # Resize before the crop was intended.
        transforms.CenterCrop(224),
        transforms.RandomHorizontalFlip(p=0.5),
        transforms.RandomVerticalFlip(p=0.5),
        # Mild photometric augmentation.
        transforms.ColorJitter(brightness=0.2, contrast=0.1, saturation=0.1, hue=0.1),
        transforms.RandomGrayscale(p=0.1),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ]),
    # Validation/test pipeline: deterministic resize + normalize only.
    "valid":transforms.Compose([
        transforms.Resize([224, 224]),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ])
}

class nav_dataset(torch.utils.data.Dataset):
    """Dataset of (image, label) pairs described by a whitespace-separated
    index file with one ``<relative_image_path> <integer_label>`` per line.

    Args:
        file_path: path to the index file.
        transform: optional callable applied to the loaded PIL image.
        root_dir: directory prepended to each image path. The default keeps
            the previously hard-coded dataset root, so existing callers are
            unaffected.
    """
    def __init__(self, file_path, transform=None,
                 root_dir="/zssd/zxy/nav_dataset/output"):
        self.file_path = file_path
        self.transform = transform
        self.root_dir = root_dir
        self.images = []
        self.labels = []
        with open(file_path, 'r') as f:
            for line in f:
                line = line.strip()
                if not line:
                    continue  # tolerate blank/trailing lines in the index
                img_path, label = line.split()
                self.images.append(img_path)
                self.labels.append(int(label))

    def __len__(self):
        return len(self.images)

    def __getitem__(self, index):
        img_path = self.images[index]
        full_path = os.path.join(self.root_dir, img_path)
        # Force 3-channel RGB: grayscale or RGBA files would otherwise break
        # the 3-channel Normalize in the transform pipeline.
        img = Image.open(full_path).convert('RGB')
        if self.transform:
            img = self.transform(img)
        label = torch.tensor(self.labels[index], dtype=torch.int64)
        return img, label


class my_CNN1(nn.Module):
    """Frozen ImageNet-pretrained ResNet-152 backbone with a trainable
    linear classification head.

    Args:
        n_classes: number of output classes of the final linear layer.
    """
    def __init__(self, n_classes):
        super().__init__()
        self.n_classes = n_classes
        self.cnn_model = torchvision.models.resnet152(
            weights=torchvision.models.ResNet152_Weights.IMAGENET1K_V1)
        # Freeze the backbone: only the new head below is trained.
        for param in self.cnn_model.parameters():
            param.requires_grad = False
        # Replace the original 1000-class head with a pass-through so the
        # backbone emits its raw 2048-d pooled features. nn.Identity is the
        # built-in equivalent of the hand-rolled nested Identity class it
        # replaces (no parameters, so state_dict keys are unchanged).
        self.cnn_model.fc = nn.Identity()
        # Trainable head; kept under the same attribute ("fc1" inside
        # cnn_model) so existing checkpoints still load.
        self.cnn_model.fc1 = nn.Linear(2048, self.n_classes)

    def forward(self, x):
        # Backbone -> 2048-d features -> class logits.
        x = self.cnn_model(x)
        x = self.cnn_model.fc1(x)
        return x


def train_model(model, num_epochs, train_loader, lr=0.001):
    """Train *model* with Adam + cross-entropy for *num_epochs* epochs.

    Args:
        model: classifier returning raw logits; assumed already moved to the
            target device by the caller (as done in ``__main__``).
        num_epochs: number of full passes over the training data.
        train_loader: yields (images, labels) batches.
        lr: Adam learning rate; default keeps the original hard-coded value.
    """
    criterion = nn.CrossEntropyLoss()
    # Optimize only trainable parameters — passing the frozen backbone
    # weights would needlessly allocate Adam state for them.
    optimizer = torch.optim.Adam(
        (p for p in model.parameters() if p.requires_grad), lr=lr)
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    model.train()
    for epoch in range(num_epochs):
        print('Epoch {}/{}'.format(epoch + 1, num_epochs))
        loop = tqdm(train_loader, total=len(train_loader), desc="训练中")
        for images, labels in loop:
            images = images.to(device)
            labels = labels.to(device)
            optimizer.zero_grad()
            outputs = model(images)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()
            # Show the current batch loss on the progress bar.
            loop.set_postfix(loss=loss.item())

def test_model(model,test_loader):
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    model.eval()
    correct = 0
    total = 0
    with torch.no_grad():
        for images, labels in test_loader:
            images = images.to(device)
            labels = labels.to(device)
            outputs = model(images)
            _, predicted = torch.max(outputs.data, 1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()
    print('Accuracy of the network on the test images: %d %%' % (100 * correct / total))


if __name__ == '__main__':
    # 23-way classifier on the nav dataset.
    model = my_CNN1(23)
    training_data = nav_dataset(file_path='train.txt', transform=data_transforms['train'])
    test_data = nav_dataset(file_path='test.txt', transform=data_transforms['valid'])
    # The previous random_split was degenerate (train_size == full dataset
    # length, validation half empty and never used) — train on the full
    # dataset directly.
    train_dataloader = DataLoader(training_data, batch_size=64, shuffle=True)
    # Evaluation order does not affect accuracy, so no shuffle here.
    test_dataloader = DataLoader(test_data, batch_size=64, shuffle=False)
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    model.to(device)
    train_model(model, 15, train_dataloader)
    test_model(model, test_dataloader)

