import glob
import os

import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn as nn
import tqdm
from PIL import Image
from torch.utils.data import Dataset, DataLoader

# Collect the training images; the label is encoded in each file name
# ("cat.*" vs "dog.*" — Kaggle Dogs-vs-Cats layout, TODO confirm).
image_paths = glob.glob("./train/*")

# NOTE(review): this "test" split is the first 1000 TRAINING files, so it
# overlaps the training set — reported accuracy will be optimistic.
test_paths = image_paths[:1000]

# Report counts instead of dumping a 1000-element path list to stdout.
print(f"found {len(image_paths)} images, {len(test_paths)} used for eval")

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

class CatDogDataset(Dataset):
    """Dogs-vs-cats dataset yielding (image, label) pairs.

    Each item is a (3, 224, 224) float32 tensor scaled to [0, 1] and a
    shape-(1,) float32 label: 1.0 if the file's basename starts with
    "cat", else 0.0 (suitable for BCELoss).
    """

    def __init__(self, paths):
        super(CatDogDataset, self).__init__()
        # Image file paths; the label is derived from each file name.
        self.paths = paths

    def __getitem__(self, idx):
        img_path = self.paths[idx]
        # os.path.basename handles both "/" and "\\" separators;
        # the original split("/") mislabeled files on Windows paths.
        label = 1 if os.path.basename(img_path)[:3] == "cat" else 0
        img = Image.open(img_path)
        # Force 3 channels: a grayscale or RGBA file would otherwise
        # break the (C, H, W) transpose and the Conv2d(3, ...) stem.
        img = img.convert("RGB")
        img = img.resize((224, 224))
        # HWC uint8 -> CHW float in [0, 1]; the division allocates a
        # fresh contiguous array, so no .copy() is needed.
        img_array = np.array(img).transpose(2, 0, 1) / 255.0
        return (torch.tensor(img_array, dtype=torch.float32),
                torch.tensor([label], dtype=torch.float32))

    def __len__(self):
        return len(self.paths)

# class RepeatLayer(nn.Module):
#     def __init__(self, in_channels, out_channels):
#         super(RepeatLayer, self).__init__()
#         self.red = nn.MaxPool2d(kernel_size=2, stride=2)
#         self.conv_relu = nn.Sequential(
#             nn.Conv2d(in_channels, out_channels, 3, stride=1, padding=1),
#             nn.BatchNorm2d(out_channels),
#             nn.ReLU(),
#             nn.Conv2d(out_channels, out_channels, 3, stride=1, padding=1),
#             nn.BatchNorm2d(out_channels),
#             nn.ReLU(),
#             nn.Conv2d(out_channels, out_channels, 3, stride=1, padding=1),
#             nn.BatchNorm2d(out_channels),
#             nn.ReLU(),
#         )
#     def forward(self, x):
#         x = self.red(x)
#         x = self.conv_relu(x)
#         # print(x.shape)
#         return x 

# class VGG(nn.Module):
#     def __init__(self):
#         super(VGG, self).__init__()
#         self.conv1 = nn.Sequential(
#             nn.Conv2d(3, 64, 3, padding=1),
#             nn.BatchNorm2d(64),
#             nn.ReLU(),

#             nn.Conv2d(64, 64, 3, padding=1),
#             nn.BatchNorm2d(64),
#             nn.ReLU(),
#         )
#         self.conv2 = nn.Sequential(
#             nn.MaxPool2d(2, 2),
#             nn.Conv2d(64, 128, 3, padding=1),
#             nn.BatchNorm2d(128),
#             nn.ReLU(),
#             nn.Conv2d(128, 128, 3, padding=1),
#             nn.BatchNorm2d(128),
#             nn.ReLU(),
#         )
#         self.conv3 = RepeatLayer(128, 256)
#         self.conv4 = RepeatLayer(256, 512)
#         self.conv5 = RepeatLayer(512, 512)
#         self.maxpool = nn.MaxPool2d(2, 2) 
#         self.fc1 = nn.Linear(7*7*512, 512)
#         self.fc = nn.Linear(512, 1)

#     def forward(self, x):
#         # x: (batch, channels, W, H)
#         batch = x.shape[0]
#         x = self.conv1(x)
#         x = self.conv2(x)
#         x = self.conv3(x)
#         x = self.conv4(x)
#         x = self.conv5(x)
#         x = self.maxpool(x)
#         x = x.view(batch, -1)
#         x = self.fc1(x)
#         out = self.fc(x)
#         print(out)
#         out = torch.sigmoid(out)

#         return out 

# VGG layer recipes (Simonyan & Zisserman, "Very Deep Convolutional
# Networks", 2014). An integer is a 3x3 Conv2d's output channel count;
# 'M' inserts a 2x2 stride-2 max-pool that halves the spatial size.
cfg = {
    'VGG11': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
    'VGG13': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
    'VGG16': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],
    'VGG19': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'],
}

class VGG(nn.Module):
    """VGG backbone with a single-logit sigmoid head for binary
    (cat vs. dog) classification.

    Expects 3x224x224 inputs: every recipe in `cfg` contains five 'M'
    pools, reducing 224 -> 7, so the flattened feature size is
    512 * 7 * 7 = 25088.
    """

    def __init__(self, vgg_name):
        super(VGG, self).__init__()
        # vgg_name selects a recipe from the module-level `cfg` dict
        # ('VGG11' / 'VGG13' / 'VGG16' / 'VGG19').
        self.features = self._make_layers(cfg[vgg_name])
        # 512 * 7 * 7 = 25088 — hard-wired to 224x224 inputs.
        self.classifier = nn.Linear(512 * 7 * 7, 1)

    def forward(self, x):
        """Return sigmoid probabilities of shape (batch, 1)."""
        out = self.features(x)
        out = out.view(out.size(0), -1)  # flatten per sample
        out = self.classifier(out)
        return torch.sigmoid(out)

    def _make_layers(self, layer_cfg):
        """Build the conv stack described by `layer_cfg` (see `cfg`).

        Renamed from `cfg` to avoid shadowing the module-level dict.
        """
        layers = []
        in_channels = 3
        for spec in layer_cfg:
            if spec == 'M':
                layers.append(nn.MaxPool2d(kernel_size=2, stride=2))
            else:
                layers += [nn.Conv2d(in_channels, spec, kernel_size=3, padding=1),
                           nn.BatchNorm2d(spec),
                           nn.ReLU(inplace=True)]
                in_channels = spec
        # Kept from the original recipe; a 1x1 stride-1 AvgPool is a no-op.
        layers.append(nn.AvgPool2d(kernel_size=1, stride=1))
        return nn.Sequential(*layers)

if __name__ == "__main__":

    model = VGG("VGG16")
    model.to(device)
    model.train()

    # NOTE(review): test_paths is a subset of image_paths, so the
    # periodic "accuracy" below is measured on training data — use a
    # disjoint split for a real estimate.
    train_dataset = CatDogDataset(image_paths)
    dataloader = DataLoader(train_dataset, batch_size=16, shuffle=True)
    test_data = CatDogDataset(test_paths)
    test_dataloader = DataLoader(test_data, batch_size=1, shuffle=True)

    # BCELoss pairs with the sigmoid output of VGG.forward.
    loss_func = nn.BCELoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=0.001)

    step = 0
    report_loss = 0.0  # loss accumulated since the last report
    for epoch in range(5):
        for img, label in tqdm.tqdm(dataloader, total=len(dataloader)):
            step += 1
            optimizer.zero_grad()
            img = img.to(device)
            label = label.to(device)

            pred = model(img)
            loss = loss_func(pred, label)
            report_loss += loss.item()

            loss.backward()
            optimizer.step()

            # Evaluate every 200 optimizer steps.
            if step % 200 == 0:
                model.eval()
                print("report loss is " + str(report_loss))
                report_loss = 0.0
                right = 0
                total = len(test_data)
                # no_grad: inference needs no autograd graph
                # (saves memory and time during the eval pass).
                with torch.no_grad():
                    for test_img, test_label in tqdm.tqdm(test_dataloader):
                        test_img = test_img.to(device)
                        test_label = test_label.to(device)
                        pred_out = model(test_img)
                        # Sigmoid output > 0.5 -> predicted class 1 ("cat").
                        if int((pred_out > 0.5).item()) == int(test_label.item()):
                            right += 1
                acc = right / total
                print("acc is " + str(acc))
                model.train()