import os
from PIL import Image
from torch.utils import data
import torch as t
from torch.autograd import Variable as V
import numpy as np
from torchvision import transforms as T
from pathlib import Path
import random
from torch.utils.data import DataLoader
# from torch import meter
import time
from torch import nn
import tqdm


class DogCat(data.Dataset):
    """Dataset of dog/cat images stored under ``<root>/dogs`` and ``<root>/cats``.

    Each item is ``(transformed_image_tensor, label)`` with label 0 for dog
    and 1 for cat.
    """

    def __init__(self, root, transforms=None):
        """
        Args:
            root: directory containing ``dogs`` and ``cats`` subdirectories.
            transforms: optional torchvision transform pipeline; when omitted,
                a default augmentation + ImageNet-normalization pipeline is used.
        """
        self.root_path = Path(root)

        dogs = [(ele, 'dog') for ele in self._load_sub('dogs')]
        cats = [(ele, 'cat') for ele in self._load_sub('cats')]
        self.images = dogs + cats
        random.shuffle(self.images)

        # ImageNet channel statistics (standard for pretrained-style pipelines).
        normalize = T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])

        if transforms is not None:
            self.transforms = transforms
        else:
            # T.Scale and T.RandomSizedCrop are deprecated (removed in modern
            # torchvision); T.Resize / T.RandomResizedCrop are the replacements.
            # Crop to 224: the standard AlexNet input size (244 was a typo).
            self.transforms = T.Compose(
                [
                    T.Resize(256),
                    T.RandomResizedCrop(224),
                    T.RandomHorizontalFlip(),
                    T.ToTensor(),
                    normalize,
                ]
            )
        self.label_mapping = {'dog': 0, 'cat': 1}

    def _load_sub(self, sub_dir):
        """Return the full path of every file in ``<root>/<sub_dir>``."""
        cur_dir = self.root_path / sub_dir
        return [os.path.join(cur_dir, img) for img in os.listdir(cur_dir)]

    def __getitem__(self, index):
        """Return the (image tensor, integer label) pair at *index*."""
        img_path, label = self.images[index]
        # Force RGB so grayscale/RGBA files don't break the 3-channel Normalize.
        img = Image.open(img_path).convert('RGB')
        img = self.transforms(img)
        return img, self.label_mapping[label]

    def __len__(self):
        """Number of images (dogs + cats)."""
        return len(self.images)


class BasicModel(t.nn.Module):
    """Base module adding checkpoint save/load helpers to ``nn.Module``.

    BUG FIX: the original ``load``/``save`` bodies were swapped — ``load``
    wrote the state dict to disk and ``save`` read it back. They are now
    named for what they actually do.
    """

    def __init__(self):
        super(BasicModel, self).__init__()
        # Used as the checkpoint filename prefix when no name is given.
        self.model_name = str(type(self))

    def save(self, name=None):
        """Write ``state_dict`` to *name* (auto-timestamped if omitted).

        Returns:
            The path the checkpoint was written to.
        """
        if name is None:
            prefix = 'checkpoints/' + self.model_name + '_'
            name = time.strftime(prefix + '%m%d_%H:%M%S.pth')
        t.save(self.state_dict(), name)
        return name

    def load(self, path=None):
        """Restore parameters from the checkpoint file at *path*."""
        self.load_state_dict(t.load(path))


# from torchvision.models import AlexNet
class AlexNet(nn.Module):
    """Slimmed-down AlexNet-style CNN.

    Five conv layers (narrower than the original AlexNet) followed by a
    three-layer fully-connected classifier. Expects 3x224x224 input, which
    the feature extractor reduces to a 64x6x6 map.
    """

    def __init__(self, num_classes=1000):
        super(AlexNet, self).__init__()
        # Convolutional feature extractor: three stride-2 max-pools shrink
        # the spatial dims from 224 down to 6.
        conv_stack = [
            nn.Conv2d(3, 64, kernel_size=11, stride=4, padding=2),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),
            nn.Conv2d(64, 64, kernel_size=5, padding=2),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),
            nn.Conv2d(64, 128, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(128, 256, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(256, 64, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),
        ]
        self.features = nn.Sequential(*conv_stack)

        # Fully-connected head with dropout regularization.
        fc_stack = [
            nn.Dropout(),
            nn.Linear(64 * 6 * 6, 512),
            nn.ReLU(inplace=True),
            nn.Dropout(),
            nn.Linear(512, 128),
            nn.ReLU(inplace=True),
            nn.Linear(128, num_classes),
        ]
        self.classifier = nn.Sequential(*fc_stack)

    def forward(self, x):
        """Return raw class scores (logits) for a batch of images."""
        feats = self.features(x)
        flat = feats.view(feats.size(0), 64 * 6 * 6)
        return self.classifier(flat)


# --- Training / evaluation driver ---------------------------------------
# Fixes vs. original: no shadowing of the builtin `input` or the imported
# `data` module; deprecated `Variable` wrapper dropped (plain tensors carry
# autograd state); runs on CPU when CUDA is unavailable; evaluation uses
# eval mode + no_grad, does not shuffle the test set, and stores scalar
# losses via .item() instead of live graph tensors.
device = t.device('cuda' if t.cuda.is_available() else 'cpu')

train_dataset = DogCat('../data/dog_cat/training_set')
test_dataset = DogCat('../data/dog_cat/test_set')

train_loader = DataLoader(train_dataset, batch_size=128, shuffle=True, num_workers=2)
# Evaluation order doesn't matter; shuffling only adds overhead.
test_loader = DataLoader(test_dataset, batch_size=128, shuffle=False, num_workers=2)
criterion = t.nn.CrossEntropyLoss()

model = AlexNet(num_classes=2).to(device)
optimizer = t.optim.Adam(model.parameters(), lr=1e-3)

model.train()
for epoch in range(5):
    print(epoch)
    for batch, labels in tqdm.tqdm(train_loader):
        inputs = batch.to(device)
        targets = labels.to(device)
        optimizer.zero_grad()
        scores = model(inputs)
        loss = criterion(scores, targets)
        loss.backward()
        optimizer.step()

# Evaluation: disable dropout and gradient tracking.
model.eval()
rslt = []
with t.no_grad():
    for batch, labels in tqdm.tqdm(test_loader):
        inputs = batch.to(device)
        targets = labels.to(device)
        scores = model(inputs)
        loss = criterion(scores, targets)
        # .item() detaches to a plain float so batches are freed promptly.
        rslt.append((labels, loss.item()))
print(rslt)