import os
import torch
import torch.nn as nn
import torch.nn.functional as F
from PIL import Image
from torchvision.datasets import ImageFolder
import torchvision.transforms as tt
import matplotlib.pyplot as plt
import sys


# data_dir = './dataset/Fruit/fruits-360_dataset/fruits-360/'
 # # print(os.listdir(data_dir))
# classes = os.listdir(data_dir + "Training")
# train_tfms = tt.Compose([
#     tt.RandomCrop(100, padding=4, padding_mode='reflect'),
#     tt.Resize((100, 100)),
#     tt.RandomHorizontalFlip(),
#     # tt.RandomRotate
#     # tt.RandomResizedCrop(256, scale=(0.5,0.9), ratio=(1, 1)),
#     # tt.ColorJitter(brightness=0.1, contrast=0.1, saturation=0.1, hue=0.1),
#     tt.ToTensor(),
#
# ])
# valid_tfms = tt.Compose([tt.ToTensor(), tt.Resize((100, 100)),
#                          ])
# dataset = ImageFolder(data_dir + "/Training", transform=train_tfms)
# test_dataset = ImageFolder(data_dir + "/Test", transform=valid_tfms)
# print(classes)


# model

# Accuracy helper and the image-classification base class
def accuracy(outputs, labels):
    """Fraction of rows in `outputs` whose argmax matches `labels`, as a 0-dim tensor."""
    predicted = outputs.argmax(dim=1)
    correct = (predicted == labels).sum().item()
    return torch.tensor(correct / len(predicted))


class ImageClassificationBase(nn.Module):
    """Base class bundling the train/validation step logic shared by image classifiers.

    Subclasses only need to implement `forward`; the methods here handle
    per-batch loss/accuracy and per-epoch metric aggregation.
    """

    def training_step(self, batch):
        """Forward one (images, labels) batch and return the cross-entropy loss."""
        images, labels = batch
        return F.cross_entropy(self(images), labels)

    def validation_step(self, batch):
        """Evaluate one batch; return its detached loss and accuracy."""
        images, labels = batch
        predictions = self(images)
        return {
            'val_loss': F.cross_entropy(predictions, labels).detach(),
            'val_acc': accuracy(predictions, labels),
        }

    def validation_epoch_end(self, outputs):
        """Average the per-batch validation metrics into epoch-level floats."""
        mean_loss = torch.stack([o['val_loss'] for o in outputs]).mean()
        mean_acc = torch.stack([o['val_acc'] for o in outputs]).mean()
        return {'val_loss': mean_loss.item(), 'val_acc': mean_acc.item()}

    def epoch_end(self, epoch, result):
        """Print a one-line summary of this epoch's train/validation metrics."""
        print("Epoch [{}], train_loss: {:.4f}, val_loss: {:.4f}, val_acc: {:.4f}".format(
            epoch, result['train_loss'], result['val_loss'], result['val_acc']))


class Fruit360CnnModel(ImageClassificationBase):
    """Small 3-block CNN for 100x100 RGB images.

    Each conv uses kernel_size=2 with padding=1 (spatial size grows by 1),
    so a 100x100 input shrinks through the pools as
    100 -> 101 -> 50 -> 51 -> 25 -> 26 -> 5, leaving a 64 * 5 * 5 feature
    vector for the classifier head.
    """

    def __init__(self, num_classes=131):
        """Build the network.

        Args:
            num_classes: size of the output layer. Defaults to 131, the
                number of classes in the Fruits-360 dataset, so existing
                callers are unaffected.
        """
        super().__init__()
        self.network = nn.Sequential(

            nn.Conv2d(3, 16, kernel_size=2, padding=1),
            nn.BatchNorm2d(16),
            nn.ReLU(),
            nn.MaxPool2d(2, 2),  # 16 x 50 x 50

            nn.Conv2d(16, 32, kernel_size=2, stride=1, padding=1),
            nn.BatchNorm2d(32),
            nn.ReLU(),
            nn.MaxPool2d(2, 2),  # 32 x 25 x 25

            nn.Conv2d(32, 64, kernel_size=2, stride=1, padding=1),
            nn.BatchNorm2d(64),
            nn.ReLU(),
            nn.MaxPool2d(5, 5),  # 64 x 5 x 5

            nn.Flatten(),
            nn.Dropout(0.3),
            nn.ReLU(),
            nn.Linear(64 * 5 * 5, num_classes))

    def forward(self, xb):
        """Run the input batch (N, 3, 100, 100) through the network."""
        return self.network(xb)

def predict_image(img, model):
    """Return the predicted class index for a single image tensor (C, H, W)."""
    # xb = to_device(img.unsqueeze(0), device)
    batch = img.unsqueeze(0)  # the model expects a leading batch dimension
    logits = model(batch)
    _, best = torch.max(logits, dim=1)  # index of the highest-scoring class
    return best[0].item()

# img, label = test_dataset[3000]
# plt.imshow(img.permute(1, 2, 0))
#
# model = torch.load("bestModel.pth",map_location="cpu")
# print('Label:', dataset.classes[label], 'Predicted:', predict_image(img, model))


#--------------------------------------------------------------------------------------
# CLI entry: predict the class of a single image.
# Usage: python <script> <image_path> <model_path>
if len(sys.argv) < 3:
    sys.exit("usage: python {} <image_path> <model_path>".format(sys.argv[0]))
image_path = sys.argv[1]
model_path = sys.argv[2]
# image_path = "./train/Pear Kaiser/200_100.jpg"
# data_dir = './dataset/Fruit/fruits-360_dataset/fruits-360/'

image = Image.open(image_path)
# Not every image file has 3 channels (e.g. grayscale, RGBA); force RGB so the
# tensor matches the model's Conv2d(3, ...) input layer.
image = image.convert('RGB')

# Same preprocessing as the validation pipeline used during training.
transform = tt.Compose([tt.ToTensor(), tt.Resize((100, 100))])
image = transform(image)
plt.imshow(image.permute(1, 2, 0))

# NOTE(review): torch.load unpickles arbitrary objects — only load trusted files.
model = torch.load(model_path, map_location="cpu")
model.eval()  # switch Dropout/BatchNorm to inference mode before predicting
with torch.no_grad():  # no gradients needed at inference time
    print(predict_image(image, model))