# Import necessary packages.
import numpy as np
import torch
import os
import torch.nn as nn
from torchvision import transforms
from PIL import Image
# "ConcatDataset" and "Subset" are possibly useful when doing semi-supervised learning.
from torch.utils.data import ConcatDataset, DataLoader, Subset, Dataset, SubsetRandomSampler
from torchvision.datasets import DatasetFolder, VisionDataset
# This is for the progress bar.
from tqdm.auto import tqdm
import random
import utils
from torch.utils.tensorboard import SummaryWriter

from models import *

# Pin every RNG source so experiments are repeatable run-to-run.
myseed = 6666
random.seed(myseed)
np.random.seed(myseed)
torch.manual_seed(myseed)
if torch.cuda.is_available():
    # Seed every visible GPU, not just the current device.
    torch.cuda.manual_seed_all(myseed)
# Trade cuDNN autotuning speed for deterministic kernels.
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False

# Deterministic preprocessing for validation/testing: no augmentation,
# only a fixed resize, tensor conversion, and normalization.
test_tfm = transforms.Compose(
    [
        transforms.Resize((256, 256)),
        transforms.ToTensor(),
        # ImageNet channel statistics, matching the training normalization.
        transforms.Normalize(
            mean=[0.485, 0.456, 0.406],
            std=[0.229, 0.224, 0.225],
        ),
    ]
)

# Training-time augmentation pipeline. It can also be applied at test time
# to produce image variants for ensemble-style test-time augmentation.
train_tfm = transforms.Compose(
    [
        # Random crop covering 80-100% of the image area, output 256x256.
        transforms.RandomResizedCrop(size=(256, 256), scale=(0.8, 1.0)),
        transforms.RandomHorizontalFlip(),
        transforms.RandomRotation(90),
        transforms.ColorJitter(
            brightness=0.3, contrast=0.3, saturation=0.3, hue=0.1
        ),
        # PIL-level augmentations come first; conversion to tensor happens
        # just before normalization.
        transforms.ToTensor(),
        transforms.Normalize(
            mean=[0.485, 0.456, 0.406],
            std=[0.229, 0.224, 0.225],
        ),
    ]
)


# "cuda" only when GPUs are available.
device = "cuda" if torch.cuda.is_available() else "cpu"

# The number of batch size.
# batch_size = 64

# When using a single GPU per process and per
# DistributedDataParallel, we need to divide the batch size
# ourselves based on the total number of GPUs we have
batch_size = 64 // 4



# The number of training epochs.
n_epochs = 300

# If no improvement in 'patience' epochs, early stop.
patience = 50

# learning rate
learning_rate = 0.001

# milestone&decay rate for MultiStepLR 
# milestones = [50, 120, 180]
# decay_rate = 0.5
# decay_rate for ExponentialLR
decay_rate = 0.98

# train data path
train_data_path = "/mnt/DISK/xjk/ml/datasets/food11/training"
# valid data path
valid_data_path = "/mnt/DISK/xjk/ml/datasets/food11/validation"
# test data path
test_data_path = "/mnt/DISK/xjk/ml/datasets/food11/test"


