# 预处理，把resize之后的图片保存
#         cv2.imwrite(os.path.join(savePath, file), img)

# Import需要的套件
import os
import numpy as np
import cv2
import torch
import torch.nn as nn
import torchvision.transforms as transforms
from torch.utils.data import DataLoader
import time

import ImgDataset
import Classifier

# Global configuration
batch_size = 64
# Data augmentation is applied only at training time
train_transform = transforms.Compose([
    transforms.ToPILImage(),
    transforms.RandomHorizontalFlip(),  # randomly flip images horizontally
    transforms.RandomRotation(15),  # randomly rotate images by up to 15 degrees
    transforms.ToTensor(),  # convert to Tensor; pixel values normalized to [0, 1]
])
# No augmentation at validation/test time
test_transform = transforms.Compose([
    transforms.ToPILImage(),
    transforms.ToTensor(),
])
# loss function (expects raw logits and integer class labels)
loss = nn.CrossEntropyLoss()
num_epoch = 15


# Read images from disk with OpenCV into a numpy array.
def readfile(path, label):
    """Load every image under *path* into a uint8 numpy array.

    Images are assumed to be pre-resized to 128x128 (the resize step was
    done once offline and saved to the *_pre directories) — TODO confirm.

    Args:
        path: directory containing the images.
        label: boolean; when True, also parse the class label from each
            filename (the integer before the first underscore, as in
            ``3_012.jpg``) and return it alongside the images.

    Returns:
        x of shape (N, 128, 128, 3) dtype uint8; additionally
        y of shape (N,) dtype uint8 when *label* is True.

    Raises:
        ValueError: if a file cannot be decoded as an image.
    """
    image_dir = sorted(os.listdir(path))
    x = np.zeros((len(image_dir), 128, 128, 3), dtype=np.uint8)
    y = np.zeros((len(image_dir),), dtype=np.uint8)
    for i, file in enumerate(image_dir):
        img = cv2.imread(os.path.join(path, file))
        if img is None:
            # cv2.imread returns None on failure; fail loudly instead of
            # silently leaving an all-zero image in the dataset.
            raise ValueError("cannot read image: {}".format(os.path.join(path, file)))
        x[i] = img
        if label:
            y[i] = int(file.split("_")[0])
    if label:
        return x, y
    return x


# Load previously trained weights and evaluate on the validation set.
def testmodel():
    """Evaluate the saved model on the validation set; print accuracy and loss."""
    # Raw string: the Windows path contains backslashes followed by
    # non-ASCII characters (invalid escape sequences otherwise).
    os.chdir(r'E:\深度学习作业\数据\hw3')
    print("touch {}".format(torch.cuda.is_available()))

    # Read the validation set via readfile
    workspace_dir = './food-11'
    print("Reading data")
    print(os.path.join(workspace_dir, "training_pre"))  # NOTE(review): original author says this print was needed for the script to run — verify
    val_x, val_y = readfile(os.path.join(workspace_dir, "validation_pre"), True)
    print("Size of validation data = {}".format(len(val_x)))

    # Build dataset and dataloader (no shuffling for evaluation)
    val_set = ImgDataset.ImgDataset(val_x, val_y, test_transform)
    val_loader = DataLoader(val_set, batch_size=batch_size, shuffle=False,
                            pin_memory=True, num_workers=0)

    net = Classifier.Classifier()
    model = net.cuda()  # Module.cuda() moves parameters in place and returns the same object
    net.load_state_dict(torch.load('net_params_everyround.pkl'))

    model.eval()  # inference mode: disables dropout etc.
    val_acc = 0.0
    val_loss = 0.0
    with torch.no_grad():
        for i, data in enumerate(val_loader):
            val_pred = model(data[0].cuda())
            batch_loss = loss(val_pred, data[1].cuda())

            val_acc += np.sum(np.argmax(val_pred.cpu().data.numpy(), axis=1) == data[1].numpy())
            # NOTE: batch_loss is a per-batch mean; dividing the sum of these
            # by the number of samples below under-reports the true mean loss.
            val_loss += batch_loss.item()

        # Print the results
        print('Val Acc: %3.6f loss: %3.6f' % (val_acc / len(val_set), val_loss / len(val_set)))


# Load previously trained weights and continue training.
def continue_train_model():
    """Resume training from saved weights on train+validation data.

    Fine-tunes with a small learning rate and checkpoints the weights
    after every epoch ('net_params_everyround.pkl'), plus a final save
    ('net_params_end.pkl').
    """
    # Raw string: Windows path with backslashes before non-ASCII characters
    os.chdir(r'E:\深度学习作业\数据\hw3')
    print("touch {}".format(torch.cuda.is_available()))

    # Read training and validation sets via readfile
    workspace_dir = './food-11'
    print("Reading data")
    print(os.path.join(workspace_dir, "training_pre"))  # NOTE(review): original author says this print was needed for the script to run — verify
    train_x, train_y = readfile(os.path.join(workspace_dir, "training_pre"), True)
    print("Size of training data = {}".format(len(train_x)))
    val_x, val_y = readfile(os.path.join(workspace_dir, "validation_pre"), True)
    print("Size of validation data = {}".format(len(val_x)))

    # Train on the combined train+validation data, with augmentation
    train_val_x = np.concatenate((train_x, val_x), axis=0)
    train_val_y = np.concatenate((train_y, val_y), axis=0)
    train_val_set = ImgDataset.ImgDataset(train_val_x, train_val_y, train_transform)
    train_val_loader = DataLoader(train_val_set, batch_size=batch_size, shuffle=True)

    net = Classifier.Classifier()
    model_best = net.cuda()  # .cuda() moves in place; model_best is the same object as net
    net.load_state_dict(torch.load('net_params_everyround.pkl'))
    optimizer = torch.optim.Adam(model_best.parameters(), lr=0.00001)  # small LR for fine-tuning

    for epoch in range(num_epoch):
        epoch_start_time = time.time()
        train_acc = 0.0
        train_loss = 0.0

        model_best.train()  # training mode: enables dropout etc.
        for i, data in enumerate(train_val_loader):  # one mini-batch per iteration
            optimizer.zero_grad()  # clear gradients from the previous step
            train_pred = model_best(data[0].cuda())  # forward pass: class logits
            batch_loss = loss(train_pred, data[1].cuda())  # prediction and label must be on the same device
            batch_loss.backward()  # back-propagate to compute gradients
            optimizer.step()  # update parameters

            train_acc += np.sum(np.argmax(train_pred.cpu().data.numpy(), axis=1) == data[1].numpy())
            train_loss += batch_loss.item()

        # Print epoch statistics
        print('[%03d/%03d] %2.2f sec(s) Train Acc: %3.6f Loss: %3.6f' % \
              (epoch + 1, num_epoch, time.time() - epoch_start_time, \
               train_acc / len(train_val_set), train_loss / len(train_val_set)))
        torch.save(model_best.state_dict(), 'net_params_everyround.pkl')  # checkpoint every epoch

    torch.save(model_best.state_dict(), 'net_params_end.pkl')


def pridiction(path, outputname):
    """Classify every image under *path* and write '<outputname>.csv'.

    The CSV has an 'Id,Category' header and one row per image (filename,
    Chinese class name). Note: the function chdirs into the model directory
    before writing, so the CSV ends up there rather than in *path*.
    """
    # chdir into the image directory so cv2.imread gets relative paths
    # (presumably because imread failed on non-ASCII absolute paths — see __main__ note)
    os.chdir(path)
    image_dir = sorted(os.listdir(path))
    test_x = np.zeros((len(image_dir), 128, 128, 3), dtype=np.uint8)
    for i, file in enumerate(image_dir):
        img = cv2.imread(file)
        if img is None:
            # fail loudly instead of silently leaving an all-zero image
            raise ValueError("cannot read image: {}".format(file))
        test_x[i] = cv2.resize(img, (128, 128))

    print("Size of Testing data = {}".format(len(test_x)))
    # Build dataset and dataloader (unlabeled; order preserved)
    test_set = ImgDataset.ImgDataset(test_x, transform=test_transform)
    test_loader = DataLoader(test_set, batch_size=batch_size, shuffle=False)

    net = Classifier.Classifier()
    model_best = net.cuda()
    # Raw string: Windows path with backslashes before non-ASCII characters
    os.chdir(r'E:\深度学习作业\数据\hw3')
    net.load_state_dict(torch.load('net_params_everyround.pkl'))
    model_best.eval()
    prediction = []
    with torch.no_grad():  # no gradients needed at inference; saves memory
        for i, data in enumerate(test_loader):
            test_pred = model_best(data.cuda())
            test_label = np.argmax(test_pred.cpu().data.numpy(), axis=1)
            for y in test_label:
                prediction.append(y)

    # Class index -> human-readable name for the 11 food-11 classes:
    # Bread, Dairy product, Dessert, Egg, Fried food, Meat, Noodles/Pasta,
    # Rice, Seafood, Soup, Vegetable/Fruit (renamed from `dict` to avoid
    # shadowing the builtin).
    label_names = {0: "面包", 1: "乳制品", 2: "甜点", 3: "蛋", 4: "炸货", 5: "肉", 6: "面条",
                   7: "米饭", 8: "海鲜", 9: "粥", 10: "蔬菜", 11: "其他"}
    # Write the results to a CSV file
    with open(outputname + ".csv", 'w') as f:
        f.write('Id,Category\n')
        for i, y in enumerate(prediction):
            f.write('{},{}\n'.format(image_dir[i], label_names[y]))


if __name__ == '__main__':
    # Other entry points (uncomment to run):
    # testmodel()
    # continue_train_model()
    # pridiction(r'E:\深度学习作业\数据\hw3\food-11\testing', 'predict1')

    # Raw string keeps the backslashes in the Windows path literal.
    pridiction(r'E:\深度学习作业\数据\hw3\mytest', 'mypredict')

    # NOTE(review): cv2.imread appeared to fail on absolute paths with
    # non-ASCII characters, which is why pridiction() chdirs into the
    # image directory before reading — verify on the target machine.
