'''Train CIFAR10 with PyTorch.'''
from __future__ import print_function
import os

import torch
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
import torch.utils.model_zoo as model_zoo

import torchvision
import torchvision.models as models
import torchvision.transforms as transforms

import config
from models import *
from utils import *
from PIL import Image
import csv

# Load the trained network from checkpoint. map_location='cpu' lets a
# checkpoint that was saved on a GPU machine load on a CPU-only machine;
# on a CUDA machine the default mapping keeps tensors on their saved device.
model = torch.load(
    './checkpoint/ckpt.t7',
    map_location=None if torch.cuda.is_available() else 'cpu',
)
net = model['net']
# Summarize instead of dumping the whole checkpoint dict (debug leftover).
print('Loaded checkpoint with keys:', list(model.keys()))

# Evaluation-time preprocessing: center crop then normalize with the
# CIFAR-style per-channel mean/std the model was presumably trained with.
transform = transforms.Compose([
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])

pre_path = './test'

class VOCSegDataset(object):
    """Unlabeled test-image dataset.

    Reads whitespace-separated image filenames from ``test.txt`` and loads
    each image from ``path``, applying ``transforms`` to it.
    """

    def __init__(self, path, transforms):
        self.transforms = transforms
        self.path = path
        # read_images already returns a fresh list; no extra copy needed.
        self.data_list = self.read_images()
        print('Read {} images'.format(len(self.data_list)))

    def read_images(self):
        """Return the list of image filenames listed in test.txt."""
        txt_fname = 'test.txt'
        with open(txt_fname, 'r') as f:
            # split() handles any whitespace (spaces/newlines) between names.
            return f.read().split()

    def __getitem__(self, idx):
        """Return ``(transformed_image, filename)`` for sample ``idx``."""
        img_name = self.data_list[idx]
        img = Image.open(os.path.join(self.path, img_name))
        img = self.transforms(img)
        return img, img_name

    def __len__(self):
        return len(self.data_list)


# Unlabeled test set: filenames from test.txt, images under ./test.
preset = VOCSegDataset(pre_path, transform)
preloader = torch.utils.data.DataLoader(preset, batch_size=16, shuffle=False, num_workers=2)

# Labeled validation set laid out as ./data/val/<class_name>/<image>.
val_path = "./data/val"
valset = torchvision.datasets.ImageFolder(val_path, transform=transform)
valloader = torch.utils.data.DataLoader(valset, batch_size=16, shuffle=False, num_workers=2)

# Class labels are the strings '1'..'100'. ImageFolder assigns class indices
# by *lexicographic* folder-name order ('1', '10', '100', '11', ...), so
# classes2 must be sorted the same way for predicted indices to map back to
# the right label. Generate both lists instead of hand-writing 100 literals.
classes = tuple(str(i) for i in range(1, 101))
classes2 = sorted(classes)
print(classes2)

use_cuda = torch.cuda.is_available()

if use_cuda:
    net.cuda()

criterion = nn.CrossEntropyLoss()
def val(epoch):
    """Run one full pass over the validation set and print loss/accuracy.

    Args:
        epoch: epoch number, used only for the final report line.

    Uses the module-level ``net``, ``valloader``, ``criterion`` and
    ``use_cuda``. Does not return a value; results are printed.
    """
    net.eval()
    val_loss = 0.0
    correct = 0
    total = 0
    # torch.no_grad() replaces the removed Variable(..., volatile=True) API
    # for inference; gradients are neither needed nor tracked here.
    with torch.no_grad():
        for batch_idx, (inputs, targets) in enumerate(valloader):
            if use_cuda:
                inputs, targets = inputs.cuda(), targets.cuda()
            outputs = net(inputs)
            loss = criterion(outputs, targets)

            # loss.data[0] raises on 0-dim tensors in torch >= 0.4; .item()
            # is the supported way to get the Python scalar.
            val_loss += loss.item()
            _, predicted = torch.max(outputs, 1)
            total += targets.size(0)
            correct += predicted.eq(targets).sum().item()

            progress_bar(batch_idx, len(valloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)'
                % (val_loss/(batch_idx+1), 100.*correct/total, correct, total))

    print("Epoch: ", epoch, "Acc: ", 100.*correct/total, correct, total)

# Sanity-check the model on the labeled validation set first.
val(1)

result = []

# Predict a class label for every image in the unlabeled test set.
net.eval()
with torch.no_grad():  # inference only; replaces the deprecated Variable wrapper
    for inputs, names in preloader:
        if use_cuda:
            inputs = inputs.cuda()
        outputs = net(inputs)
        _, predicted = torch.max(outputs, 1)
        # Pair each filename with the label string for its predicted index.
        for name, cls_idx in zip(names, predicted):
            result.append([name, classes2[cls_idx.item()]])

# One "<filename> <label>" line per test image.
with open("result.txt", "w") as file_writer:
    for name, label in result:
        file_writer.write(name + " " + label + "\n")