import os
import argparse

import pandas as pd

import torch
from torch.utils.data import Dataset
from torch.utils.data import DataLoader
from torchvision import transforms
from PIL import Image

import settings
from utils import get_network

# Default compute device; overridden in __main__ once -gpu / -gpunum are parsed.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')


class OceanDataset(Dataset):
    """Dataset over JPEG images named ``<FileID>.jpg`` under ``root``.

    Two modes: when the input frame has a ``SpeciesID`` column (train/val),
    each item is ``(image, file_id, label)``; otherwise (test) each item is
    ``(image, file_id)``.
    """

    def __init__(self, root, input_data, aug):
        """
        Args:
            root: directory containing the ``.jpg`` files.
            input_data: DataFrame with a ``FileID`` column and an optional
                ``SpeciesID`` column.
            aug: transform applied to each PIL image, or None.
        """
        self.file_data = input_data['FileID'].values
        # Labels exist only for train/val CSVs; the test CSV lacks them.
        self.label_data = input_data['SpeciesID'].values if 'SpeciesID' in input_data.columns else None
        self.aug = aug

        # os.path.join is robust to ``root`` with or without a trailing
        # separator (the original plain concatenation required one).
        self.img_data = [os.path.join(root, f'{i}.jpg') for i in self.file_data]

    def __len__(self):
        return len(self.img_data)

    def __getitem__(self, index):
        # Force RGB so grayscale/CMYK JPEGs still yield 3-channel tensors.
        img = Image.open(self.img_data[index]).convert('RGB')

        if self.aug is not None:
            img = self.aug(img)

        if self.label_data is not None:
            return img, self.file_data[index], self.label_data[index]
        return img, self.file_data[index]


def get_test_dataloader(img_path, test_csv, num_workers, batch_size):
    """Build a non-shuffling DataLoader over the test images.

    Args:
        img_path: directory holding the test ``.jpg`` files.
        test_csv: DataFrame with a ``FileID`` column (no labels required).
        num_workers: worker processes for the DataLoader.
        batch_size: samples per batch.

    Returns:
        DataLoader yielding ``(image_batch, file_id_batch)`` pairs.
    """
    preprocess = transforms.Compose([
        transforms.Resize(size=(128, 128)),  # operates on PIL images
        transforms.ToTensor(),
    ])

    dataset = OceanDataset(img_path, test_csv, preprocess)
    return DataLoader(
        dataset=dataset,
        batch_size=batch_size,
        shuffle=False,
        num_workers=num_workers,
    )


def get_best_checkpoint(net_name=None, checkpoint_root=None):
    """Return the path of the best checkpoint with the highest epoch number.

    Checkpoint files are expected to be named ``<net>-<epoch>-best.pth``.

    Args:
        net_name: network sub-directory to search; defaults to the
            module-global ``netname`` (original behavior).
        checkpoint_root: base checkpoint directory; defaults to
            ``settings.CHECKPOINT_PATH``.

    Returns:
        Full path to the newest ``*-best.pth`` checkpoint.

    Raises:
        FileNotFoundError: if no ``*-best.pth`` file is found.
    """
    if net_name is None:
        net_name = netname
    if checkpoint_root is None:
        checkpoint_root = settings.CHECKPOINT_PATH
    search_root = os.path.join(checkpoint_root, net_name)

    best_paths = []
    for dirpath, _dirs, files in os.walk(search_root):
        for fname in files:
            # Match names ending in '-best.pth'.
            if fname.endswith('pth') and fname.split('-')[-1].split('.')[0] == 'best':
                # Keep the full path: the original joined against the walk
                # variable after the loop had clobbered it, which could point
                # at the wrong directory.
                best_paths.append(os.path.join(dirpath, fname))

    if not best_paths:
        raise FileNotFoundError(f'no *-best.pth checkpoint found under {search_root}')

    def _epoch(path):
        # '<net>-<epoch>-best.pth' -> epoch as int; -1 when unparsable.
        try:
            return int(os.path.basename(path).split('-')[-2])
        except (IndexError, ValueError):
            return -1

    # Numeric comparison: a plain string sort ranks epoch 9 above epoch 10.
    return max(best_paths, key=_epoch)


if __name__ == '__main__':

    # Hyperparameters
    parser = argparse.ArgumentParser()
    parser.add_argument('-root', type=str, default='./raw/', help='root path')
    parser.add_argument('-weights', type=str, default=None, help='the weights file you want to test')
    parser.add_argument('-b', type=int, default=16, help='batch size for dataloader')
    parser.add_argument('-w', type=int, default=4, help='number of workers')
    # NOTE: type=bool is an argparse trap -- bool('False') is True, so any
    # explicit value used to enable the GPU.  Parse the string instead.
    parser.add_argument('-gpu', type=lambda s: s.lower() in ('1', 'true', 'yes'),
                        default=True, help='use gpu or not')
    parser.add_argument('-net', type=str, default='resnet50', help='network type')
    parser.add_argument('-gpunum', type=int, default=0, help='use the kth gpu')
    args = parser.parse_args()

    if args.gpu and torch.cuda.is_available():
        device = f'cuda:{args.gpunum}'
    else:
        device = 'cpu'

    # Model
    net, netname = get_network(args, device)

    # Data
    test_csv = pd.read_csv('./raw/test.csv')  # has a header row

    Ocean_test_loader = get_test_dataloader(
        img_path=args.root + 'data/',
        test_csv=test_csv,
        num_workers=args.w,
        batch_size=args.b,
    )

    # Load model weights; map_location lets a CUDA-trained checkpoint be
    # restored on a CPU-only machine instead of raising.
    if args.weights is None:
        args.weights = get_best_checkpoint()
    net.load_state_dict(torch.load(args.weights, map_location=device))
    print(net)
    net.eval()

    # Inference
    print('Start Testing...')
    ans_file = []
    ans_pred = []
    with torch.no_grad():
        for img, fileid in Ocean_test_loader:
            inputs = img.to(device)
            outputs = net(inputs)

            ans_file.extend(fileid)
            # argmax over the class dimension -> predicted SpeciesID
            ans_pred.extend(outputs.max(1)[1].cpu().numpy())

    ans = pd.DataFrame({'FileID': ans_file, 'SpeciesID': ans_pred})

    # Original bug: the directory created (RESULT_DIR/netname) did not match
    # the one written to (RESULT_DIR); exist_ok also removes the existence race.
    os.makedirs(settings.RESULT_DIR, exist_ok=True)

    ans.to_csv(os.path.join(settings.RESULT_DIR, 'ans.csv'), index=False)
    # mox.file.copy('ans.csv', os.path.join(Context.get_output_path(), 'ans.csv'))
    print('test ans saved')