from torch.utils.data import DataLoader
from utils.dataset import TestDataset
import albumentations as A
from albumentations import pytorch as AT
import numpy as np
import pandas as pd
from utils.model import *
import ttach as tta
from tqdm import tqdm
import torch.nn as nn
import os
import json
import time

# Single-model K-fold TTA inference configuration.
k_folds = 10          # number of cross-validation folds
input_size = 224      # final crop size fed to the network
nc = 137              # number of classes
batch_size = 64
nw = 8                # DataLoader worker processes
test_csv = '../Dataset-fu/test.csv'
test_path = '../Dataset-fu/test/'
# model_name = 'tf_efficientnetv2_s_in21ft1k'
# model_name = 'resnest50d'
model_name = 'tf_efficientnet_b4_ns'
save_dir = 'pred-csv-fu'
save_csv = os.path.join(save_dir, model_name) + os.sep
# exist_ok=True avoids the race between an existence check and makedirs
os.makedirs(save_csv, exist_ok=True)

# Test-time preprocessing: resize slightly larger, center-crop to the model
# input size, normalize with ImageNet statistics, then convert HWC -> CHW.
_resize_edge = int(input_size * (256 / 224))
albu_transform = {
    'test': A.Compose([
        A.Resize(_resize_edge, _resize_edge),
        A.CenterCrop(input_size, input_size),
        A.Normalize(),  # default ImageNet mean and std
        AT.ToTensorV2(p=1.0),  # includes HWC -> CHW
    ])
}

# Select the GPU when available; fall back to CPU so the script still runs.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
if torch.cuda.is_available():
    # Only query device properties when a GPU actually exists — the original
    # call crashed on CPU-only machines despite the CPU fallback above.
    cuda_info = torch.cuda.get_device_properties(0)
    print("using {} {} {}MB.".format(device, cuda_info.name, cuda_info.total_memory / 1024 ** 2))
else:
    print("using {}.".format(device))

test_dataset = TestDataset(test_csv, test_path, transform=albu_transform['test'])
test_loader = DataLoader(test_dataset, batch_size=batch_size, num_workers=nw)
print('Test total {} images'.format(len(test_dataset)))
# Read the index -> class-label mapping, e.g. {'0': 0, '1': 1, ...}
json_path = './class_indices.json'
assert os.path.exists(json_path), "file: '{}' does not exist.".format(json_path)
# Context manager closes the file; the original leaked the handle.
with open(json_path, "r") as json_file:
    num_to_class = json.load(json_file)

# predict
# Build the backbone once; per-fold weights are loaded into it in the loop below.
# pretrained=False because every checkpoint carries its own trained weights.
model = timm_model(model_name, pretrained=False, num_classes=nc)
model.to(device)

# Checkpoint paths for the active model; commented groups are earlier runs kept
# for reference (inline scores are val / test accuracy noted by the author).
# NOTE(review): the active entries are folds 5-9, but the prediction loop
# numbers its output CSVs from 0 — confirm the fold labels are intended.
model_path_list = [
    # 'tf_efficientnet_b4_ns/model-fold-0-epoch29-val_acc-0.9086.pth',  # val 90.30 test 87.096
    # 'tf_efficientnet_b4_ns/model-fold-1-epoch27-val_acc-0.9018.pth',
    # 'tf_efficientnet_b4_ns/model-fold-2-epoch25-val_acc-0.9027.pth',
    # 'tf_efficientnet_b4_ns/model-fold-3-epoch28-val_acc-0.8991.pth',
    # 'tf_efficientnet_b4_ns/model-fold-4-epoch26-val_acc-0.8991.pth',
    'tf_efficientnet_b4_ns/model-fold-5-epoch27-val_acc-0.9003.pth',
    'tf_efficientnet_b4_ns/model-fold-6-epoch29-val_acc-0.9047.pth',
    'tf_efficientnet_b4_ns/model-fold-7-epoch29-val_acc-0.901.pth',
    'tf_efficientnet_b4_ns/model-fold-8-epoch27-val_acc-0.9076.pth',
    'tf_efficientnet_b4_ns/model-fold-9-epoch27-val_acc-0.9049.pth',

    # 'tf_efficientnetv2_s_in21ft1k/model-fold-0-epoch26-val_acc-0.9107.pth',  # val 91.25 test 87.362
    # 'tf_efficientnetv2_s_in21ft1k/model-fold-1-epoch27-val_acc-0.9086.pth',
    # 'tf_efficientnetv2_s_in21ft1k/model-fold-2-epoch29-val_acc-0.9182.pth',
    # 'tf_efficientnetv2_s_in21ft1k/model-fold-3-epoch28-val_acc-0.9083.pth',
    # 'tf_efficientnetv2_s_in21ft1k/model-fold-4-epoch28-val_acc-0.9097.pth',
    # 'tf_efficientnetv2_s_in21ft1k/model-fold-5-epoch28-val_acc-0.9126.pth',
    # 'tf_efficientnetv2_s_in21ft1k/model-fold-6-epoch27-val_acc-0.91.pth',
    # 'tf_efficientnetv2_s_in21ft1k/model-fold-7-epoch26-val_acc-0.9168.pth',
    # 'tf_efficientnetv2_s_in21ft1k/model-fold-8-epoch28-val_acc-0.9124.pth',
    # 'tf_efficientnetv2_s_in21ft1k/model-fold-9-epoch29-val_acc-0.9174.pth',

    # 'resnest50d/model-fold-0-epoch27-val_acc-0.8944.pth',  # val 90.06 test 86.925
    # 'resnest50d/model-fold-1-epoch29-val_acc-0.9009.pth',
    # 'resnest50d/model-fold-2-epoch29-val_acc-0.9031.pth',
    # 'resnest50d/model-fold-3-epoch29-val_acc-0.9006.pth',
    # 'resnest50d/model-fold-4-epoch28-val_acc-0.9027.pth',
    # 'resnest50d/model-fold-5-epoch28-val_acc-0.894.pth',
    # 'resnest50d/model-fold-6-epoch28-val_acc-0.9053.pth',
    # 'resnest50d/model-fold-7-epoch29-val_acc-0.9008.pth',
    # 'resnest50d/model-fold-8-epoch26-val_acc-0.9037.pth',
    # 'resnest50d/model-fold-9-epoch28-val_acc-0.9007.pth',

    # 'tf_efficientnetv2_s_in21ft1k-pseude/model-fold-0-epoch27-val_acc-0.9146.pth',  # val 91.955 test
    # 'tf_efficientnetv2_s_in21ft1k-pseude/model-fold-1-epoch27-val_acc-0.9254.pth',
    # 'tf_efficientnetv2_s_in21ft1k-pseude/model-fold-2-epoch28-val_acc-0.9173.pth',
    # 'tf_efficientnetv2_s_in21ft1k-pseude/model-fold-3-epoch27-val_acc-0.9157.pth',
    # 'tf_efficientnetv2_s_in21ft1k-pseude/model-fold-4-epoch28-val_acc-0.9175.pth',
    # 'tf_efficientnetv2_s_in21ft1k-pseude/model-fold-5-epoch28-val_acc-0.9169.pth',
    # 'tf_efficientnetv2_s_in21ft1k-pseude/model-fold-6-epoch28-val_acc-0.9229.pth',
    # 'tf_efficientnetv2_s_in21ft1k-pseude/model-fold-7-epoch28-val_acc-0.9243.pth',
    # 'tf_efficientnetv2_s_in21ft1k-pseude/model-fold-8-epoch28-val_acc-0.9198.pth',
    # 'tf_efficientnetv2_s_in21ft1k-pseude/model-fold-9-epoch29-val_acc-0.9211.pth',
]

# Run TTA inference with every fold checkpoint and write one CSV per fold.
# enumerate() replaces zip(model_path_list, range(k_folds)): zip silently
# truncated the list at k_folds entries, while enumerate always covers the
# whole list and keeps the fold index in sync with the list position.
for test_fold, model_path in enumerate(model_path_list):
    print('--------------------------------------')
    print(f'{model_name}-fold-{test_fold}')
    print('--------------------------------------')
    saveFileName = f'{save_csv}pred-fold-{test_fold}.csv'
    # map_location keeps checkpoint loading working on CPU-only machines.
    model.load_state_dict(torch.load(model_path, map_location=device))
    print(f'load weight {model_path}')
    print(f'save name: {saveFileName}')
    time.sleep(0.1)  # let the prints flush before tqdm draws its bar

    # eval() disables Dropout / switches BatchNorm to inference statistics.
    model.eval()
    # Flip TTA: the wrapper averages logits over the flipped views.
    tta_model = tta.ClassificationTTAWrapper(model, tta.aliases.flip_transform())

    # Predicted class index for every test image, in loader order.
    predictions = []
    for imgs in tqdm(test_loader):
        with torch.no_grad():
            logits = tta_model(imgs.to(device))
        # The class with the greatest logit is the prediction.
        predictions.extend(logits.argmax(dim=-1).cpu().numpy().tolist())

    # Map numeric indices back to the original class labels.
    preds = [num_to_class[str(i)] for i in predictions]

    test_data = pd.read_csv(test_csv)
    test_data['category_id'] = pd.Series(preds)
    pred_csv = pd.concat([test_data['image_id'], test_data['category_id']], axis=1)
    pred_csv.to_csv(saveFileName, index=False)
    print(f"{model_name}-fold-{test_fold} predict done!")
