from torch.utils.data import DataLoader
from utils.dataset import TestDataset
import albumentations as A
from albumentations import pytorch as AT

import pandas as pd
from utils.model import *
import ttach as tta
from tqdm import tqdm
import random
import os
import json
import time

# Multi-model K-fold ensemble inference with test-time augmentation (TTA).
k_folds = 10          # number of cross-validation folds (one prediction CSV per fold)
input_size = 224      # final crop size fed to the models
nc = 137              # number of classes
batch_size = 64
nw = 8                # DataLoader worker processes
test_csv = '../Dataset/test_clean.csv'  # missing ['a2411.jpg'] (the single GIF; appended manually later)
test_path = '../Dataset/test/'

model1_name = 'resnest50d'
model2_name = 'tf_efficientnetv2_s_in21ft1k'
model3_name = 'tf_efficientnet_b4_ns'

save_dir = 'pred-csv'
save_csv = os.path.join(save_dir, 'ress50_effv2s_effb4_k10') + os.sep
# exist_ok=True replaces the race-prone exists()/makedirs() pattern
os.makedirs(save_csv, exist_ok=True)

# Test-time preprocessing: resize to input_size * (256/224), center-crop back
# to input_size, normalize, and convert HWC -> CHW tensor.
resize_side = int(input_size * (256 / 224))
albu_transform = {
    'test': A.Compose([
        A.Resize(resize_side, resize_side),
        A.CenterCrop(input_size, input_size),
        A.Normalize(),  # default ImageNet mean and std
        AT.ToTensorV2(p=1.0),  # includes HWC -> CHW
    ])
}

# Select the compute device and report GPU info. The GPU query is guarded:
# torch.cuda.get_device_properties(0) raises on CPU-only hosts, even though
# the device selection itself falls back to CPU correctly.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
if torch.cuda.is_available():
    cuda_info = torch.cuda.get_device_properties(0)
    print("using {} {} {}MB.".format(device, cuda_info.name, cuda_info.total_memory / 1024 ** 2))
else:
    print("using {}.".format(device))

test_dataset = TestDataset(test_csv, test_path, transform=albu_transform['test'])
test_loader = DataLoader(test_dataset, batch_size=batch_size, num_workers=nw)
print('Test total {} images'.format(len(test_dataset)))
# read the index -> class mapping, e.g. {'0': 0, '1': 1, ...}
json_path = './class_indices.json'
assert os.path.exists(json_path), "file: '{}' does not exist.".format(json_path)
with open(json_path, "r") as json_file:  # context manager closes the handle (original leaked it)
    num_to_class = json.load(json_file)

# Instantiate the three backbones (fold weights are loaded inside the loop below).
model1, model2, model3 = (
    timm_model(arch, pretrained=False, num_classes=nc).to(device)
    for arch in (model1_name, model2_name, model3_name)
)

def _fold_ckpts(arch, epoch_acc):
    """Build checkpoint paths '{arch}/model-fold-{i}-epoch{e}-val_acc-{a}.pth' for folds 0..9."""
    return [
        f'{arch}/model-fold-{fold}-epoch{epoch}-val_acc-{acc}.pth'
        for fold, (epoch, acc) in enumerate(epoch_acc)
    ]

# val 90.06 test 86.925
model1_path_list = _fold_ckpts('resnest50d', [
    (27, '0.8944'), (29, '0.9009'), (29, '0.9031'), (29, '0.9006'), (28, '0.9027'),
    (28, '0.894'), (28, '0.9053'), (29, '0.9008'), (26, '0.9037'), (28, '0.9007'),
])

# val 91.25 test 87.362
model2_path_list = _fold_ckpts('tf_efficientnetv2_s_in21ft1k', [
    (26, '0.9107'), (27, '0.9086'), (29, '0.9182'), (28, '0.9083'), (28, '0.9097'),
    (28, '0.9126'), (27, '0.91'), (26, '0.9168'), (28, '0.9124'), (29, '0.9174'),
])

# val 90.30 test 87.096
model3_path_list = _fold_ckpts('tf_efficientnet_b4_ns', [
    (29, '0.9086'), (27, '0.9018'), (25, '0.9027'), (28, '0.8991'), (26, '0.8991'),
    (27, '0.9003'), (29, '0.9047'), (29, '0.901'), (27, '0.9076'), (27, '0.9049'),
])

# Deterministic per-list shuffles. A seeded random.Random instance reproduces
# exactly the order that random.seed(s); random.shuffle(lst) produced, without
# mutating the module-level random state.
random.Random(42).shuffle(model1_path_list)    # fold order: 7 3 2 8 5 6 9 4 0 1
random.Random(233).shuffle(model2_path_list)   # fold order: 5 9 0 1 4 7 6 3 8 2
random.Random(2021).shuffle(model3_path_list)  # fold order: 2 7 9 3 0 5 1 4 8 6
# Per-fold ensemble prediction: each fold pairs one checkpoint from every model,
# sums their flip-TTA logits, and writes one submission CSV per fold.
for model1_path, model2_path, model3_path, test_fold in zip(model1_path_list, model2_path_list,
                                                            model3_path_list, range(k_folds)):
    print('--------------------------------------')
    print(f'fold-{test_fold}')
    print('--------------------------------------')
    saveFileName = f'{save_csv}pred-fold-{test_fold}.csv'
    print(f'load weight {model1_path}')
    print(f'load weight {model2_path}')
    print(f'load weight {model3_path}')
    print(f'save name: {saveFileName}')
    # map_location keeps loading robust when checkpoints were saved on a
    # different device (e.g. another GPU index, or running on CPU).
    model1.load_state_dict(torch.load(model1_path, map_location=device))
    model2.load_state_dict(torch.load(model2_path, map_location=device))
    model3.load_state_dict(torch.load(model3_path, map_location=device))
    time.sleep(0.1)  # let the prints flush before tqdm starts drawing

    # Make sure the models are in eval mode: modules like Dropout or BatchNorm
    # behave differently in training mode.
    model1.eval()
    model2.eval()
    model3.eval()

    # Horizontal-flip test-time augmentation wrapped around each backbone.
    tta_model1 = tta.ClassificationTTAWrapper(model1, tta.aliases.flip_transform())
    tta_model2 = tta.ClassificationTTAWrapper(model2, tta.aliases.flip_transform())
    tta_model3 = tta.ClassificationTTAWrapper(model3, tta.aliases.flip_transform())

    # Collect per-image predicted class indices over the whole test set.
    predictions = []
    for batch in tqdm(test_loader):
        imgs = batch
        with torch.no_grad():
            logits1 = tta_model1(imgs.to(device))
            logits2 = tta_model2(imgs.to(device))
            logits3 = tta_model3(imgs.to(device))

            # Equal-weight logit ensemble of the three backbones.
            logits = logits1 + logits2 + logits3

        # argmax over classes -> integer class indices
        predictions.extend(logits.argmax(dim=-1).cpu().numpy().tolist())

    # Map integer indices to class labels via class_indices.json (keys are strings).
    preds = [num_to_class[str(i)] for i in predictions]

    test_data = pd.read_csv(test_csv)
    test_data['category_id'] = pd.Series(preds)
    pred_csv = pd.concat([test_data['image_id'], test_data['category_id']], axis=1)
    # The test CSV is missing the single GIF image; append its row manually.
    # DataFrame.append was removed in pandas 2.0 -> build a one-row frame and concat.
    gif_row = pd.DataFrame([{'image_id': 'a2411.jpg', 'category_id': 1}])
    pred_csv = pd.concat([pred_csv, gif_row], ignore_index=True)
    pred_csv.to_csv(saveFileName, index=False)
    print(f"fold-{test_fold} predict done!")
