from model.muti_task import Muti_Task
from model.DFL_resnet50 import fbresnet50
from utils.util import *
from utils.transform import *
from train import *
from validate import *
from utils.init import *
import sys
import argparse
import os
import random
import shutil
import time
import warnings
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms
import torchvision.datasets as datasets
from utils.muti_targets import *
from drawrect import *
from efficientnet_pytorch import EfficientNet

# Command-line interface for this prediction / evaluation script.
parser = argparse.ArgumentParser(description='PyTorch  Training')
parser.add_argument('--dataroot', default='', metavar='DIR',
                    help='path to dataset')
# Fixed: help text previously claimed "(default: 4)" while the real default is 16.
parser.add_argument('-j', '--workers', default=16, type=int, metavar='N',
                    help='number of data loading workers (default: 16)')
parser.add_argument('--resume', default='/home/ubuntu/data/weights/weight_005_efficient_integer_4/model_best.pth.tar',
                    type=str, metavar='PATH',
                    help='path to latest checkpoint (default: none)')
parser.add_argument('--gpu', default=2, type=int,
                    help='GPU nums to use.')
parser.add_argument('--nclass', default=4, type=int,
                    help='num of classes')
parser.add_argument('--w', default=448, type=int,
                    help='transform, seen as align')
parser.add_argument('--h', default=448, type=int,
                    help='transform, seen as align')

# Best top-1 accuracy seen so far; kept at module scope because the (commented)
# checkpoint-resume path in main() reads/writes it.
best_prec1 = 0

def label_true(list_pics, feature_path=r'/home/ubuntu/data/main_label_feature.csv'):
    """Look up the two ground-truth feature values for an image file name.

    The CSV is expected to have the image file name in column 1 and the two
    feature values in columns 7 and 8 (``row[2:][5]`` / ``row[2:][6]`` in the
    original code).

    Args:
        list_pics: image file name to look up (e.g. ``'xxx.jpg'``).
        feature_path: path to the label CSV. Defaults to the original
            hard-coded path, so existing callers are unaffected.

    Returns:
        ``(feature1, feature2)`` as floats if *list_pics* is present in the
        CSV, otherwise ``None``.
    """
    import csv

    feature_dict = {}
    # Use a context manager so the CSV handle is always closed (the original
    # passed an anonymous open() to csv.reader and leaked the handle).
    with open(feature_path, 'r') as fh:
        for row in csv.reader(fh):
            # row[7] and row[8] are the two feature columns; a short row will
            # raise IndexError here, same as the original row[2:][5] indexing.
            feature_dict[row[1]] = row[7] + ',' + row[8]

    if list_pics in feature_dict:
        feature = feature_dict[list_pics].split(',')
        return float(feature[0]), float(feature[1])
    return None

def muti_validate_simple(args, val_loader, model):
    """Evaluate a multi-output model on *val_loader* and print per-image
    predictions for two heads ("feature 6" = output[0], "feature 7" = output[1]).

    Feature-6 misclassifications are copied into hard-coded folders under
    /home/ubuntu/data/feature6_class_test/ for manual inspection; the feature-7
    bookkeeping is currently commented out.  Returns the raw model output of
    the last crop processed (or 0 if the loader is empty).

    NOTE(review): assumes batch_size == 1 — file_name is always taken from
    paths[0] regardless of idx; confirm against the caller's DataLoader.
    """
    model.eval()
    output = 0

    # Counters used as running file-name suffixes for copied images; the
    # different starting offsets keep the error buckets from colliding.
    # "countt*" belong to the (commented) feature-7 path, "scountt*" to feature 6.
    countt0 = 4000
    countt1 = 5000
    countt2 = 6000
    counttf0 = 7000
    counttf1 = 8000
    counttf2 = 9000

    scountt0 = 1000
    scountt1 = 2000
    scountt2 = 3000
    scounttf0 = 100
    scounttf1 = 400
    scounttf2 = 600
    # we may have ten d in data
    for i, (data, _, paths) in enumerate(val_loader):

        if args.gpu is not None:
            data = data.cuda()


        # compute output
        for idx, d in enumerate(data):      # data [batchsize, 10_crop, 3, 448, 448]
            d = d.unsqueeze(0) # d [1, 3, 448, 448]
            output = model(d)
            # Head outputs as 1-D numpy score vectors for this single crop.
            output_f6 = output[0].cpu().detach().numpy()[0]
            output_f7 = output[1].cpu().detach().numpy()[0]
            file_name = paths[0].split(r'/')[-1]
            labels = label_true(file_name)
            if labels is None:
                # Sentinel: image missing from the label CSV; -2 can never
                # match an argmax, so it falls into the "else" bucket below.
                label6 = -2
                label7 = -2
            else:
                label6 = labels[0]
                label7 = labels[1]

            print(file_name)
            # print(output_f6)
            # print(output_f7)


            if output_f6.argmax() ==label6:
                # Placeholder assignment keeps this branch syntactically valid
                # while the per-class copy logic below is commented out.
                nb = 0
                # if output_f6.argmax() ==0:
                #     scountt0 += 1
                #     shutil.copy(paths[0],'/home/ubuntu/data/feature6_class_test/0/'+str(scountt0)+'.jpg')
                # elif output_f6.argmax() ==1:
                #     scountt1 += 1
                #     shutil.copy(paths[0],'/home/ubuntu/data/feature6_class_test/1/'+str(scountt1)+'.jpg')
                # elif output_f6.argmax() ==2:
                #     scountt2 += 1
                #     shutil.copy(paths[0],'/home/ubuntu/data/feature6_class_test/2/'+str(scountt2)+'.jpg')
            else:
                # Misclassified: bucket by (predicted, true) pair.
                # Folder "10" = predicted 1 / true 0, "01" = predicted 0 / true 1.
                if output_f6.argmax() == 1 and label6 == 0:
                    scounttf0 += 1
                    shutil.copy(paths[0],'/home/ubuntu/data/feature6_class_test/10/'+str(scounttf0)+'.jpg')
                elif output_f6.argmax() == 0 and label6 == 1:
                    scounttf1 += 1
                    shutil.copy(paths[0],'/home/ubuntu/data/feature6_class_test/01/'+str(scounttf1)+'.jpg')
                else:
                    # Catch-all (including label == -2 sentinel); copy disabled.
                    scounttf2 += 1
                    # shutil.copy(paths[0],'/home/ubuntu/data/feature6_class_test/02/'+str(scounttf2)+'.jpg')

            # if output_f7.argmax() ==label7:
            #     if output_f7.argmax() ==0:
            #         countt0 += 1
            #         shutil.copy(paths[0],'/home/ubuntu/data/feature7_class_test/0/'+str(countt0)+'.jpg')
            #     elif output_f7.argmax() ==1:
            #         countt1 += 1
            #         shutil.copy(paths[0],'/home/ubuntu/data/feature7_class_test/1/'+str(countt1)+'.jpg')
            #     elif output_f7.argmax() ==2:
            #         countt2 += 1
            #         shutil.copy(paths[0],'/home/ubuntu/data/feature7_class_test/2/'+str(countt2)+'.jpg')
            # else:
            #     if output_f7.argmax() == 1 and label7 == 0:
            #         counttf0 += 1
            #         shutil.copy(paths[0],'/home/ubuntu/data/feature7_class_test/10/'+str(counttf0)+'.jpg')
            #     elif output_f7.argmax() == 0 and label7 == 1:
            #         counttf1 += 1
            #         shutil.copy(paths[0],'/home/ubuntu/data/feature7_class_test/01/'+str(counttf1)+'.jpg')
            #     else:
            #         counttf2 += 1
            #         shutil.copy(paths[0],'/home/ubuntu/data/feature7_class_test/02/'+str(counttf2)+'.jpg')

            print('f6:predict:{} real:{} possi{}'.format(output_f6.argmax(),label6,max(output_f6)))
            print('f7:predict:{} real:{} possi{}'.format(output_f7.argmax(),label7,max(output_f7)))
    return output

def validate_simple(args, val_loader, model_integration):
    # model.eval()
    output = None
    from concurrent.futures import ThreadPoolExecutor, as_completed
    def ThreadPool(data, keys, model):
        # compute output
        for idx, d in enumerate(data):  # data [batchsize, 10_crop, 3, 448, 448]
            d = d.unsqueeze(0)  # d [1, 3, 448, 448]
            output = model(d)
            output_f1 = output[0].cpu().detach().numpy().argmax()

            file_name = paths[0].split(r'/')[-1]

            print(file_name, keys, output_f1)

            return (file_name, keys, output_f1)
            # with open(r'/home/ubuntu/data/diesease_features/label_feature2.txt', 'a') as f:
            #     f.write('{} {}\n'.format(file_name, output_f1))
    f = open(r'/home/ubuntu/data/sing_disease_feature.txt', 'a')

    list_ord = ['01', '02', '03', '04', '05', '06', '07', '08', '09', '10', '11', '12', '13', '14','15']
    # we may have ten d in data

    for i, (data, _, paths) in enumerate(val_loader):

        if args.gpu is not None:
            data = data.cuda()

        pool = ThreadPoolExecutor(max_workers=2)
        futures = {
            pool.submit(
                ThreadPool,
                data,
                keys,
                model

            ):
                model for keys, model in model_integration.items()
        }
        out = [0 for i in range(len(futures))]

        for future in as_completed(futures):
            output = future.result()

            if output[1] in list_ord:
                out[list_ord.index(output[1])] = output[-1]

        out.append(output[0])
        # f.writelines(str(out)+'\n')
    # print(count,num)
    return output



def main():
    """Load a single 2-class EfficientNet-b3 checkpoint, wrap it as the sole
    entry of a model-integration dict, and run validate_simple over a
    hard-coded test folder.

    NOTE(review): requires CUDA GPUs 0-1 and the hard-coded data/weight paths
    below; will fail outside that environment.
    """

    # Parsed CLI options are shared module-wide via the global.
    global args
    args = parser.parse_args()
    #
    # # model = Muti_Task(k = 10, nclass = 3)
    # model_name = 'efficientnet-b3'
    # model = EfficientNet.from_pretrained(model_name)
    # model._fc = nn.Linear(1536, args.nclass)
    # if args.gpu is not None:
    #     model = nn.DataParallel(model, device_ids=range(args.gpu))
    #     model = model.cuda()
    #     cudnn.benchmark = True
    #
    # # Optionally resume from a checkpoint
    # if args.resume:
    #     if os.path.isfile(args.resume):
    #         checkpoint = torch.load(args.resume)
    #         args.start_epoch = checkpoint['epoch']
    #         best_prec1 = checkpoint['best_prec1']
    #         model.load_state_dict(checkpoint['state_dict'])
    #         # optimizer.load_state_dict(checkpoint['optimizer'])
    #         print('from {} epoch {} best_prec1{}'.format(args.resume, checkpoint['epoch'], best_prec1))
    #     else:
    #         print('DFL-CNN <==> Part2 : Load Network  <==> Failed')

    # testdir = os.path.join(r'/home/ubuntu/data', 'deases_single')
    # ImageFolder to process img

    # Hard-coded evaluation set; feature_dict / feature_txt are currently unused.
    testdir = r'/home/ubuntu/data/dataset/test_question5'
    feature_dict = {}
    feature_txt = r'/home/ubuntu/data/label_feature.txt'

    # Dataset/transform come from project helpers (utils.transform /
    # utils.muti_targets); exact preprocessing not visible from here.
    transform_test_simple = get_transform_for_test_simple()
    test_dataset_simple = muti_targets(testdir, transform=transform_test_simple)

    test_loader_simple = torch.utils.data.DataLoader(
        test_dataset_simple, batch_size=1, shuffle=True,
        num_workers=args.workers, pin_memory=True, drop_last=False)

    # predict on test set

    # output = mutil_validate_simple(args, test_loader_simple, model)

    # Build the model-integration dict with a single 2-class EfficientNet-b3.
    weight_path = r'/home/ubuntu/data/weight'
    model_integration = {}
    feature_class = {}
    model_name = 'efficientnet-b3'
    model = EfficientNet.from_pretrained(model_name)
    n_class = 2
    # Replace the classifier head; 1536 is the b3 feature width.
    model._fc = nn.Linear(1536, n_class)

    model = nn.DataParallel(model, device_ids=range(2))
    model.cuda()
    cudnn.benchmark = True
    checkpoint = torch.load('/home/ubuntu/data/weight/model_best.pth.tar')

    model.load_state_dict(checkpoint['state_dict'])
    model.eval()
    # NOTE(review): key '5' never matches validate_simple's two-digit list_ord
    # entries ('01'..'15'), so the assembled vector stays zeros — confirm
    # whether '05' was intended.
    model_integration.update({str(5): model})

    # for file in os.listdir(weight_path):
    #     resume = os.path.join(weight_path, file, 'model_best.pth.tar')
    #     n_class = int(file[-1])
    #     feature_class.update({str(file[8:10]): n_class})
    #
    #     model_name = 'efficientnet-b3'
    #     model = EfficientNet.from_pretrained(model_name)
    #
    #     model._fc = nn.Linear(1536, n_class)
    #
    #
    #     model = nn.DataParallel(model, device_ids=range(2))
    #     model.cuda()
    #     cudnn.benchmark = True
    #     checkpoint = torch.load(resume)
    #
    #     model.load_state_dict(checkpoint['state_dict'])
    #     model.eval()
    #     model_integration.update({str(file[8:10]): model})



    output = validate_simple(args, test_loader_simple, model_integration)

    print(output)
    # with open(r'/home/ubuntu/data/sing_disease_feature.txt', 'a') as f:
    #     f.write(output)
        # f.write('DFL-CNN <==> Test Total <==> epoch{} Top1 {:.3f}%\n'.format(epoch, top1.avg[0]))


# Script entry point.
if __name__ == '__main__':
    main()
