import os, sys, time
import pickle
sys.path.append('/home/ubuntu/data/Image_Retrieval')
sys.path.append('/home/ubuntu/data/Image_Retrieval/train')
sys.path.insert(0, '/home/ubuntu/data')
from PIL import Image
from io import BytesIO
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import pylab
from Image_Retrieval.train.delf import Delf_V1
from Image_Retrieval.helper.feeder import Feeder
from Image_Retrieval.helper import matcher
import numpy as np
from matplotlib.pyplot import imshow
from concurrent.futures import ThreadPoolExecutor, as_completed
import torch
import numpy as np
from  sklearn.decomposition import PCA
from torchvision import  transforms
import torch.backends.cudnn as cudnn
from efficientnet_pytorch import EfficientNet
import torch.nn as nn
from torch.nn import functional as F
from web.app.represent_feature import VectorGeneration
from web.app.retrieval import MedicalRecordRetrieval
def resize_image(image, target_size=300):
    """Resize *image* so its longer side equals *target_size*, keeping aspect ratio.

    Args:
        image: PIL image to resize.
        target_size: length (px) the longer side is scaled to.

    Returns:
        A new bilinearly-resampled PIL image.
    """
    width, height = image.size
    if width < height:
        new_h = target_size
        new_w = int(width * target_size / float(height))
    else:
        new_w = target_size
        new_h = int(height * target_size / float(width))
    return image.resize((new_w, new_h), Image.BILINEAR)


def get_and_cache_image(image_path, basewidth=None):
    """Open an image file and return it both as a PIL image and as PNG bytes.

    Args:
        image_path: path of the image on disk.
        basewidth: optional target size; when given the image is first
            resized via resize_image.

    Returns:
        (pil_image, png_bytes) tuple.
    """
    img = Image.open(image_path)
    if basewidth is not None:
        img = resize_image(img, basewidth)
    buffer = BytesIO()
    img.save(buffer, format='PNG')
    return img, buffer.getvalue()


def get_result(feeder, query):
    """Feed a pair of images through the DELF feeder and RANSAC-match them.

    Args:
        feeder: object exposing feed_to_compare(paths, pil_images).
        query: sequence of two image paths [query_path, db_path].

    Returns:
        (side_by_side_comp_img_byte, att1, att2, score) where att1/att2 are
        attention visualisation bytes and score is the RANSAC inlier count.
    """
    pil_image = []
    byte_image = []
    for v in query:
        pil, byte = get_and_cache_image(v)
        pil_image.append(pil)
        byte_image.append(byte)

    # feed and get output: each entry carries
    # 'descriptor_np_list', 'attention_np_list', 'location_np_list'
    outputs = feeder.feed_to_compare(query, pil_image)
    print('# of extracted feature (query):', len(outputs[0]['descriptor_np_list']))
    # BUG FIX: the db count previously re-printed outputs[0] (the query's count).
    print('# of extracted feature (db):', len(outputs[1]['descriptor_np_list']))

    att1 = matcher.get_attention_image_byte(outputs[0]['attention_np_list'])
    att2 = matcher.get_attention_image_byte(outputs[1]['attention_np_list'])

    side_by_side_comp_img_byte, score = matcher.get_ransac_image_byte(
        byte_image[0],
        outputs[0]['location_np_list'],
        outputs[0]['descriptor_np_list'],
        byte_image[1],
        outputs[1]['location_np_list'],
        outputs[1]['descriptor_np_list'])
    print('matching inliner num:', score)
    return side_by_side_comp_img_byte, att1, att2, score

def get_single_result(outputs, byte_image):
    """RANSAC-match two pre-computed DELF feature dicts.

    Unlike get_result, features are supplied directly (no feeder call), so
    this is the fast path used when db features were extracted offline.

    Args:
        outputs: pair of dicts (query first, candidate second), each with
            'location_np_list' and 'descriptor_np_list'.
        byte_image: pair of image byte payloads forwarded to the matcher.

    Returns:
        (side_by_side_comp_img_byte, score) -- score is the inlier count.
    """
    # The original carried a dead `import time`/`start` timing stub and
    # commented-out feeder calls on every invocation; removed.
    side_by_side_comp_img_byte, score = matcher.get_ransac_image_byte(
        byte_image[0],
        outputs[0]['location_np_list'],
        outputs[0]['descriptor_np_list'],
        byte_image[1],
        outputs[1]['location_np_list'],
        outputs[1]['descriptor_np_list'])
    return side_by_side_comp_img_byte, score


def single_match(myfeeder, query):
    """Match a pair of images end-to-end and return only the inlier score.

    Args:
        myfeeder: DELF feeder passed straight to get_result.
        query: sequence of two image paths.

    Returns:
        The RANSAC inlier count (images and attention maps are discarded).
    """
    _, _, _, score = get_result(myfeeder, query)
    return score
def load_model():
    """Load one fine-tuned EfficientNet-b3 classifier per weight directory.

    Populates module-level globals (side effect, no return value):
      model_integration: feature id (chars 8:10 of the dir name) -> eval-mode model
      feature_class:     feature id -> number of output classes
      model:             last model loaded (kept global for compatibility)

    NOTE(review): the class count is parsed from the directory name's last
    character and the feature id from characters 8:10 -- this assumes a fixed
    naming scheme for /home/ubuntu/data/weights subdirs; confirm before reuse.
    """
    global model_integration, feature_class, model

    weight_path = r'/home/ubuntu/data/weights'
    model_integration = {}
    feature_class = {}
    use_gpu = True
    # loop invariants hoisted out of the per-checkpoint loop
    num_gpu = 2
    model_name = 'efficientnet-b3'
    for file in os.listdir(weight_path):
        resume = os.path.join(weight_path, file, 'model_best.pth.tar')
        n_class = int(file[-1])
        feature_class.update({str(file[8:10]): n_class})

        model = EfficientNet.from_pretrained(model_name)
        # replace the classifier head; 1536 is the b3 feature dimension
        model._fc = nn.Linear(1536, n_class)
        if use_gpu:
            model = nn.DataParallel(model, device_ids=range(num_gpu))
            model.cuda()
            cudnn.benchmark = True
        checkpoint = torch.load(resume)
        model.load_state_dict(checkpoint['state_dict'])
        model.eval()
        model_integration.update({str(file[8:10]): model})
    print('Loaded pretrained weights for efficientnet-b3')

def feature_tongue(file):
    """Classify one tongue image with every model in `model_integration`.

    Args:
        file: path of the tongue image.

    Returns:
        List of predicted class labels ordered by feature index
        (int(key) - 1 for each model key).
    """
    result = {'success': False}

    def _predict(image, key, net, result):
        # softmax over logits, then take the argmax class for this feature
        preds = F.softmax(net(image), dim=1)
        probs = preds[0].cpu().detach().numpy()
        result['predictions'].append((int(key) - 1, probs.argmax()))
        return result

    image = Image.open(file)
    image = prepare_image(image, target_size=(448, 448))
    result['predictions'] = list()

    # max_workers=1 keeps GPU inference sequential; the context manager
    # guarantees the executor is shut down (the original leaked it).
    with ThreadPoolExecutor(max_workers=1) as pool:
        futures = {
            pool.submit(_predict, image, key, net, result): net
            for key, net in model_integration.items()
        }
        for future in as_completed(futures):
            result = future.result()

    ordered = sorted(result['predictions'], key=lambda item: item[0])
    return [label for _, label in ordered]

def scale_keep_ar_min_fixed(img, fixed_min):
    """Scale *img* so its shorter side equals *fixed_min*, preserving aspect ratio.

    Args:
        img: PIL image.
        fixed_min: target length (px) of the shorter side.

    Returns:
        A new bicubically-resampled PIL image.
    """
    width, height = img.size
    if width < height:
        new_w = fixed_min
        new_h = fixed_min * height // width
    else:
        new_h = fixed_min
        new_w = fixed_min * width // height
    return img.resize((new_w, new_h), Image.BICUBIC)

def prepare_image(image, target_size):
    """Preprocess a PIL image into a normalized CUDA tensor for inference.

    Pipeline: convert to RGB, scale shorter side to 448, center-crop to
    448x448, map to [-1, 1], add a batch axis, move to GPU.

    NOTE(review): *target_size* is accepted for interface compatibility but
    the pipeline is hard-wired to 448 -- confirm before relying on it.

    Args:
        image: input PIL image.
        target_size: nominal (h, w); currently ignored, see note above.

    Returns:
        A float tensor of shape (1, 3, 448, 448) detached from autograd.
    """
    use_gpu = True
    if image.mode != 'RGB':
        image = image.convert("RGB")

    # Resize the input image and preprocess it.
    image = scale_keep_ar_min_fixed(image, 448)
    image = transforms.CenterCrop((448, 448))(image)
    image = transforms.ToTensor()(image)

    # Normalize each channel to the [-1, 1] range used at training time.
    image = transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])(image)

    # Add batch_size axis.
    image = image[None]
    if use_gpu:
        image = image.cuda()
    # torch.autograd.Variable(..., volatile=True) is deprecated (no-op since
    # PyTorch 0.4); detaching from autograd is the modern inference-only form.
    return image.detach()


if __name__ == "__main__":
    # DELF feeder configuration: GPU, IoU/attention thresholds, PCA setup
    # and checkpoint paths. FILEPATH_DELF points at pre-extracted db features.
    feeder_config = {
        'GPU_ID': 2,
        'IOU_THRES': 0.92,
        'ATTN_THRES': 0.21,
        'TARGET_LAYER': 'layer3',
        'TOP_K': 50,
        'PCA_PARAMETERS_PATH': '/home/ubuntu/data/Image_Retrieval/train/repo/devel/pca/pca.h5',
        'PCA_DIMS': 60,
        'USE_PCA': True,
        'SCALE_LIST': [1.0], #[0.7071,0.842,0.91, 1.0],
        'LOAD_FROM': '/home/ubuntu/data/Image_Retrieval/train/repo/devel/keypoint/ckpt/bestshot.pth.tar',
        'ARCH': 'resnet50',
        'EXPR': 'devel',
        'WORKERS':1,
        'FILEPATH_DELF':r'/home/ubuntu/data/Image_Retrieval/dataset/test_index_pca0.delf',
    }
    # load tongue feature model
    load_model()

    data_tongue_path = r'/home/ubuntu/data/Image_Retrieval/dataset/feature_test/0'
    feature_list_path = r'/home/ubuntu/data/Image_Retrieval/dataset/feature_list.pk'
    prefix = r'/home/ubuntu/data/Image_Retrieval/dataset/query/0/'
    dateset_files = r'/home/ubuntu/data/Image_Retrieval/dataset/feature_test'
    cluster_level_1_path = r'/home/ubuntu/data/Image_Retrieval/dataset/cluster_level_1.pk'

    torch.backends.cudnn.benchmark = True
    device = torch.device("cuda:0")
    import shutil
    # read feature
    # NOTE(review): pickle.load is safe only because these files are produced
    # locally by this project; never load untrusted pickles.
    feature_dic = {}
    data_features = pickle.load(open(feature_list_path, 'rb'))
    for feature in data_features:
        # map filename -> pre-computed tongue feature vector
        feature_dic[feature[0]] = feature[-1]
    # read cluster feature offline
    # list cluster[0]:center_,
    # cluster[1:]:nearest node image_name
    with open(cluster_level_1_path, 'rb') as file:
        cluster = pickle.load(file)
        # list ->dict ->filename,location_np_list,descriptor_np_list,attention_np_list
        # outputs[0]['attention_np_list']
        data_features = pickle.load(open(feeder_config['FILEPATH_DELF'], 'rb'))
        # index the offline DELF features by filename for O(1) lookup below
        dic_filename_descrip = {}
        for i in range(len(data_features)):
            feature_config = {
                'location_np_list': data_features[i]['location_np_list'],
                'descriptor_np_list': data_features[i]['descriptor_np_list'],
            }
            dic_filename_descrip.update({data_features[i]['filename'][0]: feature_config})
    start = time.time()
    # query handling: extract DELF features for the single query image
    query = [os.path.join(prefix, '5043.jpg')]
    pil_image = []
    byte_image = []
    for _, v in enumerate(query):
        pil, byte = get_and_cache_image(v)
        pil_image.append(pil)
        byte_image.append(byte)

    myfeeder = Feeder(feeder_config)
    query_outputs = myfeeder.feed_to_compare(query, pil_image)

    # assign the query to its nearest first-level cluster centroid
    # (squared Euclidean distance of the first query descriptor to each center)
    sample = query_outputs[0]['descriptor_np_list'][0]
    dist = np.sum(np.multiply(sample - cluster[0], sample - cluster[0]), 1)
    centroid_label = np.argmin(dist)
    # map centroid label -> position in the cluster list
    cluster_index = {}
    for i in range(1,len(cluster)):
        cluster_index.update({cluster[i][0]:i})
    cluster_1 = cluster[cluster_index[centroid_label]][1]

    # descend one level: nearest second-level centroid within that cluster
    dist1 = np.sum(np.multiply(sample - cluster_1[0], sample - cluster_1[0]), 1)
    centroid_label = np.argmin(dist1)

    # candidate image filenames attached to the chosen second-level centroid
    img_list = cluster_1[1][centroid_label+1]

    score_list_nearest = []

    # RANSAC-score the query against every candidate in the selected cluster
    for num, file in enumerate(img_list):

        outputs = [query_outputs[0],dic_filename_descrip[file]]
        # print(num)
        # placeholder byte payloads: the side-by-side comparison image is
        # discarded here -- presumably the matcher tolerates non-image bytes
        # when only the score is consumed; TODO confirm.
        byte_image = [1,2]
        # result_image_byte, att1, att2, score = get_result(myfeeder, query)
        result_image_byte, score = get_single_result(outputs, byte_image)
        score_list_nearest.append((score,file))
    # keep the top_k candidates by inlier score, highest first
    top_k = 130
    score_list = sorted(score_list_nearest ,key=lambda x:x[0],reverse=True)[:top_k]

    # debug timing marker for the DELF matching stage
    print(time.time() - start,333333333333333)

    # re-rank survivors by cosine similarity of tongue feature vectors
    query_feature = feature_tongue(query[0])
    similar_list = []
    query_vec = VectorGeneration().represent_tongue(query_feature)
    for score_image,image in score_list:
        dataset_vec = VectorGeneration().represent_tongue(feature_dic[image])
        score_tongue_vec = MedicalRecordRetrieval()._cal_cosine(query_vec, dataset_vec)
        print((image,score_image,score_tongue_vec))
        print(query_feature)
        print(feature_dic[image])
        similar_list.append((image,score_image,score_tongue_vec))


    # def  cluster_thread(file):
    #     outputs = [query_outputs[0], dic_filename_descrip[file]]
    #
    #     byte_image = [1, 2]
    #     # result_image_byte, att1, att2, score = get_result(myfeeder, query)
    #     result_image_byte, score = get_single_result(outputs, byte_image)
    #     return score,file
    #
    # pool = ThreadPoolExecutor(max_workers=16)
    # futures = {
    #     pool.submit(
    #         cluster_thread,
    #         file
    #     ):
    #         file for file in img_list
    # }
    # for future in as_completed(futures):
    #     score,file = future.result()
    #     score_list_nearest.append(score)
    #     image_list_nearest.append(file)

 # feature_list = []
    # count = 0
    # for file in os.listdir(data_tongue_path):
    #
    #     feature_list.append([file,feature_tongue(os.path.join(data_tongue_path,file))])
    #     count += 1
    #     print(count)
    # f = open(feature_list_path, 'wb')
    # pickle.dump(feature_list, f)
