from utils import calc_psnr_and_ssim,get_crop_im,get_clothes
from model import Vgg19,MainNet

import os
import numpy as np
from imageio import imread, imsave
from PIL import Image
import time
import torch 
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import pandas as pd

import torchvision.utils as utils


class Trainer:
    """Wraps a feature-extraction model for encoding reference images and
    running similarity search over a directory of query images."""

    def __init__(self, model):
        self.model = model
        self.num_gpu = 1
        self.device = torch.device('cuda')

    def get_reftensor_lr(self, crop, k):
        """Downscale a query crop by factor ``k`` and run it through the model.

        Args:
            crop: cropped query region (PIL image or array-like, HxWxC).
            k: downscale factor; may be fractional (e.g. 1.5).

        Returns:
            Feature tensor produced by ``self.model`` on the resized crop.
        """
        LR_sr = np.array(crop)[:, :, :3]  # drop alpha channel if present

        h1, w1 = LR_sr.shape[:2]
        # Bicubic downscale, then map pixel values from [0, 255] to [-1, 1].
        LR_sr = np.array(Image.fromarray(LR_sr).resize((int(w1 // k), int(h1 // k)), Image.BICUBIC))
        LR_sr = LR_sr / 127.5 - 1.
        # HWC -> NCHW float tensor on the model's device.
        Ref_sr_t = torch.from_numpy(LR_sr.transpose((2, 0, 1))).unsqueeze(0).float().to(self.device)

        return self.model(Ref_sr_t)

    def get_reftensor_ref(self, Ref, k):
        """Prepare a reference image: crop both sides to a multiple of ``k``,
        downscale by ``k``, normalize to [-1, 1].

        Args:
            Ref: HxWxC uint8 image array.
            k: integer downscale factor.

        Returns:
            Image tensor of shape [1, 3, h, w] on ``self.device``.
        """
        Ref = Ref[:, :, :3]  # drop alpha channel if present
        h2, w2 = Ref.shape[:2]
        h2, w2 = h2 // k * k, w2 // k * k  # make sides divisible by k
        Ref = Ref[:h2, :w2, :]

        w3 = w2 // k
        h3 = h2 // k

        # Guard against degenerate sizes: keep the cropped resolution when
        # the downscaled image would be smaller than 4x4.
        if w3 < 4 or h3 < 4:
            w3 = w2
            h3 = h2

        Ref_sr = np.array(Image.fromarray(Ref).resize((w3, h3), Image.BICUBIC))
        # Normalize directly in float (the previous float16 cast needlessly
        # quantized values and was inconsistent with get_reftensor_lr).
        Ref_sr = Ref_sr / 127.5 - 1.

        return torch.from_numpy(Ref_sr.transpose((2, 0, 1))).unsqueeze(0).float().to(self.device)

    def ref(self, inlist, k):
        """Encode reference images through the model.

        Args:
            inlist: list of reference image paths.
                NOTE(review): only the first entry is processed (``inlist[:1]``)
                — looks intentional from the original code; confirm.
            k: downscale factor forwarded to get_reftensor_ref.

        Returns:
            dict mapping image filename -> uint8 feature array.
        """
        self.model.eval()
        res = dict()
        for i, ref_path in enumerate(inlist[:1]):
            imgpath = ref_path.split('/')[-1]
            print(i, imgpath)
            im = Image.open(ref_path)
            w, h = im.size
            # Skip images too small to produce useful features.
            if w < 40 or h < 40:
                print(i, imgpath, '=============================')
                continue
            with torch.no_grad():
                Ref_sr_t = self.get_reftensor_ref(np.array(im), k)
                re = self.model(Ref_sr_t)
                refsr_lv3_unfold = re.squeeze().cpu().numpy()
                torch.cuda.empty_cache()
            # Rescale features back to a uint8 range for compact storage.
            refsr_lv3_unfold = refsr_lv3_unfold * 127.5
            res[imgpath] = refsr_lv3_unfold.astype('uint8')
        return res

    def search_list(self, indir, file_name, targets_info):
        """Run similarity search for every image in ``indir``.

        Args:
            indir: directory of query images.
            file_name: suffix used when saving the result .npy file.
            targets_info: dict with keys "tar_names" and "targets"
                (precomputed gallery features).

        Returns:
            dict mapping query id -> [rank, similarity]; also saved to
            'search_res<file_name>.npy'.
        """
        self.model.eval()
        search_res = dict()
        inlist = os.listdir(indir)
        inlist.sort()

        with torch.no_grad():
            for i, imgpath in enumerate(inlist):
                since = time.time()
                lrpath = os.path.join(indir, imgpath)  # original image
                lrid = lrpath.split('/')[-1].split('.')[0]

                _, crop = get_crop_im(lrpath)  # crop a patch from the clothes region

                # Query features at two scales; the better-scoring scale wins.
                lr1 = self.get_reftensor_lr(crop, 1.5)  # e.g. [1, 128, 21, 21]
                lr2 = self.get_reftensor_lr(crop, 2)  # e.g. [1, 128, 15, 15]

                tmp_res1, tmp1_sim = get_scale_res(targets_info, lr1, lr2, lrid)
                search_res[lrid] = [tmp_res1, tmp1_sim]

                print('-----lrid:{}, -----rank: {} ----, ------sim: {:.4f}, -----elapsed: {:.2f}' \
                    .format(lrid, tmp_res1, tmp1_sim, time.time() - since))
                torch.cuda.empty_cache()

            np.save('search_res' + file_name + '.npy', search_res)
            return search_res


def get_scale_res(targets_info, lr1, lr2, lrid):
    """Match two query scales against the gallery and return the better one.

    Args:
        targets_info: dict with "tar_names" (list of gallery image names) and
            "targets" (stacked gallery feature tensors; first dim = gallery size).
        lr1, lr2: query feature maps at two scales (pre-unfold).
        lrid: id (filename stem) of the query image, used to find its rank.

    Returns:
        [rank, similarity] for whichever scale ranked the query's own id
        with the higher similarity.
    """
    lr1 = query_unfold(lr1)  # [1, C*k*k, H*W]
    lr2 = query_unfold(lr2)

    res1 = []
    res2 = []
    with torch.no_grad():
        tar_names = targets_info["tar_names"]
        targets = targets_info["targets"]
        batch_size = 1024

        # Score the gallery in fixed-size batches.  Python slicing clamps the
        # final (possibly shorter) batch automatically, so no remainder
        # handling is needed (the original max(batch_size, remainder) always
        # equaled batch_size anyway, since remainder < batch_size).
        for ind in range(0, targets.shape[0], batch_size):
            b_targets = targets[ind:ind + batch_size].to(torch.device('cuda'))
            b_tarnames = tar_names[ind:ind + batch_size]
            res1 = res1 + get_maxrel(lr1, b_targets, b_tarnames)
            res2 = res2 + get_maxrel(lr2, b_targets, b_tarnames)

        res1, res1_sim = get_res(res1, lrid)  # names sorted by similarity
        res2, res2_sim = get_res(res2, lrid)
        # Rank of the query's own id at each scale.  get_res appends lrid
        # (sim 0) as a sentinel, so index() always succeeds; if the id is in
        # the gallery its true rank is found first.
        a = res1.index(lrid)
        b = res2.index(lrid)
        torch.cuda.empty_cache()
        return [a, res1_sim[a]] if res1_sim[a] > res2_sim[b] else [b, res2_sim[b]]


def get_res(res,lrid):
    res = sorted(res, key=lambda x: x[1], reverse=True)  # 按照某一列进行排序的方法
    res_20 = [i[0].split('.')[0] for i in res]
    res_20_sim = [i[1] for i in res]
    res_20.append(lrid)
    res_20_sim.append(0)
    return res_20, res_20_sim


def target_unfold(target):
    """Extract 3x3 patches (stride 1, pad 1) from a gallery feature map and
    L2-normalize each patch vector.  Output shape: [N, Hr*Wr, C*3*3]."""
    patches = F.unfold(target, kernel_size=(3, 3), padding=1).permute(0, 2, 1)
    return F.normalize(patches, dim=2)


def query_unfold(query):
    """Extract 3x3 patches on a stride-3 grid (pad 1) from a query feature
    map and L2-normalize along the patch-vector dimension.
    Output shape: [N, C*3*3, H*W]."""
    cols = F.unfold(query, kernel_size=(3, 3), padding=1, stride=3)
    return F.normalize(cols, dim=1)


def get_maxrel(lr1, tar, tar_name):
    """Score a batch of gallery items against one unfolded query.

    For each gallery item: the fraction of query patches whose best match
    against any gallery patch reaches the 0.55 similarity threshold.

    Args:
        lr1: unfolded, normalized query, [N, C*k*k, H*W].
        tar: unfolded, normalized gallery batch, [b, Hr*Wr, C*k*k].
        tar_name: names aligned with the batch dimension of ``tar``.

    Returns:
        list of (name, score) pairs; each score is a 0-dim tensor in [0, 1].
    """
    # Pairwise similarity between every gallery patch and every query patch.
    rel = torch.matmul(tar, lr1)  # [b, Hr*Wr, H*W]
    # Best gallery match per query patch, binarized at the threshold
    # (equivalent to zeroing values < 0.55 and setting the rest to 1).
    best = torch.max(rel, dim=1).values
    hits = (best >= 0.55).float()
    scores = hits.sum(dim=1) / hits.shape[1]
    return list(zip(tar_name, scores))
