import os
import time
import base64
import requests

import numpy as np
import pandas as pd

import shutil

from copy import deepcopy
from openpyxl import Workbook
from openpyxl.drawing.image import Image

from hashlib import md5 as MD5

from collections import Counter
try:
    import imagesize
    from cvio import cvio
except Exception as e:
    print(e)
    from . import imagesize
    from .cvio import cvio    

def encodeImg(imgName):
    """Read the file at *imgName* and return its contents base64-encoded (bytes)."""
    with open(imgName, 'rb') as handle:
        raw = handle.read()
    return base64.b64encode(raw)


def conv2hash(x):
    """Return the hex MD5 digest of str(x), used as a stable identity key."""
    encoded = str(x).encode('utf-8')
    return MD5(encoded).hexdigest()

def post_http(url, headers=None, data=None):
    """POST *data* as a JSON body to *url* and return the decoded JSON response.

    Args:
        url: Full endpoint URL.
        headers: Optional dict of HTTP headers.
        data: Optional JSON-serializable payload.

    Returns:
        The response body parsed into a dict.
    """
    # Fix: the original used mutable default arguments ({}), which are shared
    # across calls and a classic Python pitfall; default to None instead.
    if headers is None:
        headers = {}
    if data is None:
        data = {}
    res = requests.post(url=url, headers=headers, json=data).json()
    return res

def get_url(ip, port, route):
    """Join host, port and route into a full service URL, echo it, and return it."""
    url = '{}:{}/{}'.format(ip, port, route)
    print(url)
    return url

def count_db(ip, port, userName=None, route='10104', save=None):
    """Query feature-database counts from the service and print a summary.

    Args:
        ip: Service host including scheme, e.g. 'http://1.2.3.4'.
        port: Service port as a string.
        userName: User database to query; None/'' queries the learned (global) library.
        route: Sub-route within the user database.
        save: Optional .xlsx path; when set, per-class counts are written there.

    Returns:
        The raw JSON response dict from the service.
    """
    if userName in (None, ''):
        data = {}
    else:
        data = dict(userName=userName, route=route)

    # data == {}            -> service reports the learned (global) matcher
    # data with userName    -> service reports that user's database/route

    url_route = 'online_modeling/count_dbase'
    url = get_url(ip, port, url_route)
    res = post_http(url=url, data=data)
    
    if res['status'] == 1:
        print('查询成功')
        print('用户库', res['count']['users'])
        print('在线用户', res['count']['online'])
        ncls = len(res['count']['count'])  # number of SKU classes
        ntotal = 0  # total stored features across classes
        nol = 0  # features currently loaded in memory
        count = dict(类别=[], 总库数量=[], 已加载数量=[])
        for k, v in res['count']['count'].items():
            # v['N'] = stored feature count, v['n'] = loaded feature count
            ntotal += v['N']
            nol += v['n']
            count['类别'].append(k)
            count['总库数量'].append(v['N'])
            count['已加载数量'].append(v['n'])

        # Warn when the queried user database is missing or not loaded.
        if userName and userName not in res['count']['online']:
            if userName not in res['count']['users']:
                print('用户库不存在 "%s"' % (userName))
            else:
                print('用户库不在线 "%s"' % (userName))
        if userName in (None, ''):
            print('已学习库', '%d款SKU 总%d条特征 已加载%d条' % (ncls, ntotal, nol))
        else:
            if route in (None, ''):
                print('用户库"%s"路由' % userName, res['count']['routes'])
            else:
                print('%s/%s' % (userName, route), '%d款SKU 总%d条特征 已加载%d条' % (ncls, ntotal, nol))
        if save not in (None, ''):
            # Fall back to placeholder values so the Excel sheet name is valid.
            if userName in (None, ''):
                userName = '已学习库'
            if route in (None, ''):
                route = ''
            pd.DataFrame(count).to_excel(save, sheet_name='%s%s' % (userName, route), index=False)
    else:
        print('查询失败')

    return res

def delete_db(ip, port, userName, route, labels=None):
    """Delete features for the given labels (or all, when None) from userName/route.

    *labels* may be a list of label strings or a path to a text file with one
    label per line; '' is treated the same as None.
    """
    url = get_url(ip, port, 'online_modeling/delete_dbase')
    if labels == '':
        labels = None
    if isinstance(labels, str):
        # A string is interpreted as a label-list file, one label per line.
        with open(labels) as fp:
            labels = [line.replace('\n', '') for line in fp.readlines() if line != '\n']
    payload = dict(userName=userName, route=route, labels=labels)
    res = post_http(url=url, data=payload)
    if res['status'] == 1:
        print('%s/%s 成功删除%d条' % (userName, route, res['num']))
    else:
        print('%s/%s 删除失败 %s' % (userName, route, res['info']))
    return res

def insert_db(ip, port, userName, route, src, batch_size=256):
    """Extract features from all images under *src* and insert them into the
    userName/route database in batches.

    Each image's label is its parent directory name and its identity is the
    MD5 hash of its path. Stops and prints the response on the first failed
    batch.

    Args:
        ip/port: Service address (ip includes the scheme).
        userName/route: Target user database and sub-route.
        src: Image directory, scanned recursively.
        batch_size: Number of images per HTTP request.
    """
    url_route = 'online_modeling/insert_dbase'
    url = get_url(ip, port, url_route)

    print('加载本地图片', src)
    images = cvio.load_image_list(src, recursive=True)
    num = len(images)
    for i in range(0, num, batch_size):
        j = min(i + batch_size, num)
        batched = []
        print('特征提取 %s/%s [%d/%d] %.2f%%' % (userName, route, j, num, j / num * 100))
        for x in range(i, j):
            path = images[x]
            b64im = str(encodeImg(path), encoding='utf-8')
            # The parent directory name doubles as the SKU label.
            label = path.split(os.sep)[-2]
            identity = conv2hash(path)
            img = dict(base64data=b64im, label=label, identity=identity)
            batched.append(img)
        data = dict(userName=userName, route=route, img_infos=batched)
        res = post_http(url, data=data)
        if res['status'] != 1:
            print('出现异常', res)
            return

def load_index_ul(ip, port, userName, route):
    """Ask the service to load the index of userName/route into memory."""
    url = get_url(ip, port, 'online_modeling/load_udb_index')
    payload = dict(userName=userName, route=route)
    res = post_http(url, data=payload)
    if res['status'] == 1:
        print('加载成功', '%s/%s' % (userName, route))
    else:
        print('加载失败', res['info'])
    return res


def empty_udb_cache(ip, port, userName=None):
    """Release cached user databases on the server.

    Args:
        ip: Service host including scheme.
        port: Service port as a string.
        userName: User whose cache to release; None or '' releases all caches.

    Returns:
        The raw JSON response dict from the service.
    """
    url_route = 'online_modeling/empty_udb_cache'
    url = get_url(ip, port, url_route)    

    # '' means the same as None: release every user's cache.
    userName = None if userName == '' else userName
    res = post_http(url, data=dict(userName=userName))

    if res['status']:
        if userName is None:
            print('成功释放所有缓存')
        else:
            print('成功释放用户“%s”缓存' % userName)
    else:
        print('缓存释放失败', res['info'])
    # Consistency fix: return the response like every other service wrapper
    # in this module (the original returned None).
    return res

# Skeleton of a labelme annotation document. gen_mask_to deep-copies this
# template, fills in the image metadata, and appends one shape per detection.
_labelme_format = {
    "imagePath": "",
    "imageHeight": 0,
    "imageWidth": 0,
    "imageData": None,
    "lineColor": [
        0,
        255,
        0,
        128
    ],
    "fillColor": [
        255,
        0,
        0,
        128
    ],
    "flags": {},
    "shapes": [
        # {
        #     "line_color":None,
        #     "fill_color":None,
        #     "label":"",
        #     "shape_type":"polygon",
        #     "flags":{},
        #     "points":[],
        # }
    ]
}


def gen_mask_to(saveroot, imgpath, data, filter_daiding=False, with_score=False, keep_labels=None):
    """Convert a detection response into a labelme-format JSON file.

    Args:
        saveroot: Directory the JSON is written to (created if missing).
        imgpath: Image file name/path; its basename becomes imagePath and its
            stem names the output JSON file.
        data: Service payload with 'boxInfo' (detections) and 'imageInfo'
            (original 'width'/'height' in pixels).
        filter_daiding: Skip detections whose skuName contains 'daiding'.
        with_score: Append '_<score>' to each shape's label.
        keep_labels: Iterable of labels to keep, or a path to a text file with
            one label per line; all other detections are dropped.
    """
    if not os.path.exists(saveroot):
        os.makedirs(saveroot)
    # A string keep_labels is treated as a label-list file, one label per line.
    if keep_labels not in ('', None) and isinstance(keep_labels, str):
        with open(keep_labels) as fp:
            keep_labels = [f.replace('\n', '') for f in fp.readlines() if f != '\n']   
        
    boxInfo = data['boxInfo']
    imageInfo = data['imageInfo']
    width = imageInfo['width']
    height = imageInfo['height']
    Mask = deepcopy(_labelme_format)
    Mask['imagePath'] = os.path.basename(imgpath)
    Mask['imageWidth'] = width
    Mask['imageHeight'] = height
    # Template for one labelme polygon shape; deep-copied per detection.
    Shape = {
        "line_color": None,
        "fill_color": None,
        "label": "",
        "shape_type": "polygon",
        "flags": {},
        "points": [],
    }
    layers = []  # shelf-layer index per shape, when the service provides one
    labels = []  # label per kept shape, used for the fallback alphabetic sort
    for sku in boxInfo:
        if filter_daiding and 'daiding' in sku['skuName']:
            continue
        shape = deepcopy(Shape)
        label = sku['skuName']#.split('-')[0]
        if keep_labels not in (None, '') and label not in keep_labels:
            continue
        labels.append(label)
        if 'layer' in sku and sku['layer'] != '':
            layer = int(sku['layer'])
            layers.append(layer)
            shape['layer'] = layer
        if with_score:
            score = sku['score']
            label = '%s_%.2f' % (label, float(score))
        shape['label'] = label
        # Box corners in pixel coordinates; the service's locations are
        # normalized to [0, 1] relative to the image size.
        shape['points'] = [[int(sku['location']['xmin'] * width), int(sku['location']['ymin'] * height)],
                           [int(sku['location']['xmin'] * width),
                            int(sku['location']['ymax'] * height)],
                           [int(sku['location']['xmax'] * width),
                            int(sku['location']['ymax'] * height)],
                           [int(sku['location']['xmax'] * width), int(sku['location']['ymin'] * height)]]
        if 'index' in sku:
            shape['index'] = sku['index']
        Mask['shapes'].append(shape)
    # Sort shapes by layer when layers are present (and daiding was filtered),
    # otherwise alphabetically by label.
    # NOTE(review): the layer branch assumes every kept shape carried a
    # 'layer' value — otherwise argsort(layers) misaligns with shapes; confirm.
    if layers and filter_daiding:
        Mask['shapes'] = [Mask['shapes'][i]
                          for i in np.argsort(np.array(layers))]
    else:
        Mask['shapes'] = [Mask['shapes'][i]
                          for i in np.argsort(np.array(labels))]
    cvio.write_ann(Mask, os.path.join(
        saveroot, os.path.splitext(imgpath)[0]+'.json'))

def imdetect(ip, port, src, save=None, recursive=False, mode='skuDet', tenantId='10104',
        userName=None, route='10104', lthr=0.5, ltopk=5, uthr=0.5, utopk=5,
        filter_daiding=False, with_score=True, empty=True, only_main=True, keep_labels=None,):
    """Run detection on every image under *src* and save labelme JSON results.

    Args:
        ip/port: Service address (ip includes the scheme).
        src: Image file or directory; *recursive* controls subdirectory scanning.
        save: Output root for JSON files; defaults to each image's own directory.
        mode: Detection route under detService, e.g. 'skuDet' or 'layerDet'.
        tenantId: Tenant header sent with each request.
        userName/route: Optional user database, validated before detection.
        lthr/ltopk, uthr/utopk: Score threshold and top-k for the learned and
            user matchers respectively.
        empty/only_main: Flags forwarded to the detection service.
        filter_daiding/with_score/keep_labels: Forwarded to gen_mask_to.
    """
    if userName not in (None, '', 'None'):
        # Validate the user database before running detection.
        url_count = 'online_modeling/count_dbase'
        url_count = get_url(ip, port, url_count)
        res = post_http(url=url_count, data=dict(userName=userName, route=route))
        # Fix: the original message contained a %s placeholder but never
        # interpolated userName, so failures printed a literal "%s".
        assert res['status'] == 1, '查询用户"%s"失败, 请先手动加载用户库' % (userName)
        assert userName in res['count']['users'], '用户库"%s"不存在, 请先创建' % (userName)
        # assert userName in res['count']['online'], '用户库"%s"不在线, 请先加载' % (userName)
    else:
        userName = None

    url_route = 'detService/%s' % mode
    url = get_url(ip, port, url_route)

    headers = dict(tenantId=tenantId)

    print('加载图片', src)
    images = cvio.load_image_list(src, recursive=recursive)
    if not len(images):
        print('无图片，退出')
        return
    ntotal = len(images)
    start = time.time()
    for i, path in enumerate(images, 1):
        name = os.path.basename(path)
        b64im = str(encodeImg(path), encoding='utf-8')
        data = dict(base64Data=b64im,
                userName=userName, route=route,
                lscore=lthr, ltopk=ltopk,
                uscore=uthr, utopk=utopk,
                empty=empty, only_main=only_main)
        result = post_http(url, headers, data)
        info = '[%d/%d](%.2f%%) %s' % (i, ntotal, i / ntotal * 100, name)
        if result['status'] == 0:
            print('%s 识别失败 %s' % (info, result['data']))
            continue
        ncls = len(result['data']['skuStat'])
        nbox = len(result['data']['boxInfo'])
        print('%s 识别成功 检出%d类%d框' % (info, ncls, nbox))
        # Choose the output directory: next to the image by default, or under
        # save (mirroring the parent folder when scanning recursively).
        if save is None:
            save1 = os.path.dirname(path)
        elif recursive:
            save1 = os.path.join(save, path.split(os.sep)[-2])
        else:
            save1 = save

        gen_mask_to(
            save1, name, result['data'], filter_daiding=filter_daiding, with_score=with_score, keep_labels=keep_labels)
    end = time.time()
    print('识别完成 共%d张图片 耗时%.2fs 平均%.2fs/张' % (ntotal, end - start, (end - start) / ntotal))

def search_similar_images(ip, port, src, save, score=0.5, topk=5, batch_size=256, learned_matcher=True, userName=None, route='10104'):
    """Match every image under *src* against the feature database and write an
    Excel report with thumbnails and the top-k matched labels/scores.

    Args:
        ip/port: Service address (ip includes the scheme).
        src: Image directory, scanned recursively.
        save: Output .xlsx path.
        score: Minimum match score forwarded to the service.
        topk: Matches per image, capped at 5 (the report has 5 result columns).
        batch_size: Images per HTTP request.
        learned_matcher/userName/route: Matcher selection forwarded to the service.
    """
    url_route = 'detService/search_similar_images'
    url = get_url(ip, port, url_route)

    print('加载图片', src)
    images = cvio.load_image_list(src, recursive=True)
    ntotal = len(images)
    # Robustness fix: bail out early instead of dividing by zero in the
    # final summary when the directory holds no images.
    if not ntotal:
        print('无图片，退出')
        return

    if topk > 5:
        print('topk参数k最大取值5')
        topk = 5
    # Excel column letters; match results start at column D (column index 4).
    columns = ('A', 'B', 'C', 'D', 'E', 'F', 'G', 'H')

    wb = Workbook()
    ws = wb.create_sheet("匹配结果", 0)  # insert as the first sheet
    ws.append(['文件夹', '文件名', '图片'] + ['Top-%d' % (x+1) for x in range(topk)])    
    start = time.time()
    for i in range(0, ntotal, batch_size):
        j = min(i + batch_size, ntotal)
        inputs = []
        paths = []
        for x in range(i, j):
            path = images[x]
            paths.append(path)
            image = dict(base64data=str(encodeImg(path), encoding='utf-8'), label='', identity='')
            inputs.append(image)
        res = post_http(url, data=dict(images=inputs, topk=topk, score=score, 
                learned_matcher=learned_matcher, userName=userName, route=route))
        print('匹配图片 [%d/%d] %.2f%%' % (j, ntotal, j / ntotal * 100))
        if res['status'] == 0:
            print('匹配异常', res['data'])
            return

        # Rows are 1-based and row 1 is the header, hence the i+2 offset.
        for x, (path, result) in enumerate(zip(paths, res['data']), i+2):
            name = os.path.basename(path)
            folder = path.split(os.sep)[-2]
            imgsize = imagesize.get(path)
            s = max(imgsize) / 128  # scale the longer side down to ~128px
            height, width = (imgsize[1] / s, imgsize[0] / s)
            img = Image(path)
            img.width, img.height = (width, height)
            ws.column_dimensions['C'].width = width * 0.6
            ws.row_dimensions[x].height = height * 0.8
            ws.add_image(img, 'C%d' % x)
            ws.column_dimensions['A'].width = 25.0
            ws.column_dimensions['B'].width = 8.0
            ws.cell(row=x, column=1, value=folder)
            ws.cell(row=x, column=2, value=name)
            # Fix: the original reused `j` and `res` here, shadowing the batch
            # end index and the HTTP response; use distinct names.
            for col, hit in enumerate(result, 4):
                _, label, hit_score = hit
                ws.column_dimensions['%s' % (columns[col-1])].width = 25.0
                ws.cell(row=x, column=col, value='%s %.2f' % (label, hit_score))

    dirname = os.path.dirname(save)
    # Fix: when save has no directory part, dirname is '' and makedirs('')
    # raises FileNotFoundError — only create a directory when one is named.
    if dirname and not os.path.exists(dirname):
        print('创建目录', dirname)
        os.makedirs(dirname)
    if os.path.exists(save):
        os.remove(save)
    wb.save(save)
    end = time.time()
    print('匹配完成 共%d张图片 耗时%.2fs 平均%.2fs/张' % (ntotal, end - start, (end - start) / ntotal))
    print('匹配结果详见', save)

def cluster_similar_images(ip, port, src, save, score=0.0, topk=5, batch_size=256, learned_matcher=True, userName=None, route='10104'):
    """Classify images under *src* by their best-matching database label and
    move each file into save/<label>/.

    Images with no match above *score* are left in place and counted as
    unclassified in the final summary.

    Args:
        ip/port: Service address (ip includes the scheme).
        src: Image directory, scanned recursively.
        save: Root directory that receives one sub-directory per label.
        score/topk: Match threshold and number of candidates per image.
        batch_size: Images per HTTP request.
        learned_matcher/userName/route: Matcher selection forwarded to the service.
    """
    url_route = 'detService/search_similar_images'
    url = get_url(ip, port, url_route)

    print('加载图片', src)
    images = cvio.load_image_list(src, recursive=True)
    ntotal = len(images)
    assert ntotal > 0, '目录下无图片 %s' % src

    # Fix: nmis was reset inside the batch loop, so the final summary only
    # counted the misses from the last batch.
    nmis = 0
    start = time.time()
    for i in range(0, ntotal, batch_size):
        j = min(i + batch_size, ntotal)
        inputs = []
        paths = []
        for x in range(i, j):
            path = images[x]
            paths.append(path)
            image = dict(base64data=str(encodeImg(path), encoding='utf-8'), label='', identity='')
            inputs.append(image)
        res = post_http(url, data=dict(images=inputs, topk=topk, score=score, 
                learned_matcher=learned_matcher, userName=userName, route=route))
        print('匹配图片 [%d/%d] %.2f%%' % (j, ntotal, j / ntotal * 100))
        if res['status'] == 0:
            print('匹配异常', res['data'])
            return

        for path, result in zip(paths, res['data']):
            name = os.path.basename(path)
            # Majority vote over the labels of the top-k matches.
            labels = Counter([hit[1] for hit in result])
            nums = list(labels.values())
            if not nums:
                nmis += 1
                continue
            label = list(labels)[np.argmax(nums)]

            save_path = os.path.join(save, label)
            if not os.path.exists(save_path):
                print('创建目录', save_path)
                os.makedirs(save_path)
            print('归类图片', name, label)
            shutil.move(path, os.path.join(save_path, name))

    end = time.time()
    print('图片归类完成 共%d张图片 重归类%d张 耗时%.2fs 平均%.2fs/张' % (ntotal, ntotal - nmis, end - start, (end - start) / ntotal))
    print('图片归类结果详见', save)

if __name__ == '__main__':
    # Manual smoke-test driver: adjust the constants and paths below, then
    # uncomment the call(s) you want to run against the service.
    ip = 'http://192.168.11.25'
    port = '9997'
    userName = 'cola_1'
    route = '10444'#'10104'

    save = r'G:\projects\Tools-1\mvlab\count.xlsx'
    # count_db(ip, port, userName=userName, route=route, save=save)

    # delete_db(ip, port, userName, route=route, labels=[])

    src = r'G:\data\datasets\drink\pesi\ai\pesi_baipai_all(完成部分new\cuts'
    save = r'G:\data\datasets\drink\pesi\ai\pesi_baipai_all(完成部分new\cutsMMM'
    # insert_db(ip, port, userName, route, src, batch_size=256)

    count_db(ip, port, userName=userName, route=route, save=save)

    # NOTE(review): load_db is not defined in this module; load_index_ul
    # appears to be the intended call — confirm before uncommenting.
    # load_db(ip, port, userName, route)

    # empty_udb_cache(ip, port, userName=userName)

    # src = r'G:\data\datasets\drink\pesi\ai\pesi_baipai_all(完成部分new'
    # save = r'G:\data\datasets\drink\pesi\ai\pesi_baipai_all(完成部分new\pred'
    # imdetect(ip, port, src, save, recursive=False, mode='layerDet', tenantId='10104', 
    #     userName=None, route='10104', lthr=0.5, ltopk=5, uthr=0.5, utopk=5, 
    #     filter_daiding=False, with_score=False, empty=True, only_main=True, keep_labels=['daiding_101'])

    # src = r'E:\Documents\玄武AI实验室\内部培训\推理过程描述\similar'
    # save = r'E:\Documents\玄武AI实验室\内部培训\推理过程描述\similar\res1.xlsx'
    # search_similar_images(ip, port, src, save, score=0, topk=3, batch_size=4, 
    #         learned_matcher=True, userName=userName, route=route)
    # save = r'G:\data\yqsl\test\cuts\yl_ww_wz_yw_145G\tmp2'
    # cluster_similar_images(ip, port, src, save, score=0, topk=3, batch_size=256, 
    #         learned_matcher=False, userName=userName, route=route)