import json,requests,os
import xlrd
import urllib.parse
import time
import datetime
import re
import urllib.request

import os,csv,random,queue,multiprocessing
import numpy as np
# Local tongue-image data directory (NOTE(review): appears unused in this
# chunk of the file -- verify before removing).
pic_path = r'/home/ubuntu/data_tongue'
# feature_path = r'/home/ubuntu/data/main_label_feature.csv'
# feature_list = csv.reader(open(feature_path,'r'))
# Mapping of image-name fragment -> feature labels; only populated by the
# commented-out code at the bottom of the file.
feature_dict = {}
# Output path for label features (only used by commented-out code below).
feature_txt = r'/home/ubuntu/data/label_feature.txt'
import threading
#
# class Throttle:
#
#     def __init__(self, delay):
#         # required interval between two requests to the same domain
#         self.delay = delay
#         # key: netloc, value: lastTime -- records the timestamp of the last visit to each domain
#         self.domains = {}
#
#     # compute the required wait time, then sleep for it
#     def wait(self, url):
#         if self.delay <= 0:
#             print('delay ='+self.delay)
#             return
#
#         domain = urllib.parse.urlparse(url).netloc
#         lastTime = self.domains.get(domain)
#
#         if lastTime is not None:
#             # compute the remaining wait time
#             wait_sec = self.delay - (datetime.datetime.now() - lastTime).seconds
#             if wait_sec > 0:
#                 time.sleep(wait_sec)
#
#         self.domains[domain] = datetime.datetime.now()
#
# # from openpyxl import load_workbook
# # file_xlsx =r'/home/ubuntu/data/single_disease.xlsx'
# # file_inquiry_tongue =r'/home/ubuntu/data/inquiry_tongue.xlsx'
# # wb = load_workbook(file_inquiry_tongue)
# # sheet = wb.sheetnames[0]
# # ws = wb[sheet]
# # rows = ws.rows
# #
# # columns = ws.columns
# # files = []
# # num = 0
# # for row in rows:
# #     line = [col.value for col in row]
# #     num += 1
# #     if num > 1:
# #         files.append(line[-1][1:-1].split(','))
# #         print(line)
#
# # 获取数据
# # data = xlrd.open_workbook(file_inquiry_tongue)
# # # 获取sheet 此处有图注释（见图1）
# # table = data.sheet_by_name('Sheet1')
# # # 获取总行数
# # nrows = table.nrows
# # # 获取总列数
# # ncols = table.ncols
# # rowvalue = table.row_values(0)
# # col_values = table.col_values(11)
# # col_values_id = table.col_values(0)
# # # 获取一个单元格的数值，例如第5行第6列
# # cell_value = table.cell(5, 6).value
# # file_path = json.loads(col_values[11])['facePicture'][0]
#
# Directory where downloaded tongue images are stored and read back from.
root = r'/home/ubuntu/data/inquiry_tongue_all_data/inquiry_tongues'
# Input record file (presumably one tab-separated record per line, per the
# commented-out parsing code below -- TODO confirm).
txt_path = r'/home/ubuntu/data/inquiry_tongue_all_data/inquiry_tongue_all.txt'
# Append-mode output file for (filename, prediction) results.
# NOTE(review): opened at import time and never explicitly closed.
f_open = open('/home/ubuntu/data/inquiry_tongue_all_data/feature_images.txt', 'a+')


#
def get_url(files):
    """Turn 'https:' file URLs into direct UFile download URLs.

    Entries equal to 'NULL' or not starting with 'https:' are dropped.
    For each kept entry, the last path segment has its first 8 characters
    stripped (presumably an 'inquiry-' prefix -- TODO confirm) and is
    appended to the UFile host.

    Returns a list of rewritten URLs, in input order.
    """
    host = r'http://inquiry.cn-bj.ufileos.com/'
    return [
        host + item.rsplit('/', 1)[-1][8:]
        for item in files
        if item != 'NULL' and item.startswith('https:')
    ]
#
def spider(file):
    """Download *file* (a direct image URL) into *root* as a .jpg.

    The local filename is 'inquiry-' + the URL's last path segment.
    The download is skipped when the target file already exists.

    Raises requests.HTTPError on a non-2xx response.
    """
    # BUG FIX: the original concatenated root + 'inquiry-' + name with no
    # path separator (root has no trailing slash), which produced paths
    # like '.../inquiry_tonguesinquiry-*.jpg'.
    path = os.path.join(root, 'inquiry-' + file.rsplit('/', 1)[-1] + '.jpg')
    if not os.path.exists(path):
        print(path)  # progress log: which file is being fetched
        r = requests.get(file)
        r.raise_for_status()
        with open(path, 'wb') as f:
            f.write(r.content)

import io,base64

def feature_list(num, file, host='117.50.18.50', port=28097):
    """POST the image *file* (relative to *root*) to a TF-Serving model.

    Parameters
    ----------
    num : int
        Sequence number, used only for progress logging.
    file : str
        Filename under the module-level *root* directory.
    host : str
        TF-Serving host (default keeps the original hard-coded address).
    port : int
        TF-Serving REST port (default keeps the original hard-coded port).

    Returns
    -------
    tuple
        (file, first prediction dict of the model's JSON response).

    Raises
    ------
    requests.RequestException / KeyError on a failed or malformed response.
    """
    # Read and base64-encode the image; 'with' ensures the handle is
    # closed (the original leaked the file object).
    with open(os.path.join(root, file), 'rb') as img:
        img_b64encode = base64.b64encode(img.read()).decode()
    time.sleep(1)  # crude client-side rate limiting between requests
    tfclient_url = 'http://%s:%d/v1/models/default:predict' % (host, port)
    data = {'instances': [{'input_image': {'b64': img_b64encode}}]}
    response = requests.post(tfclient_url, timeout=60, json=data).json()['predictions'][0]
    print(num, response)
    return file, response


from multiprocessing import Pool


if __name__ == "__main__":
    f = open(txt_path, 'r')
    files = []
    nu = 0
    for num,file in enumerate(os.listdir(root)):
        print(num)
        files.append(file)
            # data = feature_list(file)
            # print(num)
    # for data in f.readlines():
    #     data_tmp = data.split('\t')[-1].strip('\n')
    #     if len(data_tmp) > 20 :
    #         for i in data_tmp.split(','):
    #
    #             if i.startswith('https'):
    #                 # print(i)
    #                 date = int(i.split('/')[-1].split('_')[-1][:8])
    #                 if date > 20181216:
    #                     nu += 1
    #                     print(nu)
    #                     files.append(i)
    # url_queue = feature_list(files)
    # for file in url_queue:
    #     spider(file)
    # pool = Pool(processes=30)  # 定义最大的进程数
    # pool.map(files, feature_list)  # p必须是一个可迭代变量。
    # pool.close()
    # pool.join()

    from concurrent.futures import ThreadPoolExecutor, as_completed
    pool = ThreadPoolExecutor(max_workers=40)
    futures = {
        pool.submit(
            feature_list,
            num,
            file
        ):
            file for num,file in enumerate(files)
    }
    result = []
    for future in as_completed(futures):
        f_open.write(str(future.result())+'\n')
    print(result)



    # result = feature_list(files[0])

# def load_imgs(files):
#     threads = []  # 线程列表
#     url_queue = get_url(files)
#     queue_count = 3  # 线程数量
#
#     for i in url_queue:
#
#         t = threading.Thread(target=spider, args=i) # 创建线程，第一个参数为线程要调用的函数，第二个参数为函数的参数
#
#         threads.append(t) # 把线程加入队列
#
#     for t in threads : # 线程开始
#         t.start()
#     for t in threads : # 等待所有线程结束
#         t.join()
#
# if __name__ == '__main__':
#     f = open(txt_path, 'r')
#     files = []
#     nu = 0
#     for data in f.readlines():
#         data_tmp = data.split('\t')[-1].strip('\n')
#         if len(data_tmp) > 20 and nu < 100:
#             nu += 1
#             for i in data_tmp.split(','):
#                 files.append(i)
#     # if not os.path.exists(root):
#     #     os.mkdir(root)
#     # t = Throttle(0.2)
#
#     pool = multiprocessing.Pool(processes=4)
#     pool.map(load_imgs, files)
#     pool.close()
#     pool.join()
# # for file in feature_list:
# #     file_part = file[1][8:].split(r'!')[0]
# #     feature_dict[file_part] = file[2:]
#
# # f = open(feature_txt,'w')
# # for i in range(1,len(files)):
#
#     # if col_values[i] != 'NULL':
#     #     print()
#         # file_path1 = json.loads(col_values[i])['facePicture']
#         # file_path2 = json.loads(col_values[i])['prescPicture']
#         # file_path3 = json.loads(col_values[i])['tonguePicture']
#         # pid = col_values_id[i]
#         # if len(file_path3)>0:
#         #     for k in range(len(file_path3)):
#         #         print(file_path3[k])
#         #         file = file_path3[k].split(r'/')[-1]
#         #         if file.startswith('inquiry-'):
#         #             # file_part = file[8:].split(r'!')[0]
#         #
#         #             if not os.path.exists(os.path.join(r'/home/ubuntu/data/dieseas_test/0', file+'.jpg')):
#         #
#         #                 os.symlink(os.path.join(r'/home/ubuntu/data/pics_disease', file+'.jpg'),
#         #                            os.path.join('/home/ubuntu/data/dieseas_test/0', file+'.jpg'))
#                     # if file_part  in feature_dict.keys():
#                     #     f.write(pid+' '+file_path3[k]+' '+ str(feature_dict[file_part][:-9])+'\n')
#         # numc += len(file_path3)
#         # numb +=1
#         # print(i,len(file_path3))
#         # if i == 652: print(file_path2)
#         # file_path = file_path1 + file_path2 + file_path3
#         # file_path = [i for i in file_path if len(i) != 0]
#         # for file in file_path:
#         #     # print(file)
#         #     path = root + file.split(r'/')[-1] + '.jpg'
#         #     # if file.split(r'/')[-1][8:].startswith('-'):
#         #     #     file = r'http://inquiry.cn-bj.ufileos.com/' + file.split(r'/')[-1][9:]
#         #     # else:
#         #     #     file = r'http://inquiry.cn-bj.ufileos.com/'+file.split(r'/')[-1][8:]
#         #     if not os.path.exists(path):
#         #         print(path)
#         #
#         #         r = requests.get(file)
#         #         t.wait(file)
#         #         r.raise_for_status()
#         #         with open(path, 'wb') as  f:
#         #             f.write(r.content)
#
