#### Imports for the cornea-analysis module
import base64
import copy
import requests
import re
from StillGAN_API import stillgan_model
from stage1 import generate_heatmap
from stage2 import predict as grading

import traceback
####
import zipfile
from flask import Response
import pymysql
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from flask_models.files import Files
from flask_models.configs import Config
from io import BytesIO
from io import BytesIO as Bytes2Data
import _thread
import random
from hashlib import sha1

from flask_cors import CORS

from model.tools import test, pre_process, tw
from flask import Flask, request, jsonify, redirect, url_for, make_response, send_from_directory, \
    send_file
from werkzeug.utils import secure_filename
from datetime import timedelta

import cv2
###############
from torch.autograd import Variable
##############
# import DR.matlabarray

from vessel_segmentation.imed_models import CS_Net
from DR.fine_tune import BiResNet
from torchvision import transforms

from enhance.options.test_options import TestOptions
from enhance.data import create_dataset
from enhance.models import create_model

import matplotlib.pyplot as plt

# import matlab
# import matlab.engine
import json

from CHenTao.API import interface as SbspAPI
from DR_project.API import interface as TangwangAPI

# from himed_dao.HimedDao import HimedDao
# from himed_dao.UserInfo import UserInfo
# from himed_dao.DataInfo import DataInfo
# from himed_dao.DaoUtils import HashUtils
from StillGAN.models import create_model
from StillGAN.data import create_dataset
from StillGAN.options.test_options import TestOptions
from StillGAN.util.util import *
from StillGAN.models.networks import ResUNet

import time
import os
from DR_seg import showing_seg, segmenting_DR, fshowing_seg, fsegmenting_DR
import functools
from torch import nn
import threading
from flask_models.dbmodelss import Arguments

###############
###################################################################
## Testing
# Let SQLAlchemy's MySQLdb dialect use pymysql as a drop-in replacement.
pymysql.install_as_MySQLdb()

# Serve ./templates both as the template folder and as static content at the site root.
app = Flask(__name__, template_folder='./templates',
            static_folder='./templates', static_url_path='')
# Load configuration
app.config.from_object(Config)
print(Config())
# Initialize the database connection
engine = create_engine(str(Config()))
DbSession = sessionmaker(bind=engine)
dbs = DbSession()

# Work around "duplicate OpenMP runtime" crashes (common with torch + MKL on Windows).
os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'

# matplotlib.use("Qt5Agg")  # use the QT5 backend
plt.rcParams['font.sans-serif'] = ['Simhei']  # font able to render Chinese labels

Arteriovenous_model_path = "model-new.pth"  # artery/vein segmentation model weights
# Arter_net = torch.load(Arteriovenous_model_path).cuda()  # load the artery/vein segmentation network (GPU)
Arter_net = torch.load(Arteriovenous_model_path, map_location='cpu').module  # load the artery/vein segmentation network on CPU
Arter_net.eval()

ALLOWED_EXTENSIONS = {'png', 'jpg', 'JPG', 'PNG', 'bmp', 'jpeg', 'tif', 'TIF', 'tiff', 'TIFF'}  # permitted upload image formats
BASE_PATH = os.path.dirname(os.path.abspath(__file__))  # absolute path of this project
basepath = os.path.dirname(__file__)
unfinished_thread = 0  # NOTE(review): presumably counts in-flight background jobs -- not used in this chunk


##########################################################################
def image_to_base64(image: Image.Image, fmt='png') -> str:
    """Serialize a PIL image into an in-memory file and return it base64-encoded.

    :param image: PIL image to encode
    :param fmt: image format passed to ``Image.save`` (default ``'png'``)
    :return: base64 string of the encoded image bytes
    """
    buf = BytesIO()
    image.save(buf, format=fmt)
    return base64.b64encode(buf.getvalue()).decode('utf-8')


def encode_image(filename):
    """Read a local image file and return its base64-encoded content.

    :param filename: str, path of a local image file
    :return: str, base64 encoding of the raw file bytes
        eg:
        src="
            yH5BAAAAAAALAAAAAAzADEAAAK8jI+pBr0PowytzotTtbm/DTqQ6C3hGX
            ElcraA9jIr66ozVpM3nseUvYP1UEHF0FUUHkNJxhLZfEJNvol06tzwrgd
            LbXsFZYmSMPnHLB+zNJFbq15+SOf50+6rG7lKOjwV1ibGdhHYRVYVJ9Wn
            k2HWtLdIWMSH9lfyODZoZTb4xdnpxQSEF9oyOWIqp6gaI9pI1Qo7BijbF
            ZkoaAtEeiiLeKn72xM7vMZofJy8zJys2UxsCT3kO229LH1tXAAAOw=="
    """
    # 1. Read the file as raw bytes.
    with open(filename, "rb") as f:
        raw = f.read()
    # BUG FIX: the original had a stray `img = img.con` here -- an incomplete
    # attribute access that raised AttributeError on every call.  Removed,
    # along with the unused extension lookup.
    # 2. Base64-encode the bytes.
    return base64.b64encode(raw).decode()


# Convert an image array to encoded bytes.
def array2bytes(array_img, suffix):
    """Encode an image array with OpenCV and return the raw encoded bytes.

    :param array_img: image as a numpy array
    :param suffix: target format extension without the dot (e.g. "jpg")
    :return: bytes of the encoded image
    """
    ok, encoded = cv2.imencode("." + suffix, array_img)
    return encoded.tobytes()


# Split off the file extension to decide whether the upload format is allowed.
def allowed_file(filename):
    """Return whether ``filename`` has an extension in ALLOWED_EXTENSIONS."""
    if '.' not in filename:
        return False
    return filename.rsplit('.', 1)[1] in ALLOWED_EXTENSIONS


# 糖尿病性视网膜病变的概率诊断
# def dr_num(upload_path, basepath):
#     image = Image.open(upload_path)
#     resize_transform = transforms.Compose([
#         transforms.Resize((512, 512)),
#         transforms.ToTensor(),
#         transforms.Normalize(mean=[0.321, 0.224, 0.161], std=[0.262, 0.183, 0.132]),
#     ])
#     image = resize_transform(image).view([1, 3, 512, 512])
#     image = image
#     #分割部分
#     pred_arr = segmenting_DR(upload_path)
#     img_arr = showing_seg(upload_path, pred_arr)
#     # c,h=cv2.findContours(img,cv2.RETR_LIST,cv2.CHAIN_APPROX_SIMPLE)
#     predict_array = np.zeros_like(pred_arr).astype(np.uint8)
#     pred_flag_arr = np.argmax(pred_arr, axis=0)
#     c_list=[]
#     for j in [1,2,3,5]:
#         predict_array[j, :, :][pred_flag_arr == j] = 255
#         c, h = cv2.findContours(predict_array[j, :, :], cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
#         c_list.append(len(c))
#     net = BiResNet(num_class=5, model_name='resnet34')
#     a = os.path.join(basepath, 'DR_grading.pkl')
#     abc = torch.load(a, map_location='cpu')  # , map_location='cpu'
#     net.load_state_dict(abc)
#     with torch.no_grad():
#         net.eval()
#         x1, x2, predictions = net(image)
#         probs = torch.softmax(predictions, dim=1)
#         probs = probs.data.cpu().numpy()
#         probs =probs[0].tolist()
#         probs = [round(x,2) for x in probs]
#     return img_arr,probs,c_list
def dr_num(upload_path):
    """Segment DR lesions and grade severity for one fundus image file.

    :param upload_path: path to the uploaded fundus image
    :return: tuple ``(img_arr, probs, c_list, w_list)`` where
        img_arr -- segmentation overlay (resized, with legend pasted in),
        probs -- 5-class softmax probabilities as percentages,
        c_list -- lesion contour counts for class channels [1, 2, 3, 5],
        w_list -- total contour areas for the same channels.
    """
    print('函数' + upload_path + '开始执行')
    image = Image.open(upload_path)
    resize_transform = transforms.Compose([
        transforms.Resize((512, 512)),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.321, 0.224, 0.161], std=[0.262, 0.183, 0.132]),
    ])
    image = resize_transform(image).view([1, 3, 512, 512])
    image = image
    # Segmentation stage
    pred_arr = segmenting_DR(upload_path)
    img_arr = showing_seg(upload_path, pred_arr)

    # c,h=cv2.findContours(img,cv2.RETR_LIST,cv2.CHAIN_APPROX_SIMPLE)
    # Binarize the per-pixel argmax per class channel, then count contours
    # and sum their areas for the lesion channels of interest.
    predict_array = np.zeros_like(pred_arr).astype(np.uint8)
    pred_flag_arr = np.argmax(pred_arr, axis=0)
    c_list = []
    w_list = []
    for j in [1, 2, 3, 5]:
        predict_array[j, :, :][pred_flag_arr == j] = 255
        c, h = cv2.findContours(predict_array[j, :, :], cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
        c_list.append(len(c))
        w = 0
        for cc in c:
            w += cv2.contourArea(cc)
        w_list.append(w)
    net = BiResNet(num_class=5, model_name='resnet34')
    a = os.path.join(basepath, 'DR_grading.pkl')
    abc = torch.load(a, map_location='cpu')  # , map_location='cpu'
    net.load_state_dict(abc)
    font = cv2.FONT_HERSHEY_SIMPLEX
    # Rescale the overlay so its total pixel count matches 2592x1944 while
    # keeping the original aspect ratio.
    s_l = int(img_arr.shape[0] * np.sqrt(2592 * 1944 / (img_arr.shape[0] * img_arr.shape[1])))
    s_w = int(img_arr.shape[1] * np.sqrt(2592 * 1944 / (img_arr.shape[0] * img_arr.shape[1])))
    print(s_l)
    print(s_w)
    img_arr = cv2.resize(img_arr, (s_w, s_l))
    legend = cv2.imread('tmp/leg.png')
    legend = cv2.resize(legend, (245, 320))
    # print(legend.shape)
    # Paste the legend into the top-left corner of the overlay.
    img_arr[0:320, 0:245, :] = legend
    # NOTE(review): `upload_path[15:]` assumes a fixed-length path prefix --
    # confirm against the caller's upload directory layout.
    cv2.imwrite('tmp/model1/re/' + upload_path[15:], img_arr)
    with torch.no_grad():
        net.eval()
        x1, x2, predictions = net(image)
        probs = torch.softmax(predictions, dim=1)
        probs = probs.data.cpu().numpy()
        probs = probs[0].tolist()
        probs = [round(100 * x, 2) for x in probs]
    print('函数' + upload_path + '结束执行')
    return img_arr, probs, c_list, w_list


def dr_grading(upload_path, basepath):
    """Run the 5-class DR grading network on one fundus image.

    :param upload_path: path to the input fundus image
    :param basepath: directory containing the 'DR_grading.pkl' weights
    :return: always 0

    NOTE(review): the computed ``probs`` are currently unused -- the pie-chart
    rendering that consumed them is commented out below, and the function
    always returns 0.  Confirm whether callers rely on that return value
    before changing it.
    """
    image = Image.open(upload_path)
    resize_transform = transforms.Compose([
        transforms.Resize((512, 512)),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.321, 0.224, 0.161], std=[0.262, 0.183, 0.132]),
    ])
    image = resize_transform(image).view([1, 3, 512, 512])
    image = image

    net = BiResNet(num_class=5, model_name='resnet34')
    a = os.path.join(basepath, 'DR_grading.pkl')
    abc = torch.load(a, map_location='cpu')  # , map_location='cpu'
    net.load_state_dict(abc)
    with torch.no_grad():
        net.eval()
        x1, x2, predictions = net(image)
        probs = torch.softmax(predictions, dim=1)
        probs = probs.data.cpu().numpy()
        probs = probs[0]

    # labels = ['正常', '1级糖尿病性视网膜病变概率', '2级糖尿病性视网膜病变概率', '3级糖尿病性视网膜病变概率', '4级糖尿病性视网膜病变概率']
    # colors = ['green', 'yellow', 'pink', 'red', 'blue']
    # explode = (0.1, 0.1, 0., 0.1, 0.1)
    # fig = plt.figure()
    # fig.set_facecolor('black')
    # plt.axis('equal')
    # plt.pie(x=probs, colors=colors, labels=labels, explode=explode, autopct='%1.1f',
    #         textprops={'color': 'orange'})
    # plt.legend(loc="upper right", fontsize=8, bbox_to_anchor=(1.1, 1.05), borderaxespad=0.3)
    # plt.savefig('static/download/pie.png')

    return 0


# Composite a segmentation over the original image and mark branch points.
def applyImage(imgname, segment, branch):
    """Overlay a segmentation map on the source image and dot branch points.

    :param imgname: path of the original RGB image
    :param segment: segmentation map (array-like, 255 marks vessel pixels)
    :param branch: branch-point map (array-like, 1 marks a branch point)
    :return: (tinted 512x512 image array, segmentation array with dots drawn)
    """
    raw = cv2.imread(imgname, flags=1)
    base = np.array(Image.fromarray(raw).convert("RGB").resize((512, 512)), dtype='uint8')
    overlay = np.array(Image.fromarray(segment).convert("RGB"), dtype='uint8')
    branch_mask = np.array(np.array(branch).astype(int), dtype='uint8')
    ys, xs = np.where(branch_mask == 1)

    # Tint the segmented pixels green on the base image.
    for ch, val in enumerate((25, 255, 13)):
        base[:, :, ch][overlay[:, :, ch] == 255] = val
    # Draw a filled dot at every branch point on the segmentation map.
    for y, x in zip(ys, xs):
        cv2.circle(overlay, (int(x), int(y)), 3, (67, 6, 255), -1)
    return base, overlay


# Overlay handling for vessel classification.
def SuperSeg(imgname, segment):
    """Overlay helper for vessel classification.

    NOTE(review): this function appears unfinished -- it reads and resizes
    the source image to 512x512 but never uses ``segment`` and implicitly
    returns None.  Confirm whether it is dead code before extending it.
    """
    img = cv2.imread(imgname, flags=1)
    image = Image.fromarray(img).convert("RGB")
    image = image.resize((512, 512))
    image = np.array(image, dtype='uint8')


# # 血管分割
# def vessel_seg(image_name):
#     img = Image.open(image_name)
#     resize_transform = transforms.Compose([
#         transforms.Resize([512, 512]),
#         transforms.ToTensor()])
#     img = resize_transform(img).view([1, 3, 512, 512])
#     model = CS_Net(3, 1)
#     b = os.path.join(basepath, 'state-179.pkl')
#     bc = torch.load(b, map_location='cpu')  # , map_location='cpu'
#     model.load_state_dict(bc)
#
#     with torch.no_grad():
#         model.eval()
#         img = img
#         output1 = model(img)
#         output1 = output1.squeeze().detach().cpu().numpy()
#         output1 = np.array(output1 * 255, dtype='uint8')
#     threshed_pred, output = cv2.threshold(output1, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
#     image = output
#     image = image / 255
#
#     cv2.imwrite('aaa.jpg', image)
#     image_name = 'aaa.jpg'
#
#     engine = matlab.engine.start_matlab()
#     feature_out = engine.vessel_feature(image_name)
#     out = engine.vessel_branch(image_name)
#
#     ff = open('static/download/data.txt', "w")
#
#     item = '弯曲度特征为：' + str(round(feature_out[0][0], 3))
#     ff.write(str(item))
#     ff.write('\n')
#     item = 'VAD特征为：' + str(round(feature_out[0][1], 3))
#     ff.write(str(item))
#     ff.write('\n')
#     item = 'VLD特征为：' + str(round(feature_out[0][2], 3))
#     ff.write(str(item))
#     ff.write('\n')
#     item = 'FD特征为：' + str(round(feature_out[0][3], 3))
#     ff.write(str(item))
#     ff.write('\n')
#
#     return output, feature_out, out


# 绘制血管中心线
# def vessel_centerline(img_path):
#     engine = matlab.engine.start_matlab()
#     out = engine.vessel_centerline(img_path)
#     out = np.array(out)
#     out = out * 255
#
#     centerline_path = "./tmp/vessel_centerline.png"
#
#     cv2.imwrite(centerline_path, out)
#
#     img = cv2.imread('./tmp/vessel_centerline.png', flags=1)
#     img = cv2.resize(img, (512, 512))
#     image = Image.fromarray(img).convert("RGB")
#     image = np.array(image, dtype='uint8')
#     seg = cv2.imread(centerline_path, flags=1)
#     seg = Image.fromarray(seg).convert("RGB")
#     seg = np.array(seg, dtype='uint8')
#     image[:, :, 0][seg[:, :, 0] == 255] = 25
#     image[:, :, 1][seg[:, :, 1] == 255] = 255
#     image[:, :, 2][seg[:, :, 2] == 255] = 13
#     cv2.imwrite(centerline_path, image)


# Draw arteries and veins.
def vessel_Arter(img_path):
    """Run artery/vein segmentation on an image file and save the result.

    Writes the 512x512x3 prediction map (scaled to 0-255, middle channel
    zeroed) to ./tmp/vessel_Ater.png.

    :param img_path: path of the fundus image to segment
    """
    bgr = cv2.imread(img_path)
    rgb = cv2.resize(bgr[:, :, ::-1], (512, 512))
    # Per-channel CLAHE contrast enhancement before inference.
    clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
    enhanced = cv2.merge([clahe.apply(ch) for ch in cv2.split(rgb)])
    # HWC uint8 -> NCHW float in [0, 1].
    batch = torch.Tensor([np.array(enhanced).transpose(2, 0, 1) / 255.0])

    pred = Arter_net(batch)
    pred[:, 1, :, :] = 0  # suppress the middle channel of the prediction
    out = pred.cpu().detach().numpy()[0].transpose(1, 2, 0) * 255
    # 544,544,3 rgb
    cv2.imwrite('./tmp/vessel_Ater.png', out)


############################### segmentation page ################
# Small utility module
class Identity(nn.Module):
    """Pass-through module: forward returns its input unchanged."""

    def forward(self, x):
        return x


def get_norm_layer(norm_type='instance'):
    """Return a normalization layer

    Parameters:
        norm_type (str) -- the name of the normalization layer: batch | instance | none

    For BatchNorm, we use learnable affine parameters and track running statistics (mean/stddev).
    For InstanceNorm, we do not use learnable affine parameters. We do not track running statistics.
    """
    if norm_type == 'batch':
        return functools.partial(nn.BatchNorm2d, affine=True, track_running_stats=True)
    if norm_type == 'instance':
        return functools.partial(nn.InstanceNorm2d, affine=False, track_running_stats=False)
    if norm_type == 'none':
        # The factory ignores its argument and always yields a pass-through.
        def norm_layer(x):
            return Identity()
        return norm_layer
    raise NotImplementedError('normalization layer [%s] is not found' % norm_type)


# Preprocessing for the enhancement network.
def pre_still(img):
    """Resize/crop a PIL image to 512x512 and return a normalized 4-D tensor.

    :param img: PIL image
    :return: float tensor of shape [1, C, 512, 512], normalized to [-1, 1]
    """
    pipeline = transforms.Compose([
        transforms.Resize([512, 512], Image.BICUBIC),
        transforms.RandomCrop(512),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
    ])
    tensor = pipeline(img)
    return Variable(torch.unsqueeze(tensor, dim=0).float(), requires_grad=False)


# StillGAN fundus enhancement (from an image file path).
def stillgan(img_path):
    """Enhance a fundus image with the pretrained StillGAN generator.

    :param img_path: path of the image to enhance
    :return: enhanced BGR image array resized back to the input resolution
    """
    current = Image.open(img_path)
    # FIX: PIL's Image.size is (width, height); the original unpacked it into
    # swapped variable names and only produced the right result because
    # cv2.resize also takes (width, height) -- an accidental double swap.
    width, height = current.size
    pic = pre_still(current)
    model = ResUNet(3, 3, 64, norm_layer=get_norm_layer())
    net = torch.load('StillGAN/checkpoints/isee_csigan/120_net_G_A.pth', map_location=torch.device('cpu'))
    model.load_state_dict(net)
    runned = model(pic)
    image = tensor2im(runned)
    # RGB -> BGR for OpenCV output.
    (r, g, b) = cv2.split(image)
    image = cv2.merge([b, g, r])
    # cv2.resize expects (width, height).
    image = cv2.resize(image, (width, height), interpolation=cv2.INTER_CUBIC)
    return image


def stillgan_ff(img):
    """Enhance a fundus image given as an array with the StillGAN generator.

    :param img: image as a numpy array
    :return: enhanced BGR image array resized back to the input resolution
    """
    current = Image.fromarray(np.uint8(img))
    # FIX: PIL's Image.size is (width, height); the original unpacked it into
    # swapped variable names and only produced the right result because
    # cv2.resize also takes (width, height) -- an accidental double swap.
    width, height = current.size
    pic = pre_still(current)
    model = ResUNet(3, 3, 64, norm_layer=get_norm_layer())
    net = torch.load('StillGAN/checkpoints/isee_csigan/120_net_G_A.pth', map_location=torch.device('cpu'))
    model.load_state_dict(net)
    runned = model(pic)
    image = tensor2im(runned)
    # RGB -> BGR for OpenCV output.
    (r, g, b) = cv2.split(image)
    image = cv2.merge([b, g, r])
    # cv2.resize expects (width, height).
    image = cv2.resize(image, (width, height), interpolation=cv2.INTER_CUBIC)
    return image


# 血管分割叠加
# def vseg(image_name):
#     img = Image.open(image_name)
#     resize_transform = transforms.Compose([
#         transforms.Resize([512, 512]),
#         transforms.ToTensor()])
#     img = resize_transform(img).view([1, 3, 512, 512])
#     model = CS_Net(3, 1)
#     b = os.path.join(basepath, 'state-179.pkl')
#     bc = torch.load(b, map_location='cpu')  # , map_location='cpu'
#     model.load_state_dict(bc)
#
#     with torch.no_grad():
#         model.eval()
#         img = img
#         output1 = model(img)
#         output1 = output1.squeeze().detach().cpu().numpy()
#         output1 = np.array(output1 * 255, dtype='uint8')
#     threshed_pred, output = cv2.threshold(output1, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
#     image = output
#     cv2.imwrite('aaa.jpg', image)
#     image_name = 'aaa.jpg'
#     engine = matlab.engine.start_matlab()
#     feature_out = engine.vessel_feature(image_name)
#     return image, feature_out


def vseg_ff(image):
    """Vessel segmentation on an image array, with best-effort features.

    :param image: image as a numpy array
    :return: (binary vessel mask after Otsu thresholding, vessel features
        or None when no MATLAB engine is available)

    BUG FIX: the original ended with ``engine.vessel_feature(image_name)``,
    but the MATLAB engine start-up is commented out, so ``engine`` resolved
    to the module-level SQLAlchemy engine and every call raised
    AttributeError after the mask had already been computed.  Feature
    extraction is now best-effort and the mask is always returned.
    """
    img = Image.fromarray(np.uint8(image))
    resize_transform = transforms.Compose([
        transforms.Resize([512, 512]),
        transforms.ToTensor()])
    img = resize_transform(img).view([1, 3, 512, 512])
    model = CS_Net(3, 1)
    b = os.path.join(basepath, 'state-179.pkl')
    bc = torch.load(b, map_location='cpu')  # , map_location='cpu'
    model.load_state_dict(bc)

    with torch.no_grad():
        model.eval()
        output1 = model(img)
        output1 = output1.squeeze().detach().cpu().numpy()
        output1 = np.array(output1 * 255, dtype='uint8')
    # Otsu threshold the probability map into a binary vessel mask.
    threshed_pred, output = cv2.threshold(output1, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    image = output
    # engine = matlab.engine.start_matlab()
    cv2.imwrite('aaa.jpg', image)
    image_name = 'aaa.jpg'
    try:
        feature_out = engine.vessel_feature(image_name)
    except AttributeError:
        # No MATLAB engine in scope (see the commented-out start_matlab above);
        # `engine` here is the SQLAlchemy engine, which has no vessel_feature.
        feature_out = None
    return image, feature_out


def applyv(imgname, segment, rate):
    """Overlay a vessel segmentation on the original image.

    :param imgname: path of the original RGB image
    :param segment: segmentation map (array-like, 255 marks vessel pixels)
    :param rate: height/width ratio; output is 1024 x int(1024 * rate)
    :return: uint8 RGB array with segmented pixels tinted green
    """
    target = (1024, int(1024 * rate))
    src = cv2.imread(imgname, flags=1)
    base = np.array(Image.fromarray(src).convert("RGB").resize(target), dtype='uint8')
    mask = np.array(Image.fromarray(segment).convert("RGB").resize(target), dtype='uint8')

    # Tint the vessel pixels green on the base image.
    for ch, val in enumerate((25, 255, 13)):
        base[:, :, ch][mask[:, :, ch] == 255] = val
    return base


# Artery/vein overlay (PIL image inputs).
def octa_applyv(img, segment):
    """Overlay a segmentation on an OCTA image.

    :param img: source PIL image
    :param segment: segmentation as a PIL image (255 marks vessel pixels)
    :return: 1024x1024 uint8 RGB array with segmented pixels set pure green
    """
    base = np.array(img.convert("RGB").resize((1024, 1024)), dtype='uint8')
    mask = np.array(segment.resize((1024, 1024)), dtype='uint8')

    for ch, val in enumerate((0, 255, 0)):
        base[:, :, ch][mask[:, :, ch] == 255] = val
    return base


def applyv_ff(img, segment):
    """Overlay a vessel segmentation on an image given as arrays.

    :param img: original RGB image as a numpy array
    :param segment: segmentation map as a numpy array (255 marks vessels)
    :return: 1024x1024 uint8 RGB array with segmented pixels tinted green
    """
    base = np.array(Image.fromarray(img).convert("RGB").resize((1024, 1024)), dtype='uint8')
    mask = np.array(Image.fromarray(segment).convert("RGB").resize((1024, 1024)), dtype='uint8')

    for ch, val in enumerate((25, 255, 13)):
        base[:, :, ch][mask[:, :, ch] == 255] = val
    return base


# Vessel-classification overlay (file paths); returns the composited image.
def ater(image_name, seg_name):
    """Paste artery/vein colors from the seg map onto the source image.

    :param image_name: path of the original image
    :param seg_name: path of the artery/vein color map
    :return: 1024x1024 uint8 array with vessel colors copied in and the
        legend stamped in the top-left corner
    """
    raw = cv2.imread(image_name, flags=1)[..., ::-1]
    base = np.array(Image.fromarray(raw).convert("RGB").resize((1024, 1024)), dtype='uint8')

    raw_seg = cv2.imread(seg_name, flags=1)[..., ::-1]
    mask = np.array(Image.fromarray(raw_seg).resize((1024, 1024)), dtype='uint8')

    low = 160
    # Where the seg map's channel 2 (then channel 0) exceeds the threshold,
    # copy its color onto the base with channels 0 and 2 deliberately swapped.
    for key in (2, 0):
        sel = mask[:, :, key] > low
        base[:, :, 2][sel] = mask[:, :, 0][sel]
        base[:, :, 1][sel] = mask[:, :, 1][sel]
        base[:, :, 0][sel] = mask[:, :, 2][sel]

    legend = cv2.imread("tmp/ves.png", flags=1)[..., ::-1]
    legend = np.array(Image.fromarray(legend).resize((100, 110)), dtype='uint8')
    # print(legend.shape)
    base[0:110, 0:100, :] = legend
    return base


def ater_ff(image, seg):
    """Paste artery/vein colors from a seg array onto an image array.

    :param image: original image as a numpy array
    :param seg: artery/vein color map as a numpy array
    :return: 1024x1024 uint8 array with vessel colors copied in and the
        legend stamped in the top-left corner
    """
    base = np.array(Image.fromarray(np.uint8(image)).convert("RGB").resize((1024, 1024)), dtype='uint8')
    mask = np.array(Image.fromarray(np.uint8(seg)).convert("RGB").resize((1024, 1024)), dtype='uint8')

    low = 160
    # Where the seg map's channel 2 (then channel 0) exceeds the threshold,
    # copy its color onto the base with channels 0 and 2 deliberately swapped.
    for key in (2, 0):
        sel = mask[:, :, key] > low
        base[:, :, 2][sel] = mask[:, :, 0][sel]
        base[:, :, 1][sel] = mask[:, :, 1][sel]
        base[:, :, 0][sel] = mask[:, :, 2][sel]

    # NOTE(review): unlike ater(), the legend is NOT channel-reversed here
    # (no [..., ::-1]); preserved as-is -- confirm whether that is intended.
    legend = cv2.imread("tmp/ves.png", flags=1)
    legend = np.array(Image.fromarray(legend).resize((100, 110)), dtype='uint8')
    # print(legend.shape)
    base[0:110, 0:100, :] = legend
    return base


# Artery/vein segmentation; returns the prediction image.
def Arter(img_path):
    """Segment arteries/veins in an image file with the global Arter_net.

    :param img_path: path of the fundus image
    :return: 512x512x3 prediction map scaled to 0-255 (middle channel zeroed)
    """
    bgr = cv2.imread(img_path)
    rgb = cv2.resize(bgr[:, :, ::-1], (512, 512))
    # Per-channel CLAHE contrast enhancement before inference.
    clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
    enhanced = cv2.merge([clahe.apply(ch) for ch in cv2.split(rgb)])
    # HWC uint8 -> NCHW float in [0, 1].
    batch = torch.Tensor([np.array(enhanced).transpose(2, 0, 1) / 255.0])

    pred = Arter_net(batch)
    pred[:, 1, :, :] = 0  # suppress the middle channel of the prediction
    # Take the single sample out of the batch as an HWC array.
    out = pred.cpu().detach().numpy()[0].transpose(1, 2, 0) * 255
    # 544,544,3 rgb
    return out


def Arter_ff(raw):
    """Segment arteries/veins in an image array with the global Arter_net.

    :param raw: image as a numpy array (channels reversed internally)
    :return: 512x512x3 prediction map scaled to 0-255 (middle channel zeroed)
    """
    rgb = cv2.resize(raw[:, :, ::-1], (512, 512))
    # Per-channel CLAHE contrast enhancement before inference.
    clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
    enhanced = cv2.merge([clahe.apply(ch) for ch in cv2.split(rgb)])
    # HWC uint8 -> NCHW float in [0, 1].
    batch = torch.Tensor([np.array(enhanced).transpose(2, 0, 1) / 255.0])

    pred = Arter_net(batch)
    pred[:, 1, :, :] = 0  # suppress the middle channel of the prediction
    # Take the single sample out of the batch as an HWC array.
    out = pred.cpu().detach().numpy()[0].transpose(1, 2, 0) * 255
    # 544,544,3 rgb
    return out


def ater_octa(image, seg):
    """Paste artery/vein colors from a PIL seg image onto a PIL source image.

    :param image: source PIL image
    :param seg: artery/vein color map as a PIL image
    :return: 1024x1024 uint8 array with vessel colors copied straight through
        (no channel swap, unlike ater/ater_ff)
    """
    base = np.array(image.convert("RGB").resize((1024, 1024)), dtype='uint8')
    mask = np.array(seg.resize((1024, 1024)), dtype='uint8')

    low = 160
    for key in (2, 0):
        sel = mask[:, :, key] > low
        for ch in range(3):
            base[:, :, ch][sel] = mask[:, :, ch][sel]

    return base


# Lesion-related analysis.
def dr_2(upload_path):
    """Segment DR lesions and grade severity for one fundus image file.

    Like dr_num(), but pastes a smaller legend and does not rescale the
    overlay or write it to disk.

    :param upload_path: path to the uploaded fundus image
    :return: tuple ``(img_arr, probs, c_list, w_list)`` where
        img_arr -- segmentation overlay with the legend pasted in,
        probs -- 5-class softmax probabilities as percentages,
        c_list -- lesion contour counts for class channels [1, 2, 3, 5],
        w_list -- total contour areas for the same channels.
    """
    print('函数' + upload_path + '开始执行')
    image = Image.open(upload_path)
    resize_transform = transforms.Compose([
        transforms.Resize((512, 512)),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.321, 0.224, 0.161], std=[0.262, 0.183, 0.132]),
    ])
    image = resize_transform(image).view([1, 3, 512, 512])
    image = image
    # Segmentation stage
    pred_arr = segmenting_DR(upload_path)
    img_arr = showing_seg(upload_path, pred_arr)

    # c,h=cv2.findContours(img,cv2.RETR_LIST,cv2.CHAIN_APPROX_SIMPLE)
    # Binarize the per-pixel argmax per class channel, then count contours
    # and sum their areas for the lesion channels of interest.
    predict_array = np.zeros_like(pred_arr).astype(np.uint8)
    pred_flag_arr = np.argmax(pred_arr, axis=0)
    c_list = []
    w_list = []
    for j in [1, 2, 3, 5]:
        predict_array[j, :, :][pred_flag_arr == j] = 255
        c, h = cv2.findContours(predict_array[j, :, :], cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
        c_list.append(len(c))
        w = 0
        for cc in c:
            w += cv2.contourArea(cc)
        w_list.append(w)
    net = BiResNet(num_class=5, model_name='resnet34')
    a = os.path.join(basepath, 'DR_grading.pkl')
    abc = torch.load(a, map_location='cpu')  # , map_location='cpu'
    net.load_state_dict(abc)
    font = cv2.FONT_HERSHEY_SIMPLEX

    legend = cv2.imread('tmp/leg.png')

    # print(legend.shape)
    legend = cv2.resize(legend, (183, 240))
    # print(legend.shape)
    # Paste the legend into the top-left corner of the overlay.
    img_arr[0:240, 0:183, :] = legend
    # cv2.imwrite('tmp/model1/re/' + upload_path[15:], img_arr)
    with torch.no_grad():
        net.eval()
        x1, x2, predictions = net(image)
        probs = torch.softmax(predictions, dim=1)
        probs = probs.data.cpu().numpy()
        probs = probs[0].tolist()
        probs = [round(100 * x, 2) for x in probs]
    print('函数' + upload_path + '结束执行')
    return img_arr, probs, c_list, w_list


def dr_2_ff(img):
    """Segment DR lesions and grade severity for an in-memory image array.

    Array-based variant of dr_2(); assumes ``img`` is a BGR array (its
    channels are reversed before display) -- TODO confirm against callers.

    :param img: fundus image as a numpy array
    :return: tuple ``(img_arr, probs, c_list, w_list)`` where
        img_arr -- segmentation overlay with the legend pasted in,
        probs -- 5-class softmax probabilities as percentages,
        c_list -- lesion contour counts for class channels [1, 2, 3, 5],
        w_list -- total contour areas for the same channels.
    """
    image = Image.fromarray(np.uint8(img))
    resize_transform = transforms.Compose([
        transforms.Resize((512, 512)),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.321, 0.224, 0.161], std=[0.262, 0.183, 0.132]),
    ])
    image = resize_transform(image).view([1, 3, 512, 512])
    image = image
    # Segmentation stage
    pred_arr = fsegmenting_DR(img)
    # Reverse the channel order before building the display overlay.
    b, g, r = cv2.split(img)
    img = cv2.merge([r, g, b])
    img_arr = fshowing_seg(img, pred_arr)

    # c,h=cv2.findContours(img,cv2.RETR_LIST,cv2.CHAIN_APPROX_SIMPLE)
    # Binarize the per-pixel argmax per class channel, then count contours
    # and sum their areas for the lesion channels of interest.
    predict_array = np.zeros_like(pred_arr).astype(np.uint8)
    pred_flag_arr = np.argmax(pred_arr, axis=0)
    c_list = []
    w_list = []
    for j in [1, 2, 3, 5]:
        predict_array[j, :, :][pred_flag_arr == j] = 255
        c, h = cv2.findContours(predict_array[j, :, :], cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
        c_list.append(len(c))
        w = 0
        for cc in c:
            w += cv2.contourArea(cc)
        w_list.append(w)
    net = BiResNet(num_class=5, model_name='resnet34')
    a = os.path.join(basepath, 'DR_grading.pkl')
    abc = torch.load(a, map_location='cpu')  # , map_location='cpu'
    net.load_state_dict(abc)
    font = cv2.FONT_HERSHEY_SIMPLEX

    legend = cv2.imread('tmp/leg.png')

    # print(legend.shape)
    legend = cv2.resize(legend, (183, 240))
    # print(legend.shape)
    # Paste the legend into the top-left corner of the overlay.
    img_arr[0:240, 0:183, :] = legend
    # cv2.imwrite('tmp/model1/re/' + upload_path[15:], img_arr)
    with torch.no_grad():
        net.eval()
        x1, x2, predictions = net(image)
        probs = torch.softmax(predictions, dim=1)
        probs = probs.data.cpu().numpy()
        probs = probs[0].tolist()
        probs = [round(100 * x, 2) for x in probs]
    # print('函数' + upload_path + '结束执行')
    return img_arr, probs, c_list, w_list


###################################3##################################
# Image enhancement for fundus photographs.
def vessel_Enhance(img_path):
    """Set up the enhancement model in test mode.

    BUG FIX: at module level, ``TestOptions`` / ``create_dataset`` /
    ``create_model`` imported from ``enhance`` (top of file) are silently
    shadowed by the later identically-named StillGAN imports, so this
    function was wiring up the wrong package.  Import the intended
    ``enhance`` versions locally.

    NOTE(review): ``img_path`` is overwritten and nothing is returned --
    this looks like scaffolding from the upstream test script; confirm
    whether it is still used.
    """
    from enhance.options.test_options import TestOptions
    from enhance.data import create_dataset
    from enhance.models import create_model

    opt = TestOptions().parse()  # get test options
    # hard-code some parameters for test
    opt.num_threads = 0  # test code only supports num_threads = 0
    opt.batch_size = 1  # test code only supports batch_size = 1
    opt.serial_batches = True  # disable data shuffling; comment this line if results on randomly chosen images are needed.
    opt.no_flip = True  # no flip; comment this line if results on flipped images are needed.
    opt.display_id = -1  # no visdom display; the test code saves the results to a HTML file.
    dataset = create_dataset(opt)  # create a dataset given opt.dataset_mode and other options
    model = create_model(opt)  # create a model given opt.model and other options
    model.setup(opt)
    visuals = model.get_current_visuals()  # get image results
    img_path = model.get_image_paths()
    if opt.eval:
        model.eval()


# Image enhancement entry point.
def img_enhance(img_path, dst_path):
    """Placeholder: the EnhanceAPI call is commented out, so this is a no-op."""
    # EnhanceAPI.interface(img_path, dst_path)
    return None


# Optic cup / optic disc segmentation.
def img_sbsp(img_path, dst_path):
    """Delegate cup/disc segmentation to the SbspAPI interface."""
    SbspAPI.interface(img_path, dst_path)


def img_Tangwang(img_path, dst_path):
    """Run DR ("tangwang") grading via TangwangAPI and format a summary.

    :return: string with the predicted grade and its rounded probability
    """
    probs, grade, _ = TangwangAPI.interface(img_path, dst_path)
    return "糖网级别：" + str(grade) + "，概率：" + str(round(probs[grade], 2))


def tw_seg(path, dst_path):
    """Segment DR lesions and print the contour count for every class channel.

    NOTE(review): ``dst_path`` is unused and nothing is returned -- the
    overlay from showing_seg() is computed but discarded.
    """
    pred_arr = segmenting_DR(path)
    img_arr = showing_seg(path, pred_arr)
    # c,h=cv2.findContours(img,cv2.RETR_LIST,cv2.CHAIN_APPROX_SIMPLE)
    binary = np.zeros_like(pred_arr).astype(np.uint8)
    flat = np.argmax(pred_arr, axis=0)
    for idx in range(binary.shape[0]):
        binary[idx, :, :][flat == idx] = 255
        contours, _ = cv2.findContours(binary[idx, :, :], cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
        print(len(contours))


def save_db(img, file_name, token, suffix):
    """Persist a result image as a JPEG row in the files table.

    :param img: image (array-like) to store
    :param file_name: base name; ``suffix + ".jpg"`` is appended
    :param token: request token; the row's token is ``token + suffix``
    :param suffix: result-kind tag (e.g. 'enh', 'ves_raw')
    """
    payload = array2bytes(np.asarray(img), "jpg")
    record = Files()
    record.file_name = file_name + suffix + ".jpg"
    record.data = payload
    record.size = len(payload) // 1024
    record.suffix = record.file_name.split(".")[-1]
    record.create_time = time.strftime("%Y-%m-%d-%H %M %S", time.localtime())
    record.token = token + suffix
    dbs.add(record)
    dbs.commit()
    # NOTE(review): this pause was in the original code -- presumably it
    # throttles successive writes; confirm before removing.
    time.sleep(10)


def decode_base64(base64_data: str) -> Image.Image:
    """Decode a base64 string and open it as a PIL image.

    :param base64_data: base64-encoded image bytes
    :return: lazily-loaded PIL image backed by an in-memory stream
    """
    img_byte: bytes = base64.b64decode(base64_data)
    bytes_stream = BytesIO(img_byte)
    return Image.open(bytes_stream)


# Combined analysis worker for the segmentation page
def analyze_func(file_content, file_name, token):
    """Background pipeline: enhancement, artery/vein, vessel and DR analysis.

    Each intermediate image is persisted via save_db under token + suffix
    ('enh', 'ater_raw', 'ater_enh', 'ves_raw', 'ves_enh', 'ill_raw',
    'ill_enh'); the computed metrics go into an Arguments row under
    token + 'cha', and finally the original image is stored under token.

    :param file_content: raw bytes of the uploaded image
    :param file_name: original file name (prefix for stored file names)
    :param token: client lookup token issued by /analyze
    """
    print(1)

    ori_Img = Image.open(Bytes2Data(file_content))
    ori_array = np.asarray(ori_Img)
    ori_bytes = array2bytes(ori_array, "jpg")

    ##################
    # Image analysis steps
    result_enh = stillgan_ff(ori_array)
    save_db(result_enh, file_name, token, 'enh')
    # Swap the original image's channel order (PIL RGB <-> cv2 BGR)
    b, g, r = cv2.split(ori_array)
    ori_array = cv2.merge([r, g, b])
    print('enh')
    result_ater_raw = Arter_ff(ori_array)
    result_ater_raw = ater_ff(ori_array, result_ater_raw)

    save_db(result_ater_raw, file_name, token, 'ater_raw')
    print('ater_raw')
    result_ater_enh = Arter_ff(result_enh)
    result_ater_enh = ater_ff(result_enh, result_ater_enh)

    save_db(result_ater_enh, file_name, token, 'ater_enh')
    print('ater_enh')
    # NOTE(review): ori_array was already channel-swapped above, so this swap
    # restores the original order for vseg_ff — confirm which channel order
    # vseg_ff actually expects.
    b, g, r = cv2.split(ori_array)
    ori_array_2 = cv2.merge([r, g, b])
    result_ves_raw, feature_out_r = vseg_ff(ori_array_2)

    result_ves_raw = applyv_ff(ori_array, result_ves_raw)

    save_db(result_ves_raw, file_name, token, 'ves_raw')
    print('ves_raw')
    b, g, r = cv2.split(result_enh)
    result_enh_2 = cv2.merge([r, g, b])
    result_ves_enh, feature_out_h = vseg_ff(result_enh_2)

    result_ves_enh = applyv_ff(result_enh, result_ves_enh)
    save_db(result_ves_enh, file_name, token, 'ves_enh')
    print('ves_enh')
    b, g, r = cv2.split(ori_array)
    ori_array_2 = cv2.merge([r, g, b])
    result_ill_raw, prob_r, count_r, weight_r = dr_2_ff(ori_array_2)
    save_db(result_ill_raw, file_name, token, 'ill_raw')
    print('ill_raw')
    result_ill_enh, prob_h, count_h, weight_h = dr_2_ff(result_enh_2)
    save_db(result_ill_enh, file_name, token, 'ill_enh')
    print('ill_enh')
    # DR severity labels (grade 1..5); index chosen by argmax of probabilities
    LI = ['一级', '二级', '三级', '四级', '五级']

    ori_ARGS = Arguments()
    ori_ARGS.level_r = LI[np.argmax(prob_r)]  # grade from the raw image
    ori_ARGS.level_h = LI[np.argmax(prob_h)]  # grade from the enhanced image
    ori_ARGS.prob_r = max(prob_r)
    ori_ARGS.prob_h = max(prob_h)
    # Per-lesion-class areas (4 classes), raw then enhanced
    ori_ARGS.area1_r = weight_r[0]
    ori_ARGS.area2_r = weight_r[1]
    ori_ARGS.area3_r = weight_r[2]
    ori_ARGS.area4_r = weight_r[3]
    ori_ARGS.area1_h = weight_h[0]
    ori_ARGS.area2_h = weight_h[1]
    ori_ARGS.area3_h = weight_h[2]
    ori_ARGS.area4_h = weight_h[3]
    # Per-lesion-class counts (4 classes), raw then enhanced
    ori_ARGS.num1_r = count_r[0]
    ori_ARGS.num2_r = count_r[1]
    ori_ARGS.num3_r = count_r[2]
    ori_ARGS.num4_r = count_r[3]
    ori_ARGS.num1_h = count_h[0]
    ori_ARGS.num2_h = count_h[1]
    ori_ARGS.num3_h = count_h[2]
    ori_ARGS.num4_h = count_h[3]
    # Vessel metrics come from the enhanced image (feature_out_h)
    Tortuosity = round(feature_out_h[0][0], 2)
    VAD = round(feature_out_h[0][1], 2)
    VLD = round(feature_out_h[0][2], 2)
    FD = round(feature_out_h[0][3], 2)
    ori_ARGS.vld = VLD
    ori_ARGS.vad = VAD
    ori_ARGS.fd = FD
    ori_ARGS.tor = Tortuosity
    ori_ARGS.create_time = time.strftime("%Y-%m-%d-%H %M %S", time.localtime())
    ori_ARGS.token = token + 'cha'
    dbs.add(ori_ARGS)
    dbs.commit()

    ##################
    # Store the original image
    ori_file = Files()
    ori_file.file_name = file_name + ".jpg"
    ori_file.data = ori_bytes
    ori_file.size = len(ori_bytes) // 1024
    ori_file.suffix = ori_file.file_name.split(".")[-1]
    ori_file.create_time = time.strftime("%Y-%m-%d-%H %M %S", time.localtime())
    ori_file.token = token

    # Commit
    dbs.add(ori_file)
    dbs.commit()
    global unfinished_thread
    # NOTE(review): unsynchronized read-modify-write of a module global from a
    # worker thread — racy under concurrent uploads.
    unfinished_thread = unfinished_thread - 1
    print('end')
    time.sleep(10)


# 血管脉分割
'''def analyze_ater(file_content , file_name, token):
    ori_Img = Image.open(Bytes2Data(file_content))
    ori_array = np.asarray(ori_Img)
    ori_bytes = array2bytes(ori_array, "jpg")

    ##################
    # 图像分析等操作 动静脉
    result_1 = Ater_ff(ori_array)
    result_ater = ater_ff(ori_array,result_1)
    result_array = np.asarray(result_ater)
    result_bytes = array2bytes(result_array, "jpg")
    result_file = Files()
    result_file.file_name = file_name + "ater.jpg"
    result_file.data = result_bytes
    result_file.size = len(result_bytes) // 1024
    result_file.suffix = result_file.file_name.split(".")[-1]
    result_file.create_time = time.strftime("%Y-%m-%d-%H %M %S", time.localtime())
    result_file.token = token + 'ater'
    dbs.add(result_file)
    dbs.commit()
    time.sleep(10)

    # 提交2,血管分割


    result_1 = vseg_ff(ori_array)
    result_ves = applyv_ff(ori_array, result_1)
    result_array = np.asarray(result_ves)
    result_bytes = array2bytes(result_array, "jpg")
    result_file = Files()
    result_file.file_name = file_name + "ves.jpg"
    result_file.data = result_bytes
    result_file.size = len(result_bytes) // 1024
    result_file.suffix = result_file.file_name.split(".")[-1]
    result_file.create_time = time.strftime("%Y-%m-%d-%H %M %S", time.localtime())
    result_file.token = token + 'ves'
    dbs.add(result_file)
    dbs.commit()
    time.sleep(10)
#糖网
def analyze_ill(file_content, file_name, token):

    ori_Img = Image.open(Bytes2Data(file_content))
    ori_array = np.asarray(ori_Img)
    ori_bytes = array2bytes(ori_array, "jpg")

    ##################
    # 图像分析等操作
    result_gan,prob_r,count_r,weight_r=dr_2_ff(ori_array)
    result_array = np.asarray(result_gan)
    result_bytes = array2bytes(result_array, "jpg")
    result_file = Files()
    result_file.file_name = file_name + "enh.jpg"
    result_file.data = result_bytes
    result_file.size = len(result_bytes) // 1024
    result_file.suffix = result_file.file_name.split(".")[-1]
    result_file.create_time = time.strftime("%Y-%m-%d-%H %M %S", time.localtime())
    result_file.token = token+'enh'

    # 提交
    dbs.add(result_file)
    dbs.commit()
    time.sleep(10)
    ##################
    # 保存原图
    ori_file = Files()
    ori_file.file_name = file_name+".jpg"
    ori_file.data = ori_bytes
    ori_file.size = len(ori_bytes)//1024
    ori_file.suffix = ori_file.file_name.split(".")[-1]
    ori_file.create_time = time.strftime("%Y-%m-%d-%H %M %S", time.localtime())
    ori_file.token = token

    # 提交
    dbs.add(ori_file)
    dbs.commit()
    time.sleep(10)'''


####################################角膜############################
def pre_still2(img, size=518):
    """Preprocess a PIL image for the corneal StillGAN enhancer.

    Resizes to (size, size), converts to a tensor and normalizes to [-1, 1],
    then adds a batch dimension.

    :param img: PIL image (single channel expected by the Normalize below)
    :param size: square model input resolution (default 518, as before)
    :return: float tensor of shape (1, C, size, size) with requires_grad=False
    """
    transform_list = [
        transforms.Resize([size, size], Image.BICUBIC),
        transforms.ToTensor(),
        # (0.5,)/(0.5,) maps [0, 1] -> [-1, 1]; the original passed bare
        # floats, which torchvision tolerates but sequences are the
        # documented form.
        transforms.Normalize((0.5,), (0.5,)),
    ]
    trans = transforms.Compose(transform_list)
    res = trans(img)
    res = Variable(torch.unsqueeze(res, dim=0).float(), requires_grad=False)
    return res


def enh_model(img, model1):
    """Enhance a corneal image with a pre-trained ResUNet generator.

    :param img: PIL image to enhance
    :param model1: path to the generator checkpoint (a state dict)
    :return: enhanced image as a BGR numpy array (cv2 channel order)
    """
    pic = pre_still2(img)
    model = ResUNet(3, 3, 64, norm_layer=get_norm_layer())
    # net = torch.load('StillGAN/checkpoints/isee_csigan/120_net_G_A.pth', map_location=torch.device('cpu'))
    state = torch.load(model1, map_location=torch.device('cpu'))
    model.load_state_dict(state)
    # BUGFIX: run in inference mode — the original left the net in training
    # mode with autograd enabled (the unused height/width locals are dropped).
    model.eval()
    with torch.no_grad():
        runned = model(pic)
    image = tensor2im(runned)
    (r, g, b) = cv2.split(image)
    image = cv2.merge([b, g, r])

    return image


def return_prediction(pred):
    """Convert a network output tensor (trained with MSELoss) to a 0/255 mask.

    :param pred: tensor of shape (1, 1, H, W)
    :return: numpy array of shape (H, W) holding only 0.0 and 255.0
    """
    channels_last = pred.permute(0, 2, 3, 1).contiguous()
    channels_last = channels_last.squeeze_(0).squeeze_(-1)
    mask = channels_last.data.cpu().numpy() * 255
    below = mask < 127
    mask[below] = 0
    mask[~below] = 255
    return mask


def predict_nerve(nerve_img, model_path):
    """Segment corneal nerves in the top-left 384x384 crop of *nerve_img*.

    :param nerve_img: PIL image (must not be None)
    :param model_path: path to a fully-pickled torch model
    :return: binary 0/255 numpy mask (see return_prediction)
    :raises ValueError: if nerve_img is None
    """
    # Load the trained model (CPU only)
    net = torch.load(model_path, map_location='cpu')
    to_tensor = transforms.Compose([
        transforms.ToTensor()
    ])
    if nerve_img is None:
        raise ValueError("Segmentation object should be directory path or an image")
    with torch.no_grad():
        net.eval()
        cropped = nerve_img.crop((0, 0, 384, 384))
        batch = to_tensor(cropped).unsqueeze(0)
        fine = net(batch)
        mask = return_prediction(fine)
    return mask


def get_roi(img, seg):
    """First step: obtain the ROI heatmaps from the nerve segmentation.

    :param img: numpy array
    :param seg: numpy array
    :return: (heatmap_on_image, roi, only_heatmap_on_seg, nerve_gradient_on_img);
        roi feeds stage 2, the last two items are for visualization.
    """
    return generate_heatmap(img, seg)


def tortuosity_grading(img, seg):
    """Second step: grade nerve tortuosity from an image and its segmentation.

    :param img: numpy array
    :param seg: numpy array
    :return: (heatmap_on_image, only_heatmap_on_seg, nerve_gradient_on_img,
        pred_tortuosity_level, pred_levels_probabilities) — the level is the
        predicted tortuosity class for this sample and the probabilities are
        its per-level scores (levels 0-3). Pick whichever visualization suits:
        heatmap on image, heatmap on segmentation, or gradient on nerves.
    """
    heatmap_on_image, roi, only_heatmap_on_seg, nerve_gradient_on_img = get_roi(img, seg)
    # grading returns batched outputs; this path always grades one sample
    level_batch, prob_batch = grading(img, seg, roi)
    return (heatmap_on_image, only_heatmap_on_seg, nerve_gradient_on_img,
            level_batch[0], prob_batch[0])


################################################################################
############################请带好口罩，出示健康码##################################
#################某某路无人售票公交车，票价一元，持证请出示，下车请走好###################
################################################################################

#####################################################################################
# flask部分
app = Flask(__name__)
app.send_file_max_age_default = timedelta(seconds=1)  # static-file cache expiry
CORS(app=app)  # allow cross-origin requests from the front-end
app.config['UPLOAD_FOLDER'] = './uploads'  # destination for raw uploads


####################################################################################
# Upload handler; the <file> path segment selects which page the upload is for
@app.route("/upload/<path:file>", methods=['POST', 'GET'])
def upload(file):
    """Receive uploads for the demo pages.

    'dr' / 'tw'      : chunked multi-file upload; each file is saved, run
                       through test()/tw(), and raw + processed URLs plus
                       per-file info are returned.
    'seg' / 'nerve'  : single file stored at a fixed path for later steps.
    'octa' / 'fundus': single file for the combined OCTA/fundus demo.
    """
    if file == 'dr':
        ul = []
        picl = []
        infol = []
        print('111100-----------------', request.form['chunk'])
        for i in range(int(request.form['chunk'])):
            fileObj = request.files.get('files' + str(i))
            print(fileObj)
            fileObj.save(os.path.join(app.config['UPLOAD_FOLDER'], pre_process(fileObj.filename)[1] + '.png'))
            fileObj.save(os.path.join('./tmp/ct', pre_process(fileObj.filename)[1] + '.png'))
            ul.append('http://127.0.0.1:5000/tmp/ct/' + pre_process(fileObj.filename)[1] + '.png')
            rimg, info = test(os.path.join(app.config['UPLOAD_FOLDER'], pre_process(fileObj.filename)[1] + '.png'))
            infol.append(info)
            cv2.imwrite('tmp/dr/' + pre_process(fileObj.filename)[1] + '.png', rimg)
            picl.append('http://127.0.0.1:5000/tmp/dr/' + pre_process(fileObj.filename)[1] + '.png')
        return jsonify({'state': 1,
                        'imgl': ul,
                        'imgcl': picl,
                        'infol': infol
                        })
    elif file == 'tw':
        ul = []
        picl = []
        infol = []
        print('111100-----------------', request.form['chunk'])
        for i in range(int(request.form['chunk'])):
            fileObj = request.files.get('files' + str(i))
            print(fileObj)
            fileObj.save(os.path.join(app.config['UPLOAD_FOLDER'], pre_process(fileObj.filename)[1] + '.png'))
            fileObj.save(os.path.join('./tmp/ct', pre_process(fileObj.filename)[1] + '.png'))
            ul.append('http://127.0.0.1:5000/tmp/ct/' + pre_process(fileObj.filename)[1] + '.png')
            rimg, info = tw(os.path.join(app.config['UPLOAD_FOLDER'], pre_process(fileObj.filename)[1] + '.png'))
            infol.append(info)
            cv2.imwrite('tmp/tw/' + pre_process(fileObj.filename)[1] + '.png', rimg)
            picl.append('http://127.0.0.1:5000/tmp/tw/' + pre_process(fileObj.filename)[1] + '.png')
        return jsonify({'state': 1,
                        'imgl': ul,
                        'imgcl': picl,
                        'infol': infol
                        })
    elif file == 'seg':
        f = request.files['file']
        basepath = os.path.dirname(__file__)
        path_raw = os.path.join(basepath, 'tmp/seg', 'org.png')
        f.save(path_raw)
        return jsonify({'state': 1,
                        'pic': 'http://127.0.0.1:5000/tmp/seg/org.png'
                        })
    elif file == 'nerve':
        f = request.files['file']
        basepath = os.path.dirname(__file__)
        path_raw = os.path.join(basepath, 'tmp/nerve', 'org.jpg')
        f.save(path_raw)
        # NOTE(review): the file is saved as org.jpg but the returned URL says
        # org.png — confirm whether the front-end actually uses this URL.
        return jsonify({'state': 1,
                        'pic': 'http://127.0.0.1:5000/tmp/nerve/org.png'
                        })
    elif file == 'octa':
        f = request.files['file']
        basepath = os.path.dirname(__file__)
        path_raw = os.path.join(basepath, 'tmp/nerve_seg', 'octa.png')
        f.save(path_raw)
        # NOTE(review): saves octa.png but returns the fundus.png URL — looks
        # like a copy-paste slip; verify against the front-end before changing.
        return jsonify({'state': 1,
                        'pic': 'http://127.0.0.1:5000/tmp/nerve_seg/fundus.png'
                        })
    elif file == 'fundus':
        f = request.files['file']
        basepath = os.path.dirname(__file__)
        path_raw = os.path.join(basepath, 'tmp/nerve_seg', 'fundus.png')
        f.save(path_raw)
        # NOTE(review): saves tmp/nerve_seg/fundus.png but returns a
        # tmp/nerve/org.png URL — verify against the front-end before changing.
        return jsonify({'state': 1,
                        'pic': 'http://127.0.0.1:5000/tmp/nerve/org.png'
                        })


# Landing-page upload: run the full fundus pipeline on a single image
@app.route("/uploads", methods=['POST', 'GET'])
def uploads():
    """Save the uploaded fundus image and run every analysis step on it:
    DR grading, enhancement, cup/disc, DR lesion grading, vessel
    segmentation, artery/vein, and centerline extraction. All outputs land in
    tmp/ under fixed names; the response maps logical names to those files
    plus the computed vessel metrics.
    """
    f = request.files['file']
    basepath = os.path.dirname(__file__)
    upload_path = os.path.join(basepath, 'tmp', f.filename + '.png')
    # NOTE(review): upload_filename is computed but unused; the raw (unsanitized)
    # f.filename is used in the path above — confirm uploads are trusted.
    upload_filename = secure_filename(f.filename)
    f.save(upload_path)
    img = cv2.imread(upload_path)
    Img_path = os.path.join(basepath, 'tmp', "vessel.png")
    print(Img_path, upload_path)
    cv2.imwrite(Img_path, img)
    cv2.imwrite(upload_path, img)

    # Report dimensions scaled so the height maps to 512
    small_scale = img.shape[0] / 512
    width = img.shape[0] / small_scale
    length = img.shape[1] / small_scale
    #

    # width = 512
    # length = 512
    tangwang_res = img_Tangwang(upload_path, os.path.join(basepath, 'tmp', "vessel_tangwang.png"))

    img_enhance(upload_path, os.path.join(basepath, 'tmp', "vessel_enhance.png"))
    # xf_path['vessel_enhance'] = 'vessel_enhance.png'
    img_sbsp(upload_path, os.path.join(basepath, 'tmp', "vessel_sbsp.png"))

    dr_grading(upload_path, basepath)

    Img, feature_out, out = vessel_seg(upload_path)

    Img1, Img2 = applyImage(upload_path, Img, out)

    vessel_Arter(upload_path)

    Img_path = os.path.join(basepath, 'tmp', "vessel_white.png")
    cv2.imwrite(Img_path, Img)

    Img_path1 = os.path.join(basepath, 'tmp', "vessel_green.png")

    cv2.imwrite(Img_path1, Img1)
    Img_path2 = os.path.join(basepath, 'tmp', "vessel_red.png")

    cv2.imwrite(Img_path2, Img2)

    # NOTE(review): centerline extraction runs twice on the same image (relative
    # then absolute path) — presumably redundant; confirm before removing.
    vessel_centerline("./tmp/vessel_white.png")
    vessel_centerline(os.path.join(basepath, 'tmp', "vessel_white.png"))

    # Vessel metrics from vessel_seg: tortuosity, area/length density, fractal dim
    Tortuosity = str(round(feature_out[0][0], 2))
    VAD = str(round(feature_out[0][1], 2))
    VLD = str(round(feature_out[0][2], 2))
    FD = str(round(feature_out[0][3], 2))
    print("start")
    info = {'Width': width,
            'Length': length,
            'Feature_out_tor': Tortuosity,
            'Feature_out_VAD': VAD,
            'Feature_out_VLD': VLD,
            'Feature_out_FD': FD,
            'tangwang_res': tangwang_res}
    xf_path = {}
    xf_path['raw'] = f.filename + '.png'
    xf_path['vessel'] = 'vessel.png'
    xf_path['vessel_Ater'] = 'vessel_Ater.png'
    xf_path['vessel_white'] = 'vessel_white.png'
    xf_path['vessel_red'] = 'vessel_red.png'
    xf_path['vessel_green'] = 'vessel_green.png'
    xf_path['vessel_sbsp'] = 'vessel_sbsp.png'
    xf_path['vessel_centerline'] = 'vessel_centerline.png'
    xf_path['vessel_tangwang'] = 'vessel_tangwang.png'
    return jsonify({
        'state': 1,
        'path': xf_path,
        'info': info
    })


# Combined segmentation page
@app.route("/seg/<path:choice>", methods=['POST', 'GET'])
def seg(choice):
    """Segmentation-page dispatcher.

    choice == 'enh'    : enhance tmp/seg/org.png -> tmp/seg/enh.png
    choice == 'vessel' : artery/vein + vessel segmentation on both the raw and
                         the enhanced image; returns the vessel metrics
    choice == 'ill'    : DR grading / lesion maps on both images
    """
    if choice == 'enh':
        enh_start = time.time()
        img = stillgan('tmp/seg/org.png')
        cv2.imwrite('tmp/seg/enh.png', img)
        enh_end = time.time()
        print('enh time', enh_end - enh_start)
        return jsonify({'state': 1})
    if choice == 'vessel':
        current = Image.open('tmp/seg/org.png')
        # NOTE(review): PIL .size is (width, height), so these names are
        # swapped; the resize/ratio calls below use them consistently, so the
        # behavior is unchanged — kept to avoid silently altering the ratio.
        height, width = current.size
        ves_start = time.time()
        # Artery/vein on the raw image
        pic = Arter('tmp/seg/org.png')
        cv2.imwrite('./tmp/seg/Ater.png', pic)
        ater_pic = ater('tmp/seg/org.png', './tmp/seg/Ater.png')
        re = Image.fromarray(ater_pic).convert("RGB")
        re = re.resize((height, width))
        re.save('tmp/seg/org_ater.png')
        # Artery/vein on the enhanced image
        pic2 = Arter('tmp/seg/enh.png')
        cv2.imwrite('./tmp/seg/enh_Ater.png', pic2)
        ater_pic2 = ater('tmp/seg/enh.png', './tmp/seg/enh_Ater.png')
        re2 = Image.fromarray(ater_pic2).convert("RGB")
        re2 = re2.resize((height, width))
        re2.save('tmp/seg/enh_ater.png')
        # Vessel segmentation on the raw image
        vs, feature_out = vseg('tmp/seg/org.png')
        img = applyv('tmp/seg/org.png', vs, width / height)
        cv2.imwrite('tmp/seg/org_seg.png', img)
        Tortuosity = str(round(feature_out[0][0], 2))
        VAD = str(round(feature_out[0][1], 2))
        VLD = str(round(feature_out[0][2], 2))
        FD = str(round(feature_out[0][3], 2))
        # Vessel segmentation on the enhanced image
        vs, feature_out_enh = vseg('tmp/seg/enh.png')
        img = applyv('tmp/seg/enh.png', vs, width / height)
        cv2.imwrite('tmp/seg/enh_seg.png', img)
        ves_end = time.time()
        # BUGFIX: these previously read feature_out (raw-image metrics) instead
        # of feature_out_enh, so the "_enh" values duplicated the raw ones.
        Tortuosity_enh = str(round(feature_out_enh[0][0], 2))
        VAD_enh = str(round(feature_out_enh[0][1], 2))
        VLD_enh = str(round(feature_out_enh[0][2], 2))
        FD_enh = str(round(feature_out_enh[0][3], 2))
        # (renamed from `dict`, which shadowed the builtin)
        metrics = {'Tor': Tortuosity, 'VAD': VAD, 'VLD': VLD, 'FD': FD,
                   'Tor_enh': Tortuosity_enh, 'VAD_enh': VAD_enh, 'VLD_enh': VLD_enh, 'FD_enh': FD_enh}
        print('enh time', ves_end - ves_start)
        return jsonify({'state': 1,
                        'dict': metrics,
                        })
    if choice == 'ill':
        ill_start = time.time()
        listr = ['正常', '一级', '二级', '三级', '增殖期']
        img_r, prob_r, count_r, weight_r = dr_2('tmp/seg/org.png')
        kind_r = listr[np.argmax(prob_r)]
        p_r = prob_r[np.argmax(prob_r)]
        cv2.imwrite('tmp/seg/dr_r.png', img_r)
        img_e, prob_e, count_e, weight_e = dr_2('tmp/seg/enh.png')
        kind_e = listr[np.argmax(prob_e)]
        p_e = prob_e[np.argmax(prob_e)]
        cv2.imwrite('tmp/seg/dr_e.png', img_e)
        ill_end = time.time()
        print('enh time', ill_end - ill_start)
        # flag: do the raw and enhanced pipelines agree on the grade?
        flag = kind_e == kind_r
        return jsonify({'state': 1,
                        'kind_r': kind_r,
                        'p_r': p_r,
                        'count_r': count_r,
                        'weight_r': weight_r,
                        'prob_e': prob_e,
                        'kind_e': kind_e,
                        'p_e': p_e,
                        'count_e': count_e,
                        'weight_e': weight_e,
                        'flag': flag
                        })


# Analysis entry point: spawn a background worker and hand back a lookup token
@app.route("/analyze", methods=['GET', 'POST'])
def analyze():
    """Accept an image upload, start analyze_func in a background thread, and
    return a token the client polls via /find and /find_cha.
    """
    global unfinished_thread
    # Allowed upload extensions
    allow_suffix_li = ["jpg", "jpeg", "png", "tif", "tiff"]
    f = request.files['upload_file']
    suffix = secure_filename(f.filename).split(".")[-1]
    if suffix.lower() not in allow_suffix_li:
        return redirect(url_for('index'))

    # Read the uploaded bytes once; the worker gets its own copy
    file_content = f.read()

    # Token = SHA1(timestamp + random jitter)
    s1 = sha1()
    t = random.uniform(0, 2000)
    now = str(time.strftime("%Y-%m-%d-%H_%M_%S", time.localtime())) + str(t)
    s1.update(now.encode())
    token = s1.hexdigest()
    # Start the worker thread (the unused threading.Lock was removed)
    try:
        _thread.start_new_thread(
            analyze_func, (file_content, f.filename, token,))
        unfinished_thread += 1
    except Exception:
        # BUGFIX: was a bare `except:`, which also swallows SystemExit /
        # KeyboardInterrupt; log what actually failed.
        print("Error: 无法启动线程")
        traceback.print_exc()
    # Return the token to the user

    res = {}
    res["find_token"] = token
    return Response(json.dumps(res, ensure_ascii=False), content_type='application/json')


@app.route("/med1", methods=['POST', 'GET'])
def med1():
    """Receive one file of the model-1 batch and store it under its index
    (tmp/model1/raw/<count>.png). Returns an empty 'ul' list."""
    ul = []
    base_dir = os.path.dirname(__file__)
    upload_obj = request.files.get('file')
    print(request.form['count'])
    dest = './tmp/model1/raw/' + str(request.form['count']) + '.png'
    upload_obj.save(dest)
    print(os.path.join(base_dir, dest))
    return jsonify({'ul': ul})


@app.route("/med1run", methods=['POST', 'GET'])
def med1run():
    """Run dr_num over the five staged uploads (tmp/model1/raw/1..5.png).

    Writes each graded image to tmp/model1/re/<i>.png and returns the
    probability, lesion-count and lesion-weight lists ('infol' stays empty,
    matching the original contract). Unused locals (picl, result, path) were
    removed; 'ul' is still read so a missing form field fails the same way.
    """
    print(1111)
    ul = request.form['ul']  # read for request validation; value itself unused
    infol = []
    probl = []
    numl = []
    weightl = []
    start = time.time()
    for i in range(1, 6):
        src = 'tmp/model1/raw/' + str(i) + '.png'
        print(src)
        answer = dr_num(src)
        img = answer[0]

        prob = answer[1]
        print('prob', prob)
        num = answer[2]
        wl = answer[3]
        print('num', num)
        print('wl', wl)

        cv2.imwrite('./tmp/model1/re/' + str(i) + '.png', img)
        probl.append(prob)
        weightl.append(wl)
        numl.append(num)
    end = time.time()
    print(end - start)
    return jsonify({'infol': infol,
                    'probl': probl,
                    'numl': numl,
                    'weightl': weightl})


'''
#糖网分级与分割网页（多线程版本）
@app.route("/med1run",methods=['POST','GET'])
def med1run():
    print(1111)
    ul = request.form['ul']
    picl=[]
    infol=[]
    probl=[]
    numl=[]
    result = []
    start=time.time()
    path = './tmp/model1/tmp_store/vessel.png'
    ctx = torch.multiprocessing.get_context("spawn")
    pool = ctx.Pool(processes=2)
    res_l=[]
    picl=[x[22:] for x in ul.split(',')]
    infol=['tmp/model1/re/' + str(i) + '.png' for i in range(5)]
    t=ul.split(',')
    result1 = pool.apply_async(dr_num,args=('tmp/model1/raw/0.png',))
    result2 = pool.apply_async(dr_num, args=('tmp/model1/raw/1.png',))
    result3 = pool.apply_async(dr_num, args=('tmp/model1/raw/2.png',))
    result4 = pool.apply_async(dr_num, args=('tmp/model1/raw/3.png',))
    result5 = pool.apply_async(dr_num, args=('tmp/model1/raw/4.png',))
    res_l=[result1,result2,result3,result4,result5]
    pool.close()
    pool.join()
    weightl=[]
    for key,res in enumerate(ures_l):
        answer = res.get()
        img = answer[0]
        prob = answer[1]
        num = answer[2]
        wl=answer[3]
        cv2.imwrite('./tmp/model1/re/'+str(key)+'.png', img)
        weightl.append(wl)
        probl.append(prob)
        numl.append(num)
    end=time.time()
    print(end-start)
    print('infol:',infol)
    print('probl:', probl)
    print('numl:', numl)
    print('weightl:', weightl)
    return jsonify({'infol':infol,
                    'probl':probl,
                    'numl':numl,
                    'weightl':weightl
                    })
'''


# Corneal nerve page: enhancement, segmentation and tortuosity grading
@app.route("/nerve/<path:choice_model>", methods=['POST', 'GET'])
def nerve(choice_model):
    """Corneal pipeline dispatcher. choice_model is '<choice>_<model_num>':
    'enh'   : StillGAN enhancement of tmp/nerve/org.jpg -> tmp/nerve/enh.png
    'seg'   : nerve segmentation of both original and enhanced images
    'grade' : tortuosity grading + heatmaps from the saved segmentations
    """
    choice = choice_model.split('_')[0]
    model_num = choice_model.split('_')[1]
    if choice == 'enh':
        # NOTE(review): `model` is unbound (NameError) when model_num != 'One'
        # — confirm the front-end only ever sends *_One.
        if model_num == 'One':
            model = 'StillGAN/checkpoints/isee_csigan/75_net_G_A.pth'
        img = Image.open('tmp/nerve/org.jpg')
        org_size = img.size
        img = img.convert("L")
        img = img.resize((384, 384))
        enh_img = stillgan_model(img, model)
        enh_img = Image.fromarray(np.uint8(enh_img))
        enh_img.convert("L")
        # NOTE(review): Image.ANTIALIAS is deprecated/removed in Pillow >= 10
        # (use Image.LANCZOS) — pin the Pillow version or update.
        enh_img = enh_img.resize(org_size, Image.ANTIALIAS).convert("L")
        enh_img.save('tmp/nerve/enh.png')
        return jsonify({'state': 1})
    if choice == 'seg':
        if model_num == 'One':
            model = 'csnet51_484.pkl'
        img = Image.open('tmp/nerve/org.jpg')
        img = img.convert('L')
        org_size = img.size
        img = img.resize((384, 384))

        seg = predict_nerve(img, model)
        seg = cv2.resize(seg, org_size)
        cv2.imwrite('tmp/nerve/seg_org.png', seg)
        enh_img = Image.open('tmp/nerve/enh.png')
        enh_img = enh_img.convert('L')
        enh_img = enh_img.resize((384, 384))
        enh_seg = predict_nerve(enh_img, model)
        enh_seg = cv2.resize(enh_seg, org_size)
        cv2.imwrite('tmp/nerve/seg_enh.png', enh_seg)
        return jsonify({'state': 1})
    if choice == 'grade':
        # Load and threshold the saved segmentations
        seg = cv2.imread('tmp/nerve/seg_org.png', 0)
        seg = cv2.resize(seg, (384, 384))
        ret, seg = cv2.threshold(seg, 127, 255, cv2.THRESH_TRUNC)
        enh_seg = cv2.imread('tmp/nerve/seg_enh.png', 0)
        enh_seg = cv2.resize(enh_seg, (384, 384))
        ret, enh_seg = cv2.threshold(enh_seg, 127, 255, cv2.THRESH_TRUNC)
        # Convert the PIL images to cv2 arrays
        img = Image.open('tmp/nerve/org.jpg')
        org_size = img.size
        img = img.convert('L')
        img = img.resize((384, 384))
        enh_img = Image.open('tmp/nerve/enh.png')
        enh_img = enh_img.convert('L')
        enh_img = enh_img.resize((384, 384))
        # NOTE(review): cv2.IMREAD_GRAYSCALE is an imread flag, not a cvtColor
        # code; as a cvtColor code its value (0) means COLOR_BGR2BGRA, and the
        # input here is a single-channel ('L') array — verify this conversion
        # actually runs as intended.
        img_cv = cv2.cvtColor(np.asarray(img), cv2.IMREAD_GRAYSCALE)[:, :, 2]
        img_cv = cv2.resize(img_cv, (384, 384))
        enh_img_cv = cv2.cvtColor(np.asarray(enh_img), cv2.IMREAD_GRAYSCALE)[:, :, 2]
        enh_img_cv = cv2.resize(enh_img_cv, (384, 384))
        # Tortuosity heatmaps and predicted levels, original then enhanced
        heatmap_on_image, only_heatmap_on_seg, nerve_gradient_on_img, pred_tortuosity_level, pred_levels_probabilities = tortuosity_grading(
            img_cv, seg)
        heatmap_on_image = heatmap_on_image.resize(org_size)
        heatmap_on_image.save('tmp/nerve/grade_org.png')
        nerve_gradient_on_img = nerve_gradient_on_img.resize(org_size)
        nerve_gradient_on_img.save('tmp/nerve/seg2_org.png')
        enh_heatmap_on_image, enh_only_heatmap_on_seg, enh_nerve_gradient_on_img, enh_pred_tortuosity_level, enh_pred_levels_probabilities = tortuosity_grading(
            enh_img_cv, enh_seg)
        enh_heatmap_on_image = enh_heatmap_on_image.resize(org_size)
        enh_heatmap_on_image.save('tmp/nerve/grade_enh.png')
        enh_nerve_gradient_on_img = enh_nerve_gradient_on_img.resize(org_size)
        enh_nerve_gradient_on_img.save('tmp/nerve/seg2_enh.png')
        return jsonify({'state': 1,
                        'org': str(pred_tortuosity_level),
                        'enh': str(enh_pred_tortuosity_level)})


# DR grading & segmentation page (canned 5-second demo responses)
@app.route("/med1run_fake", methods=['POST', 'GET'])
def med1run_fake():
    """Return hard-coded grading results after a fixed 5 s delay (demo mode)."""
    print(1111)
    ul = request.form['ul']
    base = 'http://127.0.0.1:5000/tmp/model1/re/'
    infol = [base + str(i) + '.png' for i in range(1, 6)]
    # Canned per-image class probabilities, lesion counts and lesion areas
    probl = [[86.63, 6.54, 3.49, 1.63, 1.71], [17.76, 66.94, 10.62, 0.65, 4.02], [0.29, 1.0, 68.72, 25.03, 4.95],
             [0.11, 0.12, 3.71, 88.72, 7.34], [0.06, 0.05, 0.55, 42.49, 56.85]]
    numl = [[3, 3, 6, 0], [4, 103, 19, 0], [18, 12, 43, 5], [19, 71, 40, 4], [71, 46, 53, 3]]
    weightl = [[10084.0, 15.5, 366.5, 0], [1071.5, 10707.5, 436.0, 0], [10256.5, 4428.0, 8837.0, 51227.0],
               [38030.5, 65132.0, 15230.0, 1860.0], [74101.5, 9747.5, 6664.0, 15599.5]]
    time.sleep(5)
    return jsonify({'infol': infol,
                    'probl': probl,
                    'numl': numl,
                    'weightl': weightl
                    })


# Vessel segmentation
# Serve images from the tmp/ folder
@app.route('/tmp/<path:file>', methods=['GET'])
def show_photo(file):
    """Serve tmp/<file> as image/png; fall back to the loading placeholder
    when the file cannot be read. Non-GET or missing `file` yields None
    (Flask 500), matching the original behavior.
    """
    if request.method == 'GET':
        if not file is None:
            try:
                # BUGFIX: close the handle (the original leaked open files)
                with open(f'tmp/{file}', "rb") as fh:
                    image_data = fh.read()
            except Exception as e:
                print(traceback.format_exc())
                with open(f'tmp/loading.png', "rb") as fh:
                    image_data = fh.read()

            response = make_response(image_data)
            response.headers['Content-Type'] = 'image/png'
            return response


# Poll for a processed file by token
@app.route("/find/<token>", methods=['GET', 'POST'])
def find(token):
    """Look up a result file by token.

    Returns the file as an attachment when ready, {"error": 1} JSON while the
    worker is still processing, and {"error": 2} on a database failure.
    """
    token = str(token)
    # Small random delay to ease DB contention under concurrent polling
    t = random.uniform(0.1, 1.1)
    time.sleep(t)
    # Try to fetch the file from the database
    try:
        file = dbs.query(Files).filter_by(token=token).first()
        if file:
            strIO = BytesIO()
            strIO.write(file.data)
            strIO.seek(0)
            return send_file(strIO, download_name=file.file_name, as_attachment=True)
        else:
            t = {}
            t["error"] = 1
            t["msg"] = "processing"
            return Response(json.dumps(t, ensure_ascii=False), content_type='application/json')

    except Exception:
        # BUGFIX: was a bare `except:`; roll back the failed transaction and
        # report a retryable error. (The dead `finded` flag and the empty
        # `finally` holding commented-out deletion code were removed.)
        dbs.rollback()
        t = {}
        t["error"] = 2
        t["msg"] = "database error, please try later."
        return Response(json.dumps(t, ensure_ascii=False), content_type='application/json')


@app.route("/find_cha/<token>", methods=['GET', 'POST'])
def find_cha(token):
    """Poll for the analysis-metrics row stored under '<token>cha'.

    Returns {"error": 0, ...metrics} when the Arguments row exists,
    {"error": 1} while processing, {"error": 2} on database failure.
    """
    token = str(token)
    # Small random delay to ease DB contention under concurrent polling
    time.sleep(random.uniform(0.1, 1.1))
    # Try to fetch the metrics row from the database
    try:
        ARGS = dbs.query(Arguments).filter_by(token=token).first()
        if ARGS:
            payload = {"error": 0,
                       "msg": "successes",
                       "level_r": ARGS.level_r,
                       "level_h": ARGS.level_h}
            # All remaining fields are numeric; copy them in the original
            # key order, coerced to float exactly as before.
            numeric_fields = ("prob_r prob_h "
                              "area1_r area2_r area3_r area4_r "
                              "area1_h area2_h area3_h area4_h "
                              "num1_r num2_r num3_r num4_r "
                              "num1_h num2_h num3_h num4_h "
                              "vld vad fd tor").split()
            for field in numeric_fields:
                payload[field] = float(getattr(ARGS, field))

            return Response(json.dumps(payload, ensure_ascii=False), content_type='application/json')
        else:
            payload = {"error": 1, "msg": "processing"}
            return Response(json.dumps(payload, ensure_ascii=False), content_type='application/json')

    except Exception as e:
        print(e)
        dbs.rollback()
        payload = {"error": 2, "msg": "database error, please try later."}
        return Response(json.dumps(payload, ensure_ascii=False), content_type='application/json')


# Download a folder as a zip archive
@app.route('/download/<name>', methods=['GET'])
def download(name):
    """Zip the tmp/<name> directory and return it as an attachment.

    The archive is rebuilt on every request (any stale zip is removed first).
    """
    dirpath = f"tmp/{name}"
    outFullName = f"tmp/{name}.zip"
    if os.path.exists(outFullName):
        os.remove(outFullName)
    # BUGFIX: `zip` shadowed the builtin and the handle leaked if a write
    # raised; the context manager closes the archive in all cases.
    with zipfile.ZipFile(outFullName, "w", zipfile.ZIP_DEFLATED) as archive:
        for path, dirnames, filenames in os.walk(dirpath):
            # Strip the root prefix so entries are relative to the folder
            fpath = path.replace(dirpath, '')

            for filename in filenames:
                archive.write(os.path.join(path, filename), os.path.join(fpath, filename))
    directory = os.getcwd()  # serve relative to the current working directory
    response = make_response(send_from_directory(directory, f"tmp/{name}.zip", as_attachment=True))
    response.headers["Content-Disposition"] = "attachment; filename={}".format(outFullName.encode().decode('latin-1'))
    return response


@app.route('/new_demo', methods=['GET', 'POST'])
def new_demo():
    """Proxy demo endpoint: forward fundus/OCTA images to a remote model server.

    Workflow (all switches read from the multipart form as ints):
      1. Load ``tmp/nerve_seg/octa.png`` / ``tmp/nerve_seg/fundus.png``
         depending on ``octa_flag`` / ``fundus_flag``.
      2. POST a JSON payload of base64-encoded images plus task switches to
         the remote ``/combine/`` service.
      3. Scrape class/probability fields out of the raw response text with
         regexes and save returned enhanced/segmented images back under
         ``tmp/nerve_seg/``.
      4. Return DR / hypertension / AD probability pairs (percentages) as JSON.

    NOTE(review): ``image_to_base64``, ``decode_base64`` and ``ater_octa``
    are helpers defined elsewhere in this file; their exact contracts are
    assumed here, not verified.
    """
    print(" i am running")
    is_pre_enhance = int(request.form['is_pre_enhance'])
    fundus_flag = int(request.form['fundus_flag'])
    octa_flag = int(request.form['octa_flag'])

    # OCTA input: loaded as grayscale; the PIL image and its size are kept
    # for the artery/vein overlay step further down.
    if octa_flag:
        octa_path = 'tmp/nerve_seg/octa.png'
        octa_img = Image.open(octa_path)
        octa_img = octa_img.convert("L")
        octa_size = octa_img.size
        encoded_octa = image_to_base64(octa_img)
    else:
        encoded_octa = None
    # Fundus input
    if fundus_flag:
        fundus_path = 'tmp/nerve_seg/fundus.png'
        img = Image.open(fundus_path)
        fundus_size = img.size
        encoded_fundus = image_to_base64(img)
    else:
        encoded_fundus = None
    # NOTE(review): this local deliberately shadows the imported ``json``
    # module for the remainder of the function (only ``json.dumps`` users
    # elsewhere are unaffected).
    json = {
        "octa_img": encoded_octa,  # required
        "fundus_img": encoded_fundus,  # required
        "is_enhance_fundus": 2 - is_pre_enhance,
        "is_ad": octa_flag,
        "is_av_octa": octa_flag,  # octa
        "is_av_fundus": False,
        "is_dr_multi2": fundus_flag * octa_flag,
        "is_dr_octa33": octa_flag * (1 - fundus_flag),
        "is_dr_fundus": (1 - octa_flag) * fundus_flag,
        "is_hp_multi2": fundus_flag * octa_flag,
        "is_hp_octa33": octa_flag * (1 - fundus_flag),
        "is_hp_fundus": (1 - octa_flag) * fundus_flag,
    }

    # x = requests.post('http://127.0.0.1:3000/ad/',json=json)
    # print(x.status_code)
    # print(x.text)

    x = requests.post('http://114.55.245.149:3000/combine/', json=json)
    # print(x.status_code)
    # Request type implied by the flag combination: exactly one of
    # 'fundus', 'octa33' or 'multi2' (string multiplication by 0/1 flags).
    class_ = 'fundus' * (1 - octa_flag) * fundus_flag + 'octa33' * octa_flag * (
            1 - fundus_flag) + 'multi2' * fundus_flag * octa_flag
    # Regex patterns used to scrape result fields from the raw response text
    dr_class = 'dr_' + class_ + '_clas":(.*?),'
    dr_prob = 'dr_' + class_ + '_prob":(.*?),'
    ad_class = 'ad_clas":(.*?),'
    ad_prob = 'ad_prob":(.*?),'
    hp_class = 'hp_' + class_ + '_clas":(.*?),'
    hp_prob = 'hp_' + class_ + '_prob":(.*?),'
    enh_img = '"enhanced_fundus_img":"(.*?)",'
    av = '"aved_octa_img":"(.*?)",'
    av_fundus = '"aved_fundus_img":"(.*?)",'
    # print(x.text)
    # Fundus enhancement: dump the raw response for debugging, then save
    # the enhanced image (resized back to the original fundus size).
    with open("test.txt", "w") as f:
        f.write(x.text)
    if fundus_flag:
        if 2 - is_pre_enhance:
            enh = decode_base64(re.search(enh_img, x.text).group(1))
            enh = enh.resize(fundus_size)
            enh.save('tmp/nerve_seg/enh.png')
            img = enh
        # NOTE(review): dead branch — ``f_img`` is undefined and would raise
        # NameError if this were ever enabled.
        if False:
            avv = decode_base64(re.search(av_fundus, x.text).group(1))

            av = ater_octa(f_img, avv)

            avv = Image.fromarray(np.uint8(av))
            avv = avv.resize(fundus_size)
            # cv2.imwrite(av, 'tmp/nerve_seg/seg.png')
            avv.save('tmp/nerve_seg/fundus_seg.png')
    # OCTA vessel segmentation overlay
    if octa_flag:
        # NOTE(review): ``av`` is the regex pattern on this line, then is
        # immediately rebound to the overlay array — confusing but harmless
        # as long as the pattern is not needed again afterwards.
        avv = decode_base64(re.search(av, x.text).group(1))

        av = ater_octa(octa_img, avv)

        avv = Image.fromarray(np.uint8(av))
        avv = avv.resize(octa_size)
        # cv2.imwrite(av, 'tmp/nerve_seg/seg.png')
        avv.save('tmp/nerve_seg/seg.png')

    # Probabilities scaled to percentages; class extraction kept commented
    # out for reference.

    # dr_c = int(re.search(dr_class, x.text).group(1))
    dr_p = 100 * float(re.search(dr_prob, x.text).group(1))
    # hp_c = int(re.search(hp_class, x.text).group(1))
    hp_p = 100 * float(re.search(hp_prob, x.text).group(1))
    # ad_c = int(re.search(ad_class, x.text).group(1)) if octa_flag else 0
    ad_p = 100 * float(re.search(ad_prob, x.text).group(1)) if octa_flag else 0

    return_json = {
        'dr': [100 - dr_p, dr_p],
        'hp': [100 - hp_p, hp_p],
        'ad': [100 - ad_p, ad_p] if octa_flag else [0, 0],
    }
    print(return_json)
    return jsonify(return_json)


""" tacom: 上面的都不是我写的"""

from seg_common import RedisService
from seg_common.parser.service import ParserService
from seg_common.logging import ConsoleService
from seg_common.Exception import ExceptionCallBack
from seg_system import multibatch
from seg_system import ApplicationConfig
from seg_system import vascular
from seg_system.download import DownloadDirFactory
from seg_system.download import DownloadService
from seg_system.multibatch.service.LongTaskMonitorService import LongTaskMonitorService
from seg_system.LongTaskStatusUpload import MultiStatusUploadImpl
import seg_quartz

import invoke
from concurrent.futures import ThreadPoolExecutor
from flask_apscheduler import APScheduler

aps_scheduler = APScheduler()


class MultiBatchController:
    """tacom develop
        note: 2022.7.26-

    Routes and services for the multi-batch prediction workflow:
    upload images -> start a long-running predict task on the thread pool
    -> poll task state via redis -> download (optionally labelled) results.
    All services are class-level singletons instantiated at import time.
    """
    # thread pool
    executor = ThreadPoolExecutor(max_workers=ApplicationConfig.SystemConfig.NUM_PREDICT_USER)

    # save service - most for calculate where to download img
    SaveService = multibatch.SaveService()
    VascularSaveService = vascular.VascularDirFactory()
    DownloadSaveService = DownloadDirFactory()

    # monitor service - usually download info from redis, and send back to font
    TaskMonitorService = LongTaskMonitorService()
    TaskStateUploadService = MultiStatusUploadImpl()  # upload state to redis

    # primary service for multi_batch web page
    # control the process
    BatchPredictService = multibatch.BatchPredictService()

    # other(usually haven't time to reconstitution
    # below service need be re-build in BatchPredictService
    # but some Singleton service will be put there to init when flask is starting
    VascularPredictService = vascular.VascularService()

    # Common Tools
    ParserTool = ParserService.ParserTools()
    DownloadTools = DownloadService()
    RedisTools = RedisService()

    # tmp var: handle of the single in-flight prediction future (None when idle)
    future_obj = None
    ExceptionCallBackService = ExceptionCallBack()

    class MultiBatchTools:
        """Small helpers shared by the route handlers below."""

        @staticmethod
        def saver_copy_and_init(copy_from, r, path: str = None):
            """Deep-copy a saver service and bind it to this request's paths.

            The copy isolates per-request path state so concurrent routes
            do not mutate the shared class-level singleton.
            """
            c = copy.deepcopy(copy_from)
            c.path_init_with_request(r, path)
            return c

    @staticmethod
    @app.route('/app/preditstart', methods=['POST'])
    def multi_batch_start():
        """Kick off a long batch-prediction task on the thread pool.

        Returns a JSON status with the redis id used to poll progress, or
        a refusal if a task is already running.

        Translated warnings (kept verbatim below): once the thread pool is
        used, exceptions inside the task are swallowed by the future and
        never printed — be careful; under stress tests every worker blows
        up, so do NOT configure more than one worker.
        """
        """开启线程池后，所有报错直接被拦截，不显示，需要注意"""
        """压力测试的时候，一个都炸，所以不要设置多个worker"""
        """----------------------------------------"""

        config = MultiBatchController.ParserTool.parser_config_from_upload_json(request)

        if 'result' in config:
            # Parser produced an error payload — return it to the client as-is
            return jsonify(config)

        # Deep copies reduce concurrency issues between parallel routes
        saver = MultiBatchController.MultiBatchTools.saver_copy_and_init(MultiBatchController.SaveService, request,
                                                                         basepath)
        vascular_saver = MultiBatchController.MultiBatchTools.saver_copy_and_init(
            MultiBatchController.VascularSaveService, request, basepath)

        uName, token = MultiBatchController.ParserTool.parser_name_token_from_request(request)
        redis_id = MultiBatchController.RedisTools.make_redis_key(uName, token)

        # Worker threads cannot access the Flask ``request``, so everything
        # the task needs is extracted here and passed in explicitly.
        if MultiBatchController.future_obj is None or MultiBatchController.future_obj.done():
            MultiBatchController.TaskStateUploadService.update_seq(redis_id)
            future = MultiBatchController.executor.submit(
                MultiBatchController.BatchPredictService.predict_start,
                saver, basepath, uName, token, config,
                vascular_saver=vascular_saver,
                vascular_predictor=MultiBatchController.VascularPredictService,
                redis_id=redis_id
            )
            ExceptionCallBack.predict_pool_exception_info[id(future)] = redis_id
            MultiBatchController.future_obj = future
            future.add_done_callback(ExceptionCallBack.threading_pool_callback)
            return jsonify({'result': True, 'message': 'long task start',
                            'id': redis_id})
        else:
            return jsonify({'result': False, 'message': 'a request is dealing, waiting!!'})

    @staticmethod
    @app.route('/app/longTaskStateGet', methods=['POST'])
    def get_task_state():
        """Poll endpoint: report the long task's state stored under its redis id."""
        redis_id, result = MultiBatchController.ParserTool.parser_redis_id_from_request(request)
        message = MultiBatchController.TaskMonitorService.get_all(redis_id)
        return jsonify({'result': result, 'message': message})

    @staticmethod
    @app.route('/app/multiupload', methods=['POST'])
    def multi_batch_upload():
        """Save uploaded images and prepare all per-user working directories."""

        # Create the corneal-nerve segmentation directory and save the images
        result, message = MultiBatchController.SaveService.img_save(request, basepath)

        # Create the user's temporary download directory
        result, m_2 = MultiBatchController.DownloadSaveService.path_init_with_request(request, basepath)
        message.append(m_2[0])

        # Create the nerve quantitative-assessment directory
        result, m_1 = MultiBatchController.VascularSaveService.path_init_with_request(request, basepath)
        message.append(m_1[0])

        return jsonify({'result': result, 'message': message})

    @staticmethod
    @app.route("/app/multidownload", methods=["POST"])
    def multi_batch_download():
        """Send the user's results back as a zip archive."""
        uName, token = MultiBatchController.ParserTool.parser_name_token_from_request(request)
        download_saver = MultiBatchController.MultiBatchTools.saver_copy_and_init(
            MultiBatchController.DownloadSaveService, request, basepath)
        return MultiBatchController.DownloadTools.download_zip_file(download_saver, uName, token)

    @staticmethod
    @app.route("/app/multi_download_label", methods=["POST"])
    def multi_batch_download_with_label():
        """Send results as a zip with labels localised via the language table."""
        uName, token, language = MultiBatchController.ParserTool.parser_download_param_from_request(request)
        download_saver = MultiBatchController.MultiBatchTools.saver_copy_and_init(
            MultiBatchController.DownloadSaveService, request, basepath)
        return MultiBatchController.DownloadTools.download_zip_file_with_label(download_saver, uName, token,
                   LAG_DICT=ApplicationConfig.PathConfig.DOWNLOAD_LANGUAGE[language])

    @staticmethod
    @app.route("/app/predict_get", methods=["POST"])
    def predict_get():
        """Fetch prediction output for a single named image.

        Two saver copies per service are built: one without ``basepath``
        (relative paths for the response) and one with it (absolute paths
        for disk access).
        """
        img_name = MultiBatchController.ParserTool.parser_img_name_from_request(request)

        saver = MultiBatchController.MultiBatchTools.saver_copy_and_init(
            MultiBatchController.SaveService, request)
        saver_with_path = MultiBatchController.MultiBatchTools.saver_copy_and_init(
            MultiBatchController.SaveService, request, basepath)

        vascular_saver = MultiBatchController.MultiBatchTools.saver_copy_and_init(
            MultiBatchController.VascularSaveService, request)
        vascular_saver_with_path = MultiBatchController.MultiBatchTools.saver_copy_and_init(
            MultiBatchController.VascularSaveService, request, basepath)

        return MultiBatchController.ParserTool.parse_from_image_name(
            img_name, saver, saver_with_path,
            vascular_saver=vascular_saver,
            vascular_saver_with_path=vascular_saver_with_path
        )


class SchedulerController:
    """Background jobs registered with flask_apscheduler (``aps_scheduler``)."""
    # Service
    FileCleanService = seg_quartz.FileCleanService()  # purges the upload cache directories

    TimeRecorderSaveService = seg_quartz.TimeRecorderSaveService()

    @staticmethod
    @aps_scheduler.task('interval', id='clean_job', days=7)
    def file_clean_start():
        """Weekly job: remove stale multi-batch cache directories under ``basepath``."""
        SchedulerController.FileCleanService.clean_multi_batch_cache_utils_latest(basepath)

    @staticmethod
    # @aps_scheduler.task('interval', id='time_record_save', minutes=1)
    def time_record_start():
        """Persist timing statistics (scheduler registration currently disabled)."""
        SchedulerController.TimeRecorderSaveService.time_record()

    @staticmethod
    # @aps_scheduler.task('interval', id='update_console_log', minutes=1)
    def update_console_log():
        """Rotate/refresh the console logger (scheduler registration currently disabled)."""
        ConsoleService.make_logger()


if __name__ == "__main__":
    aps_scheduler.init_app(app)
    aps_scheduler.start()
    app.config['JSON_AS_ASCII'] = False
    app.run(**ApplicationConfig.SystemConfig.SERVER_MODE_DICT[ApplicationConfig.SystemConfig.SERVER_MODE])
