from flask import  render_template,send_file
import shutil
import base64
# from flask import make_response
import json
# import numpy
# import nibabel
import numpy as np
from typing import Tuple
# from flask_cors import cross_origin
from flask_cors import CORS
import pickle as pkl
import nibabel as nib
import os
from flask import Flask, request, jsonify
import requests
from requests.packages.urllib3.exceptions import InsecureRequestWarning
from translate import Translator

# from sqlalchemy.exc import SQLAlchemyError
from datetime import datetime

from nnunet.utils.static_predictor import StaticPredictor
from nnunet.transforms import default_2D_augmentation_params, default_3D_augmentation_params
from nnunet.predict import predict_from_folder
from tools.preprocess_utils import GenericPreprocessor, PreprocessorFor2D
import pickle
from flask_sqlalchemy import SQLAlchemy
import openai

import sys
import transform
import random

requests.packages.urllib3.disable_warnings(InsecureRequestWarning)


def getFilePath(num):
    """Return the absolute path `num` directory levels above this file.

    e.g. getFilePath(1) is this file's directory, getFilePath(2) its parent.
    """
    ups = ['..'] * num
    return os.path.abspath(os.path.join(__file__, *ups))


# Make the project root importable before any project-local imports resolve.
parent_path = getFilePath(2)
sys.path.insert(0, parent_path)

app = Flask(__name__,template_folder="templates",static_folder='static',static_url_path='/static')


# Allow cross-origin requests from the frontend.
CORS(app)

# Database configuration
HOSTNAME = "127.0.0.1"
PORT = 3306
USERNAME = "root"
# NOTE(review): credentials are hard-coded in source — move to env/config and rotate.
PASSWORD = "Yu970629!"
DATABASE = "yizhenrenxin"

app.config['SQLALCHEMY_DATABASE_URI'] = f"mysql+pymysql://{USERNAME}:{PASSWORD}@{HOSTNAME}:{PORT}/{DATABASE}?charset=utf8mb4"
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False

db = SQLAlchemy(app)

# ChatGPT API configuration
# NOTE(review): API key is committed in source — rotate it and load from env.
openai.api_key = "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"
translator = Translator(from_lang='en', to_lang='zh')


def getAIAnswer(message):
    """Send `message` as a completion prompt to OpenAI and return the text.

    Uses the legacy Completion endpoint with deterministic sampling
    (temperature 0).
    """
    completion = openai.Completion.create(
        model="text-davinci-003",
        prompt=message,
        temperature=0,
        max_tokens=3000,
    )
    return completion["choices"][0]['text']

@app.route('/', methods=['GET'])
def index():
    """Serve the frontend entry page."""
    return render_template("index.html")




# Create the user table
class User(db.Model):
    """Login account.

    NOTE(review): passwords are stored and compared in plaintext — consider
    hashing (e.g. werkzeug.security) before production use.
    """
    __tablename__ = 'user'

    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    username = db.Column(db.String(80), unique=True, nullable=False)
    password = db.Column(db.String(120), nullable=False)


class PredictResult(db.Model):
    """One prediction job per uploaded file: its path, status and finish time."""
    __tablename__ = 'predict_result'
    id = db.Column(db.BigInteger, comment="id编号")  # numeric id supplied by the frontend
    # NOTE(review): db.String with no length may fail DDL on MySQL — confirm.
    file_name = db.Column(db.String, comment="文件名称", primary_key=True)
    file_path = db.Column(db.String(255))
    status = db.Column(db.String(50), comment="预测后的状态")  # 'primary' / 'success' / 'danger'
    timestamp = db.Column(db.DateTime)  # completion time of the prediction


class ResultDetail(db.Model):
    """Aggregate image measurements for one file, before ("br_") and after
    ("ar_") resampling; values are JSON-encoded strings (see read_pkl_show)."""
    __tablename__ = 'result_detail'
    id = db.Column(db.BigInteger, comment="id编号")
    file_name = db.Column(db.String, comment="文件名称", primary_key=True)
    original_spacing = db.Column(db.String)  # JSON list, voxel spacing
    original_size_of_raw_data = db.Column(db.String)  # JSON list, voxel counts
    spacing_after_resampling = db.Column(db.String)
    size_after_resampling = db.Column(db.String)
    br_dia = db.Column(db.String)  # per-axis physical extents before resampling (JSON)
    br_areas = db.Column(db.String)  # per-axis cross-sections before resampling (JSON)
    br_volume = db.Column(db.String(50))
    ar_dia = db.Column(db.String)  # per-axis physical extents after resampling (JSON)
    ar_areas = db.Column(db.String)  # per-axis cross-sections after resampling (JSON)
    ar_volume = db.Column(db.String(50))
    classes = db.Column(db.String)  # JSON list of label values found in the mask, incl. 0


class EveryClassInfo(db.Model):
    """Per-label (per-organ) geometry for one file; one row per label value
    (populated by get_radius_and_volume)."""
    __tablename__ = 'class_info'
    id = db.Column(db.Integer, comment="序号",primary_key=True, autoincrement=True)
    file_name = db.Column(db.String, comment="文件名称")  # links rows to the source file
    label = db.Column(db.String, comment="标签编号")  # organ label value, stored as text

    # `range` shadows the builtin only inside this class body — harmless here.
    range = db.Column(db.String, comment="ROI范围")  # bounding box [[min,max] per axis] (JSON)
    center = db.Column(db.String, comment="ROI中心坐标")  # box center per axis (JSON)
    radius = db.Column(db.String, comment="三个面最大半径")  # half-extent per axis (JSON)
    diameter = db.Column(db.String, comment="三个面最大直径")  # full extent per axis (JSON)
    biggest_area_index = db.Column(db.String, comment="最大横截面积的索引")  # slice index with max area
    biggest_area = db.Column(db.String, comment="最大横截面积")
    biggest_area_shape = db.Column(db.String, comment="最大横截面积的shape")  # [width, height]
    volume = db.Column(db.String, comment="体积")  # sum of voxel values in the masked organ


class NumpyEncoder(json.JSONEncoder):
    """JSON encoder that converts NumPy types to native Python equivalents.

    Generalized from np.int64-only: any NumPy integer or floating scalar and
    whole ndarrays now serialize instead of raising TypeError.
    """

    def default(self, obj):
        if isinstance(obj, np.integer):
            return int(obj)
        if isinstance(obj, np.floating):
            return float(obj)
        if isinstance(obj, np.ndarray):
            return obj.tolist()
        return super().default(obj)


# 登录路由
@app.route('/login', methods=['POST'])
def login():
    """Authenticate a user by exact username/password match.

    NOTE(review): passwords are compared in plaintext — consider hashing.
    """
    payload = request.get_json()
    match = User.query.filter_by(
        username=payload.get('username'),
        password=payload.get('password'),
    ).first()
    if match is None:
        return jsonify({'message': '用户名或密码错误'}), 401
    # A real deployment would issue a token here for later authentication.
    return jsonify({'message': '登录成功'}), 200

# 注册路由
@app.route('/register', methods=['POST'])
def register():
    """Create a new account unless the username is already taken."""
    payload = request.get_json()
    username = payload.get('username')
    password = payload.get('password')

    if User.query.filter_by(username=username).first() is not None:
        return jsonify({'message': '用户名已存在'}), 409

    db.session.add(User(username=username, password=password))
    db.session.commit()
    return jsonify({'message': '注册成功'}), 201



#展示3D图像功能加载文件夹内文件list
@app.route('/load_upload_file', methods=['GET'])
def load_upload_file():
    """List every file name known to the class_info table (for the 3D viewer)."""
    names = [row.file_name for row in EveryClassInfo.query.all()]
    return jsonify(names)


# 假设这里是数据库查询的逻辑
def query_patient_info(filename):
    """Fetch the stored original-image info for one file.

    Returns a dict with size/spacing/diameter/area/volume, or None when the
    lookup fails (e.g. no ResultDetail row exists for `filename`).
    """
    try:
        row = ResultDetail.query.filter_by(file_name=filename).first()
        return {
            "size": row.original_size_of_raw_data,
            "spacing": row.original_spacing,
            "diameter": row.br_dia,
            "area": row.br_areas,
            "volume": row.ar_volume,
        }
    except Exception:
        # Fixed: removed a leftover breakpoint() that froze the server on any
        # failed lookup; fall through to the implicit None return as before.
        return None


@app.route('/get_patient_info', methods=['GET'])
def get_patient_info():
    """Return basic image info for the file named in the `filename` query arg."""
    filename = request.args.get('filename')
    patient_info = query_patient_info(filename)
    return jsonify(patient_info)


@app.route('/search_celiang', methods=['GET'])
def search_celiang():
    """Return measurement and class-count details for one file.

    Reads the aggregate ResultDetail row and every per-label EveryClassInfo
    row for the file named in the `filename` query parameter.
    """
    filename = request.args.get('filename')
    detail = ResultDetail.query.filter_by(file_name=filename).first()
    per_class_rows = EveryClassInfo.query.filter_by(file_name=filename).all()

    # Serialize each per-label row, preserving the original field order.
    fields = ('label', 'range', 'center', 'radius', 'diameter',
              'biggest_area_index', 'biggest_area', 'biggest_area_shape',
              'volume')
    all_class_results = [{name: getattr(row, name) for name in fields}
                         for row in per_class_rows]

    response = {
        'measurement_results': {
            'original_size_of_raw_data': detail.original_size_of_raw_data,
            'original_spacing': detail.original_spacing,
            'spacing_after_resampling': detail.spacing_after_resampling,
            'size_after_resampling': detail.size_after_resampling,
            'br_dia': detail.br_dia,
            'br_areas': detail.br_areas,
            'br_volume': detail.br_volume,
            'ar_dia': detail.ar_dia,
            'ar_areas': detail.ar_areas,
            'ar_volume': detail.ar_volume,
        },
        'count_results': {
            # `classes` includes the background label 0, hence the minus one.
            'num_classes': len(json.loads(detail.classes)) - 1,
            'classes': detail.classes,
        },
        'class_info': json.dumps(all_class_results),
    }

    return jsonify(response)


@app.route('/search_organ', methods=['GET'])
def get_organ_results():
    """Return per-patient organ details, one dense list per file.

    Rows are consumed in query order, grouped by file_name; gaps in the
    ascending label sequence are padded with zero-filled placeholder entries
    so each patient's list is contiguous from label 1 upward.
    """
    results = EveryClassInfo.query.all()
    # Convert query rows to the nested per-patient format the frontend expects.
    fileNameList = ""
    data = []
    i = 0
    while(i < len(results)):
        binrenList = []
        fileNameList = results[i].file_name  # file name of the current patient
        number = 1  # label value expected at the current slot
        # Consume all rows belonging to this file.
        while(i < len(results) and fileNameList == results[i].file_name ):
            if(number == int(results[i].label)):
                binrenList.append({
                    'label': results[i].label,
                    'range': results[i].range,
                    'center': results[i].center,
                    'radius': results[i].radius,
                    'diameter': results[i].diameter,
                    'biggest_area_index': results[i].biggest_area_index,
                    'biggest_area': results[i].biggest_area,
                    'biggest_area_shape': results[i].biggest_area_shape,
                    'volume': results[i].volume
                })
                i += 1
            else:
                # Label `number` is missing for this file: emit a placeholder.
                # NOTE(review): if a row's label is ever *smaller* than `number`
                # (unsorted or duplicate labels), `i` never advances past it
                # and this inner loop spins forever — confirm rows are ordered
                # by file_name with strictly increasing labels.
                binrenList.append({
                    'label': str(number),
                    'range': "[[0,0],[0,0],[0,0]]",
                    'center': "[0,0,0]",
                    'radius': "[0,0,0]",
                    'diameter': "[0,0,0]",
                    'biggest_area_index': "0",
                    'biggest_area': "0",
                    'biggest_area_shape': "[0,0]",
                    'volume': "0"
                })
            number += 1
        data.append(binrenList)


    return jsonify(data)

@app.route('/transform/file', methods=['POST'])
def transformFileData():
    """Convert an uploaded .nii file to .nii.gz and send it back.

    The upload is saved under static/transformData/nii and the gzipped result
    under static/transformData/nii_gz.
    """
    print("transformFileData")
    upload_file = request.files['fileobj']

    # Fixed: build paths with os.path.join so this also works on POSIX — the
    # previous rf"...\static\transformData" literals were Windows-only,
    # while the rest of the file uses '/'-style paths.
    dataFile = os.path.join(getFilePath(1), "static", "transformData")
    os.makedirs(dataFile, exist_ok=True)

    fileName = upload_file.filename
    nii_dir = os.path.join(dataFile, "nii")
    os.makedirs(nii_dir, exist_ok=True)
    inputFile = os.path.join(nii_dir, fileName)
    upload_file.save(inputFile)

    gz_dir = os.path.join(dataFile, "nii_gz")
    os.makedirs(gz_dir, exist_ok=True)
    outputFile = os.path.join(gz_dir, fileName + ".gz")
    transform.convert_nii_to_nii_gz(inputFile, outputFile)
    return send_file(outputFile, as_attachment=True)


@app.route('/transform/folder', methods=['POST'])
def transformFolderData():
    """Convert an uploaded DICOM series or raw/mhd file set to one .nii.gz.

    Files arrive as form fields fileobj0, fileobj1, ... .dcm files are saved
    into a fresh randomly-numbered folder under transformData/dcm, anything
    else under transformData/raw; the matching converter then produces a
    single .nii.gz which is sent back as an attachment.
    """
    print("transformFolderData")
    fileKeys = request.files.keys()
    i = 0
    randomName = random.randint(0, 1000)

    # Fixed: os.path.join instead of hard-coded backslashes (Windows-only).
    dcmPath = os.path.join(getFilePath(1), "static", "transformData", "dcm")
    os.makedirs(dcmPath, exist_ok=True)
    rawPath = os.path.join(getFilePath(1), "static", "transformData", "raw")
    os.makedirs(rawPath, exist_ok=True)

    # flag: 0 = no batch folder chosen yet, 1 = dcm series, 2 = raw/mhd files
    flag = 0
    while ("fileobj" + str(i) in fileKeys):
        upload_file = request.files["fileobj" + str(i)]
        if (upload_file.filename.split('.')[-1] == 'dcm'):
            # Pick an unused folder name once (flag == 0), reuse it after.
            while os.path.exists(os.path.join(dcmPath, str(randomName))) and flag == 0:
                randomName = random.randint(0, 1000)
            if (flag == 0):
                os.mkdir(os.path.join(dcmPath, str(randomName)))
            savePath = os.path.join(dcmPath, str(randomName), upload_file.filename.split('/')[-1])
            upload_file.save(savePath)
            flag = 1
        else:
            while os.path.exists(os.path.join(rawPath, str(randomName))) and flag == 0:
                randomName = random.randint(0, 1000)
            if (flag == 0):
                os.mkdir(os.path.join(rawPath, str(randomName)))
            savePath = os.path.join(rawPath, str(randomName), upload_file.filename.split('/')[-1])
            upload_file.save(savePath)
            flag = 2
        i = i + 1

    if (flag == 1):
        outputFile = os.path.join(dcmPath, str(randomName) + ".nii.gz")
        transform.dcm2nii(os.path.join(dcmPath, str(randomName)), outputFile)
        return send_file(outputFile, as_attachment=True)
    else:
        outputFile = os.path.join(rawPath, str(randomName) + ".nii.gz")
        transform.convert_raw_mhd_to_niigz(os.path.join(rawPath, str(randomName)), outputFile)
        return send_file(outputFile, mimetype='gz', as_attachment=True)

def get_radius_and_volume(file_name, msk):
    """Compute per-organ geometry from a NIfTI label mask and store one
    EveryClassInfo row per label value.

    Parameters:
        file_name: value used for EveryClassInfo.file_name.
        msk: path to a NIfTI label mask; each distinct non-zero value is
             treated as one organ.
    """

    msk = nib.load(msk)

    # Read label by np array.
    msk = msk.get_fdata()

    # Get all label values in mask nii file (drop background 0).
    labels = np.unique(msk)
    labels = labels[labels != 0]

    # Calculate information by specific organ.
    for label in labels:
        # We need a copy array to prevent the original array to be modified.
        msk_ = msk.copy()
        # If an array value is not equal to label, convert it to zero.
        # After that, the array will only contains the target organ to be calculated.
        msk_[msk_ != label] = 0.0

        rectangle = np.nonzero(msk_)

        # Axis-aligned bounding box of the organ: [min, max] per axis.
        rectangle = [
            [rectangle[0].min(), rectangle[0].max()],
            [rectangle[1].min(), rectangle[1].max()],
            [rectangle[2].min(), rectangle[2].max()],
        ]

        center = [(a + b) / 2 for a, b in rectangle]

        radius = [(b - a) / 2 for a, b in rectangle]

        diameter = [x * 2 for x in radius]

        # Volume: sum all non-zero pixel.
        # NOTE(review): this sums voxel *values* (the label number), not the
        # voxel count — for label k the result is k times the count. Confirm
        # whether a count (np.count_nonzero) was intended.
        volume = int(msk_.sum())

        # areas: it saves the volume of each slice.
        # We can use it to count which slice has the biggest target organ area.
        # Sum mask along with x axis first, then y axis next.
        # 1. Along with x axis:
        areas = msk_.sum(axis=0)
        # Note:
        # After sum with x axis,
        # the y axis is switched to the 0 dimension,
        # and z axis is switched to the 1 dimension.
        # 2. Along with y axis:
        areas = areas.sum(axis=0)
        # Count which slice is biggest, and return the slice index:
        biggest_area_index = areas.argmax()

        biggest_area = areas[biggest_area_index]

        biggest_area_slice = msk_[:, :, biggest_area_index]

        # Indices of the non-zero elements within that slice.
        nonzero_indices = np.where(biggest_area_slice != 0)

        # Position and size of the tightest bounding box in that slice.
        min_row = np.min(nonzero_indices[0])
        max_row = np.max(nonzero_indices[0])
        min_col = np.min(nonzero_indices[1])
        max_col = np.max(nonzero_indices[1])
        box_width = max_col - min_col
        box_height = max_row - min_row

        # Size of the minimal bounding box.
        biggest_area_shape = [box_width, box_height]

        # Cast NumPy scalars to plain ints before string conversion.
        biggest_area_index = int(biggest_area_index)
        label = int(label)
        biggest_area = int(biggest_area)

        # Persist one row per label; NumpyEncoder handles np scalar types.
        classinfo = EveryClassInfo(file_name=file_name, label=str(label),
                                   range=json.dumps(rectangle, cls=NumpyEncoder),
                                   center=json.dumps(center, cls=NumpyEncoder),
                                   radius=json.dumps(radius, cls=NumpyEncoder),
                                   diameter=json.dumps(diameter, cls=NumpyEncoder),
                                   biggest_area_index=str(biggest_area_index), biggest_area=str(biggest_area),
                                   biggest_area_shape=json.dumps(biggest_area_shape, cls=NumpyEncoder),
                                   volume=str(volume)
                                   )



        try:
            db.session.add(classinfo)
            db.session.commit()

        except Exception as e:
            # Roll back on failure; the exception itself is swallowed.
            # NOTE(review): consider logging `e` — insert failures are silent.
            db.session.rollback()


# 路由和视图函数
@app.route('/search_prd', methods=['GET'])
def get_search_results():
    """List every prediction record (name, status, finish time)."""
    payload = []
    for row in PredictResult.query.all():
        payload.append({
            'imageName': row.file_name,
            'status': row.status,
            # DateTime column rendered as a string for the frontend.
            'finishTime': str(row.timestamp),
        })

    print(payload)
    return jsonify(payload)


def copy_decathon_file_to_tmp(source_path, tmp_folder):
    """Copy a decathlon-style image into `tmp_folder` under the nnU-Net
    naming convention and return the destination path.

    The stem (everything before the first '.') is suffixed with '_0000' and
    given a '.nii.gz' extension; `tmp_folder` is created if needed.
    """
    os.makedirs(tmp_folder, exist_ok=True)
    stem = os.path.basename(source_path).split('.')[0]
    destination = os.path.join(tmp_folder, stem + "_0000.nii.gz")
    shutil.copy2(source_path, destination)
    return destination


@app.route('/upload', methods=['POST'])
def upload_file():
    """Acknowledge an upload without persisting it.

    Only echoes the uploaded file's name back; the actual prediction input is
    resolved later by /predict from a fixed dataset folder.
    """
    uploaded = request.files['fileobj']
    return {
        'status': '文件上传成功,开始预测',
        'fileStatus': 'success',
        'filepath': uploaded.filename,
    }


@app.route('/upload_fuwuqi', methods=['POST'])
def upload_file_fuwuqi():
    """Save an uploaded file locally, then forward it to the intranet server.

    Returns the intranet server's JSON on success, otherwise a failure
    message (a list-wrapped one when an exception occurred, matching the
    original response shapes).
    """
    try:
        file_obj = request.files['fileobj']

        # Save the upload under static/upload first.
        save_folder = f'{getFilePath(1)}/static/upload'
        os.makedirs(save_folder, exist_ok=True)
        file_save_path = os.path.join(save_folder, file_obj.filename)
        file_obj.save(file_save_path)

        # Forward to the intranet inference server.
        # Fixed: the file handle was previously opened and never closed;
        # `with` guarantees it is released even if the POST raises.
        url = 'http://192.168.1.245:5000/upload_fuwuqi'  # intranet server address/port
        with open(file_save_path, 'rb') as f:
            response = requests.post(url, files={'file': f})

        if response.status_code == 200:
            return response.json()
        else:
            return jsonify({'status': '文件上传失败', 'fileStatus': 'failed'})

    except Exception as e:
        return jsonify([{'status': '文件上传失败：{}'.format(str(e)), 'fileStatus': 'failed'}])


def read_pkl_show(path, id, filename):
    """Load an nnU-Net preprocessing pickle and persist derived measurements.

    Computes per-axis physical extents ("dia"), cross-sectional areas and
    total volume both before ("br_") and after ("ar_") resampling, then
    inserts one ResultDetail row.

    Parameters:
        path: path to the preprocessing .pkl file.
        id: numeric id for the new ResultDetail row.
        filename: file name key for the row.

    Returns a Flask JSON response describing success or the error.
    """
    try:
        with open(path, 'rb') as f:
            pkl_data = pkl.load(f)

        original_size_of_raw_data = pkl_data['shape_before_cropping']
        original_spacing = pkl_data['spacing']
        size_after_resampling = pkl_data['shape_after_cropping_and_before_resampling']
        spacing_after_resampling = pkl_data['spacing']

        # --- before resampling ---
        # Physical extent per axis: spacing * voxel count.
        br_dia = [round(original_spacing[i] * original_size_of_raw_data[i], 2) for i in range(3)]
        br_dia = json.dumps(np.array(br_dia).tolist())
        # Cross-section per axis: product of the other two sizes, scaled.
        br_areas = [
            round(original_size_of_raw_data[(i + 1) % 3] * original_size_of_raw_data[(i + 2) % 3] * original_spacing[i], 2)
            for i in range(3)]
        br_areas = json.dumps(np.array(br_areas).tolist())
        # Total volume.
        br_volume = np.prod(original_spacing) * np.prod(original_size_of_raw_data)

        # --- after resampling ---
        ar_dia = [
            round(spacing_after_resampling[i] * size_after_resampling[i], 2)
            for i in range(3)]
        ar_dia = json.dumps(np.array(ar_dia).tolist())
        # NOTE(review): this mixes the pre-resampling size with the
        # post-resampling size — looks like a copy/paste of the "br" formula;
        # confirm whether both factors should be size_after_resampling.
        ar_areas = [
            round(original_size_of_raw_data[(i + 1) % 3] * size_after_resampling[(i + 2) % 3] * spacing_after_resampling[i], 2)
            for i in range(3)]
        ar_areas = json.dumps(np.array(ar_areas).tolist())
        ar_volume = np.prod(spacing_after_resampling) * np.prod(size_after_resampling)

        # Volumes are stored as the integer part of their string form.
        resultdatail = ResultDetail(id=id, file_name=filename, original_size_of_raw_data=json.dumps(np.array(original_size_of_raw_data).tolist()),
                                    original_spacing=json.dumps([round(x, 2) for x in np.array(original_spacing).tolist()]),
                                    size_after_resampling=json.dumps(np.array(size_after_resampling).tolist()),
                                    spacing_after_resampling=json.dumps([round(x, 2) for x in np.array(spacing_after_resampling).tolist()]),
                                    br_dia=br_dia, br_areas=br_areas, br_volume=str(br_volume).split('.')[0], ar_dia=ar_dia,
                                    ar_areas=ar_areas, ar_volume=str(ar_volume).split('.')[0])

        db.session.add(resultdatail)
        db.session.commit()
        return jsonify({'message': 'Add successful'})

    except Exception as e:
        # Fixed: removed a leftover breakpoint() that froze the server here.
        db.session.rollback()
        return jsonify({'error': str(e)})


def count_res_num(fileId, nii_path):
    """Store the distinct label values of a result mask on its ResultDetail row.

    Loads the NIfTI file at `nii_path`, collects its unique integer values
    (including background 0), and writes them as a JSON list into the
    `classes` column of the row with id `fileId`.
    """
    volume = nib.load(nii_path).get_fdata()
    volume = np.nan_to_num(volume)  # neutralize NaNs, if any

    classes_json = json.dumps(np.unique(volume).astype(int).tolist())

    row = ResultDetail.query.filter_by(id=fileId).first()
    if row is None:
        return jsonify({'message': 'Row not found'})

    row.classes = classes_json
    try:
        db.session.commit()
        return jsonify({'message': 'Update successful'})
    except Exception as e:
        db.session.rollback()
        return jsonify({'error': str(e)})


@app.route('/predict', methods=['GET'])
def predict():
    """Record a prediction run for a dataset file and fill its measurement tables.

    Query parameters:
        fileId: numeric id for the PredictResult row.
        filepath: file name inside the fixed imagesTr dataset folder.

    Inference itself is not run here — metadata (.pkl) and the label mask are
    read from fixed preprocessed/labels folders on disk.
    """
    fileId = request.args.get('fileId')
    file_name = request.args.get('filepath')
    file_path = f'/media/yusongli/T7/Templates/yunet/nnUNet_raw/Dataset001_C_intensity1500/imagesTr/{file_name}'
    # Patient identifier: strip extension and the nnU-Net '_0000' modality suffix.
    patient_iden = file_name.split('.')[0].split('_0000')[0]

    file_data = None  # bound before try so the except handler can test it
    try:
        # Store the prediction record.
        file_data = PredictResult(
            id=int(fileId),
            file_name=file_name,
            file_path=file_path,
            status='primary'
        )
        db.session.add(file_data)
        db.session.commit()

        # Record the finish time and mark the run successful.
        file_data.timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        file_data.status = 'success'
        db.session.commit()

        # Read the preprocessing metadata and derive measurement rows.
        pkl_folder = '/media/yusongli/T7/Templates/yunet/nnUNet_preprocessed/Dataset001_C_intensity1500/nnUNetPlans_3d_fullres'
        pkl_path = os.path.join(pkl_folder, patient_iden + '.pkl')
        read_pkl_show(pkl_path, fileId, file_name)

        # Count label classes from the label mask.
        res_nii_folder = '/media/yusongli/T7/Templates/yunet/nnUNet_raw/Dataset000_C/labelsTr'
        res_nii_path = os.path.join(res_nii_folder, patient_iden + '.nii.gz')
        count_res_num(fileId, res_nii_path)

        # Store per-organ details, one row per label.
        get_radius_and_volume(file_name=file_name, msk=res_nii_path)

        response = {'predictState': 'success'}

    except Exception as e:
        db.session.rollback()
        # Fixed: guard file_data — it is unset when the record insert itself
        # fails (e.g. non-numeric fileId), which previously raised NameError
        # inside this handler.
        if file_data is not None:
            file_data.status = 'danger'
            db.session.commit()
        response = {'predictState': 'danger'}

    return response


@app.route('/predict_fuwuqi', methods=['GET'])
def predict_fuwuqi():
    """Proxy a prediction to the intranet inference server and store results.

    Sends the file name and model type to the intranet server, decodes the
    base64 pkl/nii payloads it returns, saves them under
    static/predict_result/<patient>, copies the mask into static/label, and
    fills the measurement tables the same way /predict does.
    """
    fileId = request.args.get('fileId')
    file_path = request.args.get('filepath')
    model_type = request.args.get('model')
    root_file_path = getFilePath(1)
    patient_iden = file_path.split('/')[-1].split('.')[0]

    # Fixed: os.path.join instead of a Windows-only backslash literal — the
    # same function later uses '/'-style paths for static/label.
    output_folder = os.path.join(root_file_path, 'static', 'predict_result', patient_iden)
    os.makedirs(output_folder, exist_ok=True)

    file_data = None  # bound before try so the except handler can test it
    try:
        # Store the prediction record.
        file_data = PredictResult(id=int(fileId), file_name=file_path.split('/')[-1],
                                  file_path=file_path, status='primary',
                                  )
        db.session.add(file_data)
        db.session.commit()

        # Ask the intranet server to run the model.
        data = {'filename': file_path.split('/')[-1],
                'model_type': model_type}
        print(data)
        url = 'http://192.168.1.245:5000/predict_fuwuqi'  # intranet server address/port
        response = requests.post(url, json=data)
        print(response)

        # Decode the base64 pkl/nii payloads returned by the server.
        json_data = response.json()
        pkl_file_bytes = base64.b64decode(json_data['pklFile'])
        nii_file_bytes = base64.b64decode(json_data['niiFile'])

        with open(os.path.join(output_folder, patient_iden + '.pkl'), 'wb') as pkl_file:
            pkl_file.write(pkl_file_bytes)
        with open(os.path.join(output_folder, file_path.split('/')[-1]), 'wb') as nii_file:
            nii_file.write(nii_file_bytes)

        # Copy the predicted nii into static/label for the viewer.
        source_file = os.path.join(output_folder, file_path.split('/')[-1])
        target_folder = rf'{root_file_path}/static/label/'
        shutil.copy(source_file, target_folder)

        # Record the finish time and mark the run successful.
        file_data.timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        file_data.status = 'success'
        db.session.commit()

        # Derive the measurement tables from the saved artifacts.
        pkl_path = os.path.join(output_folder, patient_iden + '.pkl')
        read_pkl_show(pkl_path, fileId, file_path.split('/')[-1])
        res_nii_path = os.path.join(output_folder, patient_iden + '.nii.gz')
        count_res_num(fileId, res_nii_path)

        # Store per-organ details, one row per label.
        get_radius_and_volume(file_name=file_path.split('/')[-1], msk=res_nii_path)

        response = {'predictState': 'success'}

    except Exception as e:
        db.session.rollback()
        # Fixed: guard file_data — it is unset when the record insert itself
        # fails, which previously raised NameError inside this handler.
        if file_data is not None:
            file_data.status = 'danger'
            db.session.commit()
        response = {'predictState': 'danger'}

    return response


class StaticMultiFolderPredictor:
    """Ensemble wrapper around several exported (static-graph) nnU-Net models.

    Each ``(model_path, param_path)`` pair becomes one ``StaticPredictor``;
    at inference time the per-model softmax outputs are averaged to form the
    ensemble prediction.  All members share a single pickled nnU-Net plan.
    """

    def __init__(self,
                 model_paths,
                 param_paths,
                 plan_path,
                 stage,
                 min_subgraph_size=3):
        """Load the plan file and build one ``StaticPredictor`` per model.

        Args:
            model_paths: paths to the exported model graph files.
            param_paths: paths to the matching parameter files (same order
                and length as ``model_paths``).
            plan_path: path to the pickled nnU-Net plans file.
            stage: index into ``plans['plans_per_stage']`` selecting the
                resolution stage to run.
            min_subgraph_size: forwarded to ``StaticPredictor`` (inference
                engine tuning knob).

        Raises:
            ValueError: if the plan's patch size is neither 2D nor 3D.
        """
        self.stage = stage
        self.plans = self.load_plans(plan_path)
        # The plan stores the number of foreground classes; +1 for background.
        self.num_classes = self.plans['num_classes'] + 1
        self.patch_size = np.array(self.plans['plans_per_stage'][self.stage][
                                       'patch_size']).astype(int)
        if len(self.patch_size) == 2:
            self.threeD = False
            self.data_aug_params = default_2D_augmentation_params
        elif len(self.patch_size) == 3:
            self.threeD = True
            self.data_aug_params = default_3D_augmentation_params
        else:
            # The original code silently left self.threeD undefined for any
            # other dimensionality, which surfaced later as a confusing
            # AttributeError.  Fail fast with a clear message instead.
            raise ValueError(
                "Unsupported patch size dimensionality: {} (expected 2 or 3)"
                .format(len(self.patch_size)))
        self.intensity_properties = self.plans['dataset_properties'][
            'intensityproperties']
        self.normalization_schemes = self.plans['normalization_schemes']
        self.use_mask_for_norm = self.plans['use_mask_for_norm']
        # Older preprocessed data may lack transpose info; assume identity.
        if self.plans.get('transpose_forward') is None or self.plans.get(
                'transpose_backward') is None:
            print(
                "WARNING! You seem to have data that was preprocessed with a previous version of nnU-Net. "
                "You should rerun preprocessing. We will proceed and assume that both transpose_foward "
                "and transpose_backward are [0, 1, 2]. If that is not correct then weird things will happen!"
            )
            self.plans['transpose_forward'] = [0, 1, 2]
            self.plans['transpose_backward'] = [0, 1, 2]
        self.transpose_forward = self.plans['transpose_forward']
        self.transpose_backward = self.plans['transpose_backward']

        self.predictors = []
        for model_path, param_path in zip(model_paths, param_paths):
            self.predictors.append(
                StaticPredictor(model_path, param_path, self.plans, stage,
                                min_subgraph_size))

    def load_plans(self, plan_path):
        """Deserialize and return the pickled nnU-Net plans dict.

        NOTE(review): ``pickle.load`` on an untrusted file can execute
        arbitrary code; plan files are assumed to be locally generated.
        """
        with open(plan_path, 'rb') as f:
            plans = pickle.load(f)
        return plans

    def preprocess_patient(self, input_files):
        """Preprocess one test case the same way the training data was.

        Returns the ``(data, seg, properties)`` triple produced by the
        nnU-Net preprocessor for ``input_files`` at this stage's spacing.
        """
        # 2D and 3D plans use different preprocessor implementations.
        if self.threeD:
            preprocessor_class = GenericPreprocessor
        else:
            preprocessor_class = PreprocessorFor2D

        preprocessor = preprocessor_class(
            self.normalization_schemes, self.use_mask_for_norm,
            self.transpose_forward, self.intensity_properties)
        d, s, properties = preprocessor.preprocess_test_case(
            input_files,
            self.plans['plans_per_stage'][self.stage]['current_spacing'])
        return d, s, properties

    def multi_folds_predict_preprocessed_data_return_seg_and_softmax(
            self,
            data: np.ndarray,
            do_mirroring: bool = True,
            mirror_axes: Tuple[int, ...] = None,
            use_sliding_window: bool = True,
            step_size: float = 0.5,
            use_gaussian: bool = True,
            pad_border_mode: str = 'constant',
            pad_kwargs: dict = None,
            verbose: bool = True,
            mixed_precision=True):
        """Average the softmax predictions of all member predictors.

        All keyword arguments are forwarded unchanged to each member's
        ``predict_preprocessed_data_return_seg_and_softmax`` call; only the
        softmax output (element 1 of the returned tuple) is kept.

        Returns:
            The element-wise mean softmax over all predictors.
        """
        softmax_res = None
        for predictor in self.predictors:
            x = predictor.predict_preprocessed_data_return_seg_and_softmax(
                data=data,
                do_mirroring=do_mirroring,
                mirror_axes=mirror_axes,
                use_sliding_window=use_sliding_window,
                step_size=step_size,
                use_gaussian=use_gaussian,
                pad_border_mode=pad_border_mode,
                pad_kwargs=pad_kwargs,
                verbose=verbose,
                mixed_precision=mixed_precision)[1]
            if softmax_res is None:
                softmax_res = x
            else:
                softmax_res += x
        return softmax_res / len(self.predictors)


def main(model, image_folder, output_folder):
    """Run static-graph nnU-Net inference over a folder of images.

    Args:
        model: model selector ('nnunet', 'nnformer', or anything else).
            NOTE(review): every branch of the original if/elif/else selected
            byte-identical artifact paths, so the duplication was collapsed;
            restore per-model paths here once distinct checkpoints exist.
        image_folder: folder containing the input images to predict.
        output_folder: folder where predictions are written.
    """
    # All model choices currently map to the same exported 3D U-Net artifacts.
    plan_path = r'data\preprocessed\nnUNetPlansv2.1_plans_3D.pkl'
    model_paths = [r'output\static\3d_unet\fold0\model.pdmodel']
    postprocessing_json_path = r'output\3dunet_val\fold_0\postprocessing.json'
    param_paths = [r'output\static\3d_unet\fold0\model.pdiparams']

    model_type = '3d'

    # Fixed inference configuration.
    save_npz = True
    min_subgraph_size = 3
    lowres_segmentations = None
    num_threads_preprocessing = 2
    num_threads_nifti_save = 2
    disable_tta = False
    overwrite_existing = False
    step_size = 0.5
    disable_postprocessing = True

    # Model and parameter files must pair up one-to-one.
    assert len(model_paths) == len(
        param_paths), "The number of pdmodel is not the same with pdiparams. {} != {}.".format(
        len(model_paths), len(param_paths))
    print("model type: ", model_type)
    print("The plan path: ", plan_path)
    print("The model paths: ", model_paths)
    print("The postprocessing json path: ", postprocessing_json_path)

    # Full-res 3D (and cascade full-res) models use plan stage 1; others stage 0.
    if model_type in ['3d', 'cascade_fullres']:
        stage = 1
    else:
        stage = 0

    predictor = StaticMultiFolderPredictor(model_paths, param_paths,
                                           plan_path, stage,
                                           min_subgraph_size)

    if lowres_segmentations is not None:
        assert model_type == 'cascade_fullres', "You supply lowres_segmentations dir but the model is not 'cascade_fullres'. Please check model_type."
        print("Cascade lowres segmentation result dir: ",
              lowres_segmentations)

    predict_from_folder(
        predictor=predictor,
        input_folder=image_folder,
        output_folder=output_folder,
        save_npz=save_npz,
        num_threads_preprocessing=num_threads_preprocessing,
        num_threads_nifti_save=num_threads_nifti_save,
        lowres_segmentations=lowres_segmentations,
        tta=not disable_tta,
        mixed_precision=False,
        overwrite_existing=overwrite_existing,
        mode='normal',
        step_size=step_size,
        plan_path=plan_path,
        disable_postprocessing=disable_postprocessing,
        postprocessing_json_path=postprocessing_json_path)


# translator = GoogleTranslator()

def split_text(text, max_length):
    """Split *text* into consecutive chunks of at most *max_length* characters.

    The final chunk may be shorter; an empty string yields an empty list.
    """
    return [text[pos:pos + max_length]
            for pos in range(0, len(text), max_length)]


def translate_long_text(text, max_length=500):
    """Translate *text* chunk by chunk and join the results with spaces.

    The text is cut into pieces of at most *max_length* characters (the
    translation backend has a per-request size limit), each piece is run
    through the module-level ``translator``, and the translated pieces are
    concatenated with single spaces.
    """
    pieces = split_text(text, max_length)
    return ' '.join(translator.translate(piece) for piece in pieces)




# 处理用户问题并返回回答

@app.route('/chat', methods=['POST'])
def chat():
    """Answer a user question with a canned reply.

    Expects JSON ``{"question": ...}``.  The question is matched exactly
    against a fixed set of prepared Q/A pairs; anything else gets the
    'No response' fallback.  Returns JSON ``{"reply": ...}``.
    """
    question = request.json['question']

    # Exact-match canned answers (replaces the former if/elif chain).
    canned_answers = {
        '你好，你是谁？':
            '我是一个聊天机器人，ChatDoctor-Beta',
        '我的嗓子疼痛，伴有咳嗽和发烧，是不是感冒':
            '嗓子疼痛、咳嗽和发烧是感冒的常见症状之一。但也可能是其他呼吸道感染引起的。建议您咨询医生，描述症状的严重程度和持续时间，医生可能会给出适当的治疗建议或建议进一步的检查。',
        '我最近经常感到疲劳和乏力，是不是缺乏某种营养？':
            '疲劳和发力可能与多种因素有关，包括睡眠不足、营养不良、缺乏运动、压力过大等。如果您怀疑缺乏某种营养，可以尝试增加相关食物，但最好咨询医生，以便进行全面的评估和诊断。',
        '怎么判断是否患有食道癌':
            '以下是一些可能的症状和迹象：吞咽困难或疼痛、胃酸反流或消化不良、不明原因的体重减轻、喉咙疼痛或声音嘶哑、咳嗽或呼吸困难。如果您担心自己可能患有食道癌，建议您尽快就医，向医生描述您的症状和疑虑。医生可能会进行进一步的身体检查，以确诊是否患有食道癌。及早发现并治疗食道癌可以提高治愈率和生存率。',
        '如何进行检查':
            '要进行食道癌的检查，通常需要专业医生的指导和设备进行。以下是一些常见的用于检查食道癌的方法：内窥镜检查、X光检查、CT扫描、PET-CT扫描、超声内镜检查。如果您担心患有食道癌或出现相关症状，建议尽快就医并向医生咨询最适合您情况的检查方法。医生会根据症状和检查结果制定进一步的治疗计划。',
    }

    reply = canned_answers.get(question, 'No response')
    return jsonify({'reply': reply})



@app.route("/downloadReport")
def download_report():
    report_file_path = "D:/Astudy/python/feijiang/report.pdf"


    try:
        return send_file(report_file_path,  as_attachment=True)
    except FileNotFoundError:
        return "File not found.", 404


# Development entry point: run Flask's built-in server on the loopback
# interface only.  debug=True enables the reloader/debugger and must not
# be used in production.
if __name__ == '__main__':
    app.run(debug=True, host='127.0.0.1', port=5000)
