from flask import redirect
from efficientnet_pytorch import EfficientNet
import torch.nn as nn
import torch
import torchvision
import flask,requests
from flask import request, render_template,session,url_for,Response,jsonify
import torch
from PIL import Image
import time,io,re
from torch.nn import functional as F
from torchvision import  transforms
import torch.backends.cudnn as cudnn
import base64,os
from web.app.maintest import test_retrieval,get_list_user
from flask_bootstrap import Bootstrap
from flask_wtf import FlaskForm
from wtforms import StringField,SubmitField,SelectField,TextAreaField
from wtforms.validators import Required,DataRequired
import numpy as np
from concurrent.futures import ThreadPoolExecutor, as_completed
import sys,json

app = flask.Flask(__name__)
bootstrap = Bootstrap(app)
# NOTE(review): hard-coded secret key in source; should be loaded from an
# environment variable or config file for any non-dev deployment.
app.config['SECRET_KEY'] = 'hard to guess string'



# Bind address/port used by app.run() in the __main__ block below.
host = '192.168.0.139'
port = 8899


# Shared inference state: `model` is (re)assigned inside load_model();
# `use_gpu` / `num_gpu` configure the DataParallel wrapping there and the
# .cuda() call in prepare_image().
model = None
use_gpu = True
num_gpu = 2
def load_model():
    """Load one pre-trained EfficientNet-b3 classifier per weight directory.

    Populates three module globals:
      - model_integration: {feature key -> loaded model in eval mode}
      - feature_class:     {feature key -> number of output classes}
      - model:             last model built (loop variable leaking on purpose
                           via the ``global`` statement)

    Assumes each directory name under ``weight_path`` encodes its class count
    in the last character and a 2-char feature key at positions 8:10 —
    TODO(review): confirm this naming convention against the training pipeline.
    """
    global model_integration,feature_class,model

    weight_path = r'/home/ubuntu/data/weights'
    model_integration = {}
    feature_class = {}

    for file in os.listdir(weight_path):
        # Each weight directory holds a checkpoint named model_best.pth.tar.
        resume = os.path.join(weight_path,file, 'model_best.pth.tar')
        n_class = int(file[-1])  # class count from the directory name's last char
        feature_class.update({str(file[8:10]):n_class})

        model_name = 'efficientnet-b3'
        model = EfficientNet.from_pretrained(model_name)
        # Replace the classifier head; 1536 is b3's feature width.
        model._fc = nn.Linear(1536, n_class)

        if use_gpu:
            # NOTE(review): wrapping in DataParallel *before* load_state_dict
            # suggests the checkpoints carry 'module.'-prefixed keys — confirm.
            model = nn.DataParallel(model, device_ids=range(num_gpu))
            model.cuda()
            cudnn.benchmark = True
        checkpoint = torch.load(resume)
        model.load_state_dict(checkpoint['state_dict'])
        model.eval()
        model_integration.update({str(file[8:10]):model})
    print('Loaded pretrained weights for efficientnet-b3')

def feature_tongue(file):
    """Run every per-feature classifier on one tongue photo.

    Args:
        file: image file name under /home/ubuntu/data/dieseas_test/0.

    Returns:
        List of predicted class labels, ordered by feature id
        (int(key) - 1 for each key in model_integration).
    """
    result = {'success': False, 'predictions': []}

    def _classify(image, keys, model, result):
        # Softmax over logits; argmax of the first (only) batch row is the label.
        preds = F.softmax(model(image), dim=1)
        probs = preds[0].cpu().detach().numpy()
        result['predictions'].append((int(keys) - 1, probs.argmax()))
        return result

    path_file = r'/home/ubuntu/data/dieseas_test/0'
    image = Image.open(os.path.join(path_file, file))
    image = prepare_image(image, target_size=(448, 448))

    # max_workers=1 serializes the per-model GPU work; the `with` block
    # guarantees the executor is shut down (the original leaked the pool).
    with ThreadPoolExecutor(max_workers=1) as pool:
        futures = {
            pool.submit(_classify, image, keys, model, result): keys
            for keys, model in model_integration.items()
        }
        for future in as_completed(futures):
            result = future.result()

    # Order by feature id and keep only the predicted labels.
    sorted_result = sorted(result['predictions'], key=lambda item: item[0])
    return [label for _, label in sorted_result]

class NameForm(FlaskForm):
    """Landing-page form: takes a record index for similarity matching.

    The commented-out fields below are earlier free-text inputs (patient
    info, condition report, inquiry sheet, tongue photo name) kept for
    reference.
    """
    # user_info = TextAreaField('患者信息:[年龄int] [体重kg] [身高cm],例：23 54 160',validators= [DataRequired()] )
    # report = TextAreaField('患者自述:普通文本', validators=[DataRequired()])
    # inquiry = TextAreaField('问诊单：格式:question1##answer11[$$answer12[$$...]]\r\nquestion2##answer21[$$answer22[$$...]]...', validators=[DataRequired()])
    # tongue_file = TextAreaField('舌照名:例：inquiry-xxx.jpg', validators=[DataRequired()])
    index_file = TextAreaField('小鹿病历相似度匹配', validators=[DataRequired()])
    submit = SubmitField('Submit')

class DataGeneration(object):
    """Load tab-separated patient records into a list of dicts.

    The first line of the file is the header (column names); each subsequent
    line becomes a dict mapping column name -> cell text, plus an ``'id'``
    key holding the 0-based record index. ``data_list`` is ``None`` when the
    file path is falsy or the file does not exist.
    """

    def __init__(self, inquiry_mapping_file='/home/ubuntu/data/web/app/data_file_api/data_all.txt',
                 ):
        if inquiry_mapping_file and os.path.isfile(inquiry_mapping_file):
            # `with` ensures the handle is closed (the original leaked it).
            with open(inquiry_mapping_file, 'r') as fh:
                lines = fh.readlines()

            self.data_list = []
            keys_list = []
            for row_idx, line in enumerate(lines):
                cells = line.strip('\n').split('\t')
                if row_idx == 0:
                    keys_list = cells  # header row: column names
                else:
                    record = {'id': row_idx - 1}
                    # zip truncates to the shorter side, so a short row no
                    # longer raises IndexError as the original index loop did.
                    record.update(zip(keys_list, cells))
                    self.data_list.append(record)
        else:
            self.data_list = None

# @app.route('/rank1')
# def rank1():
#     return render_template('/show1.html')

@app.route('/rank')
def rank():
    """Re-sort the cached similarity results and return them as JSON.

    Query args:
        st: sort mode — 1 basic info, 2 report, 3 inquiry, 4 tongue,
            5 total similarity; anything else keeps retrieval order.

    Relies on the module global ``text_usr_dict`` populated by /getMatchData;
    its last element is the query record itself and is always kept last.
    """
    sort = request.args.get("st", type=int, default=0)

    # (similarity field, descending?) per mode. tongue_sim is a distance,
    # so it sorts ascending (smaller = more similar).
    sort_spec = {
        1: ('basic_info_sim', True),
        2: ('report_sim', True),
        3: ('inquiry_sim', True),
        4: ('tongue_sim', False),
        5: ('total_sim', True),
    }

    text_usr_sort = text_usr_dict[:-1]
    text_orig = text_usr_dict[-1]
    if sort in sort_spec:
        field, descending = sort_spec[sort]
        text_usr_sort = sorted(text_usr_dict[:-1],
                               key=lambda rec: rec[field], reverse=descending)

    text_usr_sort.append(text_orig)
    return json.dumps(text_usr_sort, ensure_ascii=False)

@app.route('/show1')
def show1():
    """Render the match-display page."""
    template_name = '/show2.html'
    return render_template(template_name)

@app.route('/getMatchData')
def show():
    """Return similar medical records for one query record, as JSON.

    Query args:
        bt: index of the query record in the data file.
        st: sort mode (0 or invalid = fresh retrieval in backend order,
            1-5 = re-sort the cached results by one similarity field).

    On a fresh query (st == 0) the record fields and the retrieval results
    are cached in module globals so later sorted requests (and /rank) can
    reuse them. The query record itself is appended as the last element.
    """
    global text_usr,user_info,report,inquiry_sheet,tongue_picture,\
        tongue_feature,tongue_description,text_usr_dict,diagnosis,herbs

    index_user = request.args.get("bt", type=int, default=0)
    sort = request.args.get("st", type=int, default=0)
    if sort not in (1, 2, 3, 4, 5):
        sort = 0

    if sort == 0:
        # Fresh retrieval: load the query record and rank the corpus.
        text_usr = DataGeneration().data_list[index_user]

        sex_dict = {'0': '男', '1': '女', 'NULL': 'NULL'}
        user_info = [text_usr['sex'], text_usr['age'], text_usr['height'], text_usr['weight']]
        report = text_usr['report']
        inquiry_sheet = text_usr['inquiry_sheet']
        tongue_picture = text_usr['tongue_picture']
        tongue_feature = text_usr['tongue_feature']
        tongue_description = text_usr['tongue_description']
        diagnosis = text_usr['diagnosis']
        herbs = text_usr['herbs']

        # Display copy of the user info with the sex code mapped to a label.
        text_usr['user_info'] = [sex_dict[text_usr['sex']], text_usr['age'],
                                 text_usr['height'], text_usr['weight']]
        text_usr_dict = similarity_ranking.retrieval(user_info, report, inquiry_sheet, tongue_feature)
        # Keep the query record itself as the final element.
        text_usr_dict.append(text_usr)
        return json.dumps(text_usr_dict, ensure_ascii=False)

    # Re-sort cached results: (similarity field, descending?) per mode.
    # tongue_sim is a distance, so it sorts ascending.
    sort_spec = {
        1: ('basic_info_sim', True),
        2: ('report_sim', True),
        3: ('inquiry_sim', True),
        4: ('tongue_sim', False),
        5: ('total_sim', True),
    }
    field, descending = sort_spec[sort]
    text_usr_sort = sorted(text_usr_dict[:-1],
                           key=lambda rec: rec[field], reverse=descending)
    text_usr_sort.append(text_usr_dict[-1])
    return json.dumps(text_usr_sort, ensure_ascii=False)

    # return render_template('/show2.html', user_info=user_info, report=report,inquiry_sheet=inquiry_sheet,
    #                        tongue_picture=tongue_picture,tongue_feature=tongue_feature,
    #                        tongue_description = tongue_description,
    #                        text_usr_dict=text_usr_dict, diagnosis = diagnosis,
    #                        herbs = herbs ,sort = sort)


@app.route('/show_img')
def show_img():
    """Render the original tongue-image page for the requested file name."""
    requested_file = request.args.get("file", type=str, default='1')
    print(requested_file, '*********')
    return render_template('show_original_img.html', tongue_file=requested_file)



def inquire_answer(inquiry_value):
    """Concatenate the answer parts of an inquiry-sheet string.

    The sheet format is ``question##answer@@question##answer...``; the text
    after each ``##`` is collected. Returns 'NULL' for a 'NULL' sheet and ''
    when no segment contains an answer.

    The original only guarded the first segment and crashed with IndexError
    when a later segment lacked '##'; every segment is now checked.
    """
    if inquiry_value == 'NULL':
        return 'NULL'

    answers = []
    for segment in inquiry_value.split('@@'):
        question_answer = segment.split('##')
        if len(question_answer) > 1:
            answers.append(question_answer[1])
    return ''.join(answers)

@app.route('/sp',methods=['GET','POST'])
def search():
    """Keyword search over all records; returns up to 100 matches as JSON.

    A record matches when any keyword token appears in its report, its
    inquiry-sheet answers, or its diagnosis.

    Fixes over the original: a missing ``keyword`` arg no longer raises
    AttributeError, and empty tokens (from double/trailing separators) are
    dropped — ``'' in s`` is always True, so they used to match every record.
    """
    global  text_usr,usr_data_string
    text_usr = DataGeneration().data_list

    keyword = request.args.get('keyword') or ''
    # Split on space, ASCII comma, or full-width comma; drop empty tokens.
    words = [w for w in re.split(' |,|，', keyword) if w]

    index_list = []
    for i, record in enumerate(text_usr):
        answers = inquire_answer(record['inquiry_sheet'])
        haystacks = (record['report'], answers, record['diagnosis'])
        if any(word in hay for word in words for hay in haystacks):
            index_list.append(i)
            if len(index_list) >= 100:  # cap results at 100, as before
                break

    text_usr_sea = [text_usr[k] for k in index_list]
    return json.dumps(text_usr_sea,ensure_ascii = False)


@app.route("/")
def index():
    """Serve the landing page."""
    landing_page = 'index.html'
    return render_template(landing_page)

# @app.route('/', methods=['GET','POST'])
# def index():
#     global index_file,user_info,report,inquiry,text_usr,tongue_file,tongue_feature
#     index_file = ''
#     # global user_info,report,inquiry,text_usr,tongue_file,tongue_feature
#     user_info = ''
#     report = ''
#     inquiry = ''
#     text_usr = ''
#     tongue_file =''
#     form = NameForm()
#     tongue_feature = None
#     if form.validate_on_submit():
#         index_file = form.index_file.data
#         data_user = get_list_user()[int(index_file)]
#         user_info = data_user['patient_information']
#         report = data_user['condition_report']
#         inquiry = data_user['inquiry_sheet']
#
#         tongue_file = data_user['tongue_url']
#         if data_user['tongue_vector'] != 'NULL':
#
#             tongue_feature = [int(i) for i in data_user['tongue_vector'][1:-1].split(',')]
#         else:
#             tongue_feature = 'NULL'
#         start = time.time()
#
#         ## text_usr：返回100个相似度病历
#         top_k = 100
#         text_usr = test_retrieval(user_info,report,inquiry,tongue_feature)
#
#         # print(text_usr)
#         # print(time.time()-start)
#
#         return redirect(url_for('show'))
#
#     return render_template('index.html', tongue_file=tongue_file,form=form)



# @app.route('/predict', methods=['POST'])
# def predict():
#     start = time.time()
#
#     result = {'success':False}
#
#     def ThreadPool(image, keys, model, result):
#         preds = F.softmax(model(image), dim=1)
#         results = preds[0].cpu().detach().numpy()
#         r = 'feature{}  label:{} '.format(int(keys), results.argmax())
#         result['predictions'].append(r)
#         return result
#
#     if request.method == 'POST':
#         if request.form.get('photo'):
#             # Read image in PIL format
#             image = Image.open(io.BytesIO(base64.b64decode(request.form.get('photo'))))
#
#             print(type(image))
#             image = prepare_image(image, target_size=(448,448))
#             result['predictions'] = list()
#             # for keys, model in model_integration.items():
#             #     preds = F.softmax(model(image),dim = 1)
#             #     results = preds[0].cpu().detach().numpy()
#             #     r = 'feature{}  label:{} '.format(int(keys),results.argmax())
#             #     result['predictions'].append(r)
#             # list1 = sorted(dict1.items(), key=lambda x: x[0])
#             pool = ThreadPoolExecutor(max_workers=2)
#             futures = {
#                 pool.submit(
#                     ThreadPool,
#                     image,
#                     keys,
#                     model,
#                     result
#                     ):
#                    model for keys, model in model_integration.items()
#             }
#             for future in as_completed(futures):
#                 result = future.result()
#             result['success'] = True
#             end = time.time()
#             print(end - start, result, '*********')
#
#     return flask.jsonify(result)

def scale_keep_ar_min_fixed(img, fixed_min):
    """Resize *img* so its shorter side equals *fixed_min*, preserving the
    aspect ratio, using bicubic interpolation."""
    width, height = img.size
    if width < height:
        # Portrait: pin the width, scale the height proportionally.
        new_w, new_h = fixed_min, fixed_min * height // width
    else:
        # Landscape/square: pin the height, scale the width proportionally.
        new_h, new_w = fixed_min, fixed_min * width // height
    return img.resize((new_w, new_h), Image.BICUBIC)

def prepare_image(image, target_size):
    """Preprocess a PIL image into a normalized batch tensor for inference.

    Args:
        image: PIL image in any mode; converted to RGB.
        target_size: (height, width) of the final center crop. The original
            ignored this parameter and hard-coded 448; it is now honored
            (callers pass (448, 448), so behavior is unchanged for them).

    Returns:
        A (1, 3, H, W) float tensor, moved to GPU when the module-level
        ``use_gpu`` flag is set and CUDA is available.
    """
    if image.mode != 'RGB':
        image = image.convert("RGB")

    # Scale the shorter side to the crop size, then center-crop.
    image = scale_keep_ar_min_fixed(image, min(target_size))
    image = transforms.CenterCrop(target_size)(image)
    image = transforms.ToTensor()(image)

    # Normalize each channel to roughly [-1, 1].
    image = transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])(image)

    # Add the batch axis. torch.autograd.Variable(..., volatile=True) is
    # deprecated; returning a plain tensor is the modern equivalent (the
    # models are run in eval mode by the callers).
    image = image.unsqueeze(0)
    # Use the module-level flag (the original shadowed it with a local True)
    # and guard against machines without CUDA.
    if use_gpu and torch.cuda.is_available():
        image = image.cuda()
    return image

if __name__ == '__main__':
    # load_model()  # would load the per-feature classifiers (used only by the disabled /predict route)
    # debug=True,
    top_k = 100  # number of similar records the ranking backend returns
    from web.app.similarity_ranking import SimilarityRanking
    # NOTE(review): `global` at module top level is a no-op; kept to document
    # that the route handlers read `similarity_ranking` as a module global.
    global similarity_ranking
    inquiry_mapping_file = '/home/ubuntu/data/web/app/data_file_api/data_all.txt'
    similarity_ranking = SimilarityRanking(inquiry_mapping_file, top_k= top_k)

    app.run(host = host, port = port)
    # end = r'/home/ubuntu/data/web/app/static/img'
    # for file_path in os.listdir(r'/home/ubuntu/data_tongue'):
    #     if file_path.startswith('inquiry') or file_path.endswith('_top') \
    #         or file_path.startswith('cropp'):
    #         for file in os.listdir(os.path.join(r'/home/ubuntu/data_tongue',file_path)):
    #             file_split = file[8:].split('!')[0]+ '.jpg'
    #             if not os.path.exists(os.path.join(end,file)):
    #                 os.symlink(os.path.join(r'/home/ubuntu/data_tongue',file_path,file),
    #                         os.path.join(end, file_split))