# -*- coding: utf-8 -*-
# @Time    : 2023/5/18 10:16 下午
# @Author  : Wu WanJie

import os
import numpy as np
import json
import torch
import copy
from loguru import logger
from flask import request
from flask import Flask, make_response
from flask_restful import Resource, Api
from werkzeug.exceptions import BadRequest
from transformers import (
    BertTokenizer,
    RobertaTokenizer,
    AutoTokenizer,

    BertConfig,
    RobertaConfig,
    ErnieConfig,

    BertForSequenceClassification,
    RobertaForSequenceClassification,
    ErnieForSequenceClassification,

)

from dataset import load_label_system
# Maps a model_type key to its (config, model, tokenizer) transformers classes.
# NOTE: "ernie" uses AutoTokenizer because transformers ships no dedicated
# Ernie tokenizer class.
MODEL_CLASSES = {
    "bert": (BertConfig, BertForSequenceClassification, BertTokenizer),
    "roberta": (RobertaConfig, RobertaForSequenceClassification, RobertaTokenizer),
    "ernie": (ErnieConfig, ErnieForSequenceClassification, AutoTokenizer)
}

# Flask application and its flask-restful API wrapper (resources added in __main__).
app = Flask(__name__)
api = Api(app)

# Per-label decision thresholds applied to the sigmoid probabilities of the
# multi-label classifier: a label is emitted when its probability is >= its
# entry here (see MultiLabelInferenceModel.predict and ClassifyHandler.post).
# Keys are the Chinese label names from the label system; values were
# presumably tuned per label on a validation set -- TODO confirm.
THRESHOLD = {
  "性价比": 0.6,
  "外形外观": 0.8,
  "模块分区": 0.3,
  "能耗": 0.6,
  "散热能力": 0.4,
  "赠送礼品": 0.3,
  "送货安装服务": 0.9,
  "整体尺寸": 0.3,
  "是否有故障": 0.7,
  "保鲜效果": 0.3,
  "活动效果": 0.6,
  "正品": 0.3,
  "外观材质": 0.9,
  "是否结霜": 0.6,
  "消费者体验": 0.3,
  "发货和物流": 0.7,
  "价格意见": 0.9,
  "空间布局": 0.4,
  "便捷性": 0.5,
  "产品功能": 0.9,
  "容量大小": 0.7,
  "服务质量": 0.7,
  "产品描述": 0.4,
  "制冷效果": 0.4,
  "客服服务": 0.3,
  "智能化": 0.8,
  "售后服务": 0.4,
  "软冷冻": 0.3,
  "净味功能": 0.8,
  "风格设计": 0.3,
  "噪音大小": 0.4,
  "向他人推荐": 0.3,
  "产品质量": 0.7,
  "箱门密封性": 0.3,
  "忠诚度": 0.6,
  "是否包装完好": 0.3
}


class MultiLabelInferenceModel(object):
    """Multi-label text classifier backed by a transformers sequence model.

    Each label whose sigmoid probability reaches its per-label THRESHOLD
    entry is returned. Index 8 is always skipped — presumably the
    "是否有故障" label, which is scored by the dedicated SFGZInference
    model in ClassifyHandler.post -- TODO confirm.
    """

    def __init__(self, model_name_or_path, device, model_type, label_path):
        """Load tokenizer, config and weights, then move the model to `device`.

        :param model_name_or_path: local path (or hub id) of the fine-tuned model
        :param device: torch.device to run inference on
        :param model_type: key into MODEL_CLASSES ("bert" / "roberta" / "ernie")
        :param label_path: file consumed by load_label_system; must provide
            "label2id" and "id2label" mappings
        """
        self.device = device
        self.labels_map = load_label_system(label_path)
        config_class, model_class, tokenize_class = MODEL_CLASSES[model_type]
        self.tokenizer = tokenize_class.from_pretrained(model_name_or_path)
        self.config = config_class.from_pretrained(
            model_name_or_path, num_labels=len(self.labels_map["label2id"])
        )
        self.model = model_class.from_pretrained(
            model_name_or_path,
            config=self.config
        )
        self.model.to(self.device)
        self.model.eval()

    def predict(self, text):
        """Classify `text` and return {"label": [names], "prob": {name: prob}}.

        Probabilities for every label (except the skipped index 8) are always
        reported in "prob"; a name is appended to "label" only when its
        probability meets its THRESHOLD entry. Input is truncated to 512 tokens.
        """
        inputs = self.tokenizer(text, return_tensors="pt", max_length=512, truncation=True)
        label = []
        proba = dict()
        with torch.no_grad():
            for key, val in inputs.items():
                inputs[key] = val.to(self.device)
            outputs = self.model(**inputs)
            # Independent sigmoid per logit: one probability per label.
            logits = torch.sigmoid(outputs.logits)
            # .tolist() already produces a fresh nested list of Python floats,
            # so the deepcopy the original code performed was redundant; and
            # inside no_grad() the tensor carries no graph, so .detach() is
            # unnecessary as well.
            probs = logits.cpu().numpy()[0].tolist()
            for idx, prob in enumerate(probs):
                if idx == 8:
                    # Scored separately by SFGZInference (see ClassifyHandler).
                    continue
                field = self.labels_map["id2label"][idx]
                proba[field] = prob
                if prob >= THRESHOLD[field]:
                    label.append(field)
        return {
            "label": label,
            "prob": proba
        }


class SFGZInference(object):
    """Binary classifier used for the "是否有故障" (fault) label.

    Wraps a two-class transformers sequence-classification model and exposes
    the positive-class probability for a piece of text.
    """

    def __init__(self, model_name_or_path, device, model_type):
        """Load the pretrained two-class model and prepare it for eval on `device`.

        :param model_name_or_path: local path (or hub id) of the fine-tuned model
        :param device: torch.device to run inference on
        :param model_type: key into MODEL_CLASSES ("bert" / "roberta" / "ernie")
        """
        self.device = device
        config_cls, model_cls, tokenizer_cls = MODEL_CLASSES[model_type]
        self.tokenizer = tokenizer_cls.from_pretrained(model_name_or_path)
        self.config = config_cls.from_pretrained(model_name_or_path, num_labels=2)
        self.model = model_cls.from_pretrained(model_name_or_path, config=self.config)
        self.model.to(self.device)
        self.model.eval()

    def predict(self, text):
        """Return the positive-class probability for `text` as a Python float.

        Input is truncated to 512 tokens before inference.
        """
        encoded = self.tokenizer(text, return_tensors="pt", max_length=512, truncation=True)
        encoded = {name: tensor.to(self.device) for name, tensor in encoded.items()}
        with torch.no_grad():
            logits = self.model(**encoded).logits
            scores = torch.softmax(logits, dim=1)
            return float(scores[0][1].item())


class ClassifyHandler(Resource):
    """POST endpoint combining the multi-label model and the fault model.

    Expects a JSON body ``{"text": ...}`` and responds with the envelope
    ``{"message", "data": {"label": [...], "prob": {...}}, "status"}``.
    """

    def __init__(self, multi_inference, sfgz_inference=None):
        """
        :param multi_inference: MultiLabelInferenceModel instance (required)
        :param sfgz_inference: optional SFGZInference for the "是否有故障"
            label. When None that label is simply not scored; the original
            code dereferenced it unconditionally, turning a missing model
            into a masked 500 error.
        """
        self.multi_inference = multi_inference
        self.sfgz_inference = sfgz_inference

    @logger.catch(reraise=True)
    def post(self):
        # 1. Validate the request content type.
        if request.content_type != "application/json":
            return self.custom_response("请求头错误", 400)

        # 2. Parse the JSON body.
        try:
            param_raw = request.get_json()
        except BadRequest:
            return self.custom_response("内容解析错误", 400)

        # 3. Extract the required "text" field.
        try:
            text = param_raw["text"]
        except KeyError as e:
            logger.warning(e)
            return self.custom_response("text参数获取失败", 400)

        # 4. Run the models.
        try:
            result = self.multi_inference.predict(text)
            # The multi-label model skips the fault label (index 8); score it
            # with the dedicated binary model when one was configured.
            if self.sfgz_inference is not None:
                prob = self.sfgz_inference.predict(text)
                result["prob"]["是否有故障"] = prob
                # >= keeps the decision rule consistent with
                # MultiLabelInferenceModel.predict (which uses >=).
                if prob >= THRESHOLD["是否有故障"]:
                    result["label"].append("是否有故障")
        except Exception as e:
            # logger.exception records the full traceback, not just str(e).
            logger.exception(e)
            return self.custom_response("内部服务错误", 500)

        logger.info(f"预测结果\n{json.dumps(result, ensure_ascii=False, indent=2)}")
        # 5. Return the successful payload.
        return self.custom_response(result, 200)

    @staticmethod
    def custom_response(data, code=200):
        """Wrap `data` in the service envelope and return a JSON Flask response.

        Non-200 codes treat `data` as an error message; 200 treats it as the
        payload. The body is serialized with ensure_ascii=False so Chinese
        text stays readable.
        """
        if code != 200:
            body = {
                "message": data,
                "data": [],
                "status": 0,
            }
        else:
            body = {
                "message": "success",
                "data": data,
                "status": 1
            }
        resp = make_response(json.dumps(body, ensure_ascii=False), code)
        resp.headers["Content-Type"] = "application/json"
        return resp


if __name__ == '__main__':
    # Rotating file log: a new file every 500 MB, entries kept for one month.
    logger.add("logs/{time}.log", rotation="500 MB", retention="1 month", level="DEBUG")

    # Restrict CUDA to physical GPU #5; fall back to CPU when unavailable.
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    os.environ["CUDA_VISIBLE_DEVICES"] = "5"
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # Dedicated binary model for the "是否有故障" (fault) label.
    fault_model = SFGZInference(
        model_name_or_path="/data/classify/chinese-roberta-wwm-ext-large_06_01_sfgz",
        device=device,
        model_type="bert",
    )
    # Multi-label model covering every other label.
    multi_model = MultiLabelInferenceModel(
        model_name_or_path="/data/classify/0.860",
        device=device,
        model_type="bert",
        label_path="/note/nlp_algo/app/data/competition/readme.txt"
    )

    api.add_resource(
        ClassifyHandler,
        '/nlp/classify/predict',
        resource_class_kwargs={
            "multi_inference": multi_model,
            "sfgz_inference": fault_model,
        },
    )
    # Deployment preference (kept from the original): Gevent > uwsgi >
    # gunicorn > Flask's built-in server.
    app.run("0.0.0.0", 5000, debug=False)
