import os
import subprocess
import sys
import time
from concurrent.futures import ThreadPoolExecutor

from flask import Flask, request, Response, stream_with_context
from flask_cors import CORS, cross_origin

# Absolute location of this script; the bundled bert/ package and the data
# files (vocab.txt, src.json, predictions.json) are resolved relative to it.
filepath = os.path.abspath(__file__)
dirpath = os.path.dirname(filepath)
print(filepath, dirpath)

# Shared pool for the fire-and-poll background inference jobs
# (see /api/yolo and /api/yolo_hhb below).
executor = ThreadPoolExecutor(10)

# Same directory as dirpath, computed independently; used for static assets.
basedir = os.path.abspath(os.path.dirname(__file__))

app = Flask(__name__)
CORS(app)  # allow cross-origin requests from the frontend dev server

app.config['STATIC_FOLDER'] = 'static'
app.config['STATIC_URL_PATH'] = '/static'


@app.route('/api/')
def hello_world():
    """Liveness probe: confirms the API server is up and responding."""
    greeting = 'Hello, World!'
    return greeting


# BertTask holds the current QA context paragraph (set via a "SRC:" message
# to /api/qwen); None means no context has been provided yet.
BertTask = None
sys.path.insert(0, dirpath + "/bert")  # make the bundled bert/ package importable
import numpy as np
import json
import bert_inference as bertinfer

# Files consumed/produced by the BERT QA pipeline.
vocab_file = f"{dirpath}/vocab.txt"   # tokenizer vocabulary
predict_file = f"{dirpath}/src.json"  # SQuAD-style input rewritten per request
output_path = dirpath                 # predictions.json is written here
save_inter_result = True              # keep intermediate results on disk

QwenTask = None

from llama_cpp import Llama

# Download (Hugging Face cache) and load the Qwen chat model in ONE step.
# The original code called from_pretrained(), discarded the returned model,
# then loaded a second Llama from a hard-coded snapshot path — which loads
# the model twice and breaks whenever the upstream revision hash changes —
# and passed `Verbose=True` (wrong keyword case, silently ignored) instead
# of `verbose=True`. from_pretrained forwards extra kwargs to Llama().
llm = Llama.from_pretrained(
    repo_id="Qwen/Qwen1.5-0.5B-Chat-GGUF",
    filename="*q8_0.gguf",
    chat_format="chatml",   # Qwen 1.5 chat models use the ChatML template
    verbose=True,
    split_mode=2,           # presumably LLAMA_SPLIT_MODE_ROW — confirm against llama.cpp enum
    main_gpu=2,
    n_gpu_layers=16,
    n_threads=4,
    use_mmap=False,
)


def getQwenResult(text):
    """Stream a chat completion for *text* from the local Qwen model.

    Returns the iterator of streamed completion chunks produced by
    llama-cpp-python (``stream=True``); the caller consumes the deltas.
    """
    conversation = [
        {
            "role": "system",
            "content": "You are a helpful assistant",
        },
        {"role": "user", "content": text},
    ]
    return llm.create_chat_completion(
        messages=conversation,
        max_tokens=512,
        temperature=0.7,
        stream=True,
    )


def _write_squad_file(context, question):
    """Write a minimal SQuAD-v1.1-style file holding one context and one question."""
    squad = {
        "data": [
            {
                "title": "SRC",
                "paragraphs": [
                    {
                        "context": context,
                        "qas": [
                            {
                                "question": question,
                                "id": 0
                            }
                        ]
                    }
                ]
            }
        ],
        "version": "1.1"
    }
    # Context manager guarantees the handle is closed even if dump fails
    # (the original used bare open()/close() pairs).
    with open(predict_file, 'w') as f:
        json.dump(squad, f, indent=2)


def _answer_with_bert(question):
    """Inject *question* into the stored SQuAD file, run the BERT QA
    pipeline, and return the answer string for question id "0"."""
    with open(predict_file, 'r') as f:
        squad = json.load(f)
    squad["data"][0]["paragraphs"][0]["qas"][0] = {
        "question": question,
        "id": 0
    }
    with open(predict_file, 'w') as f:
        json.dump(squad, f, indent=2)
    print(" ********** preprocess test **********")
    eval_examples, eval_features = bertinfer.get_data(vocab_file, predict_file)
    print(" ******* run bert *******")
    res = bertinfer.inference_model(eval_features, np.int32, output_path, save_inter_result)
    print(" ********** postprocess **********")
    bertinfer.postprocess(eval_examples, eval_features, res, output_path)
    with open(output_path + "/predictions.json", 'r') as f:
        predictions = json.load(f)
    return predictions["0"]


@app.route('/api/qwen', methods=['POST'])
def qwen():
    """Dispatch a posted chat message by prefix.

    * ``SRC:<text>``   — store <text> as the BERT QA context paragraph.
    * ``QA:<question>``— answer <question> against the stored context.
    * ``CLEAN:``       — forget the stored context.
    * anything else    — stream a Qwen chat completion back as
      Server-Sent Events (``text/event-stream``).
    """
    text = request.form['text']
    global BertTask
    if text.startswith("SRC:"):
        BertTask = text[4:]
        print("Source:", BertTask)
        _write_squad_file(BertTask, "")
        return {"result": "SRC", "status": "done"}
    if text.startswith("QA:"):
        if not BertTask:
            return {"result": "You have not offer any information!", "status": "done"}
        question = text[3:]
        print("Question:", question)
        return {"result": _answer_with_bert(question), "status": "done"}
    if text.startswith("CLEAN:"):
        BertTask = None
        return {"result": "The information is removed.", "status": "done"}

    # No recognized prefix: treat as free chat, streamed token-by-token.
    chunks = getQwenResult(text)

    def generate():
        for chunk in chunks:
            delta = chunk['choices'][0]['delta']
            # First/last stream chunks carry role/finish info with no content.
            if 'content' in delta:
                yield f"event: message\ndata: {delta['content']}\n\n"

    response = Response(generate(), mimetype='text/event-stream')
    response.headers['Cache-Control'] = 'no-cache'
    response.headers['Connection'] = 'keep-alive'
    return response


# @app.route('/api/qwen', methods=['POST'])
def qwen_stream():
    """Debug SSE endpoint (route currently disabled): echoes the posted text
    to stdout, then emits the current UNIX timestamp once per second forever."""
    text = request.form['text']
    print(text)

    def generate():
        while True:
            yield 'event: message\ndata: i = %d\n\n' % time.time()
            time.sleep(1)

    resp = Response(generate(), mimetype='text/event-stream')
    resp.headers['Cache-Control'] = 'no-cache'
    resp.headers['Connection'] = 'keep-alive'
    return resp


# Futures for in-flight YOLO jobs; None when idle. Clients poll the
# /api/yolo and /api/yolo_hhb endpoints until the future completes.
YoloTask = None
YoloHHBTask = None

import torch

# Load YOLOv5-nano from torch hub at import time (downloads on first run).
model = torch.hub.load("ultralytics/yolov5", "yolov5n")
model.eval()


def getYoloResult(imgName):
    """Run the torch-hub YOLOv5n model on an uploaded image.

    Reads static/yolo/<imgName>, writes the annotated image under
    static/yolo/result/, and returns the image name for the poller.
    """
    yolo_dir = basedir + "/static/yolo/"
    results = model(yolo_dir + imgName)
    results.save(save_dir=yolo_dir + 'result/', exist_ok=True)
    return imgName


import yolo_inference as yoloinfer
import cv2

# Input size (height x width) fed to the HHB-compiled YOLOv5 model.
# NOTE(review): "hight" is a typo for "height", kept because the name is
# referenced by getYoloHHBResult below.
input_hight = 384
input_width = 640


def getYoloHHBResult(imgName):
    """Run the HHB-compiled YOLOv5 model on static/yolo/<imgName>.

    Preprocesses the image to the model's input layout, invokes the native
    ./yolov5n_example binary (which writes detections to detect.txt),
    draws the resulting boxes, saves the annotated image under
    static/yolo/result/hhb/, and returns the image name for the poller.
    """
    print(" ********** preprocess image **********")
    path = basedir + "/static/yolo/"
    imgfile = path + imgName
    original_image = cv2.imread(imgfile)
    rgb_image = cv2.cvtColor(original_image, cv2.COLOR_BGR2RGB)
    image_preprocessed = yoloinfer.image_preprocess(np.copy(rgb_image), [input_hight, input_width])
    image_preprocessed = image_preprocessed / 255.0
    img_ndarray = np.array(image_preprocessed).astype("float32")
    img_ndarray = img_ndarray.transpose(2, 0, 1)  # HWC -> CHW
    img_ndarray.tofile("image_preprocessed.tensor", "\n")
    img_ndarray.tofile("image_preprocessed.bin")
    print(" ******* run yolov5 and postprocess *******")
    # argv list avoids a shell; the original os.system() call discarded the
    # exit status, so an inference failure surfaced later as a confusing
    # parse error on a stale detect.txt.
    proc = subprocess.run(
        ["./yolov5n_example", "./hhb_out_yolo/shl.hhb.bm", "image_preprocessed.bin"]
    )
    if proc.returncode != 0:
        print("yolov5n_example exited with code", proc.returncode)
    print(" ********** draw bbox **********")
    # detect.txt holds one value per line, six lines per detection; a blank
    # line (or EOF) terminates the list.
    bboxes = []
    with open("detect.txt", 'r') as f:
        while True:
            x_min = f.readline().strip()
            if not x_min:
                break
            y_min = f.readline().strip()
            x_max = f.readline().strip()
            y_max = f.readline().strip()
            probability = f.readline().strip()
            cls_id = f.readline().strip()
            bbox = [float(x_min), float(y_min), float(x_max), float(y_max), float(probability), int(cls_id)]
            print(bbox)
            bboxes.append(bbox)
    image_data = yoloinfer.image_preprocess(np.copy(original_image), [input_hight, input_width])
    image = yoloinfer.draw_bbox(image_data, bboxes)
    cv2.imwrite("./static/yolo/result/hhb/" + imgName, image)
    return imgName


@app.route('/api/yolo', methods=['POST'])
def yolo():
    """Fire-and-poll endpoint for torch-hub YOLOv5 inference.

    Saves the uploaded image, then either reports the finished result,
    says "running" while a job is pending, or submits a new job.
    """
    global YoloTask
    upload = request.files.get('img')
    save_dir = basedir + "/static/yolo/"
    upload.save(save_dir + upload.filename)
    if YoloTask is None:
        # No job in flight: start one for this image.
        YoloTask = executor.submit(getYoloResult, upload.filename)
        return 'running'
    if not YoloTask.done():
        return "running"
    result = YoloTask.result()
    YoloTask = None
    return {"result": result, "status": "done"}


@app.route('/api/yolo_hhb', methods=['POST'])
def yolo_hhb():
    """Fire-and-poll endpoint for the HHB-compiled YOLOv5 model.

    Expects the image name of a previously uploaded file; reports the
    finished result, "running" while pending, or submits a new job.
    """
    global YoloHHBTask
    imgName = request.form["imgName"]
    print(imgName)
    if YoloHHBTask is None:
        YoloHHBTask = executor.submit(getYoloHHBResult, imgName)
        return 'running'
    if not YoloHHBTask.done():
        return "running"
    result = YoloHHBTask.result()
    YoloHHBTask = None
    return {"result": result, "status": "done"}


if __name__ == '__main__':
    # Flip to False to run the standalone BERT inference pipeline instead
    # of serving the HTTP API.
    run_as_server = True
    if run_as_server:
        app.run(host="0.0.0.0")
    else:
        bertinfer.main()
