import sys
from concurrent.futures import ThreadPoolExecutor
from config import InferenceConfig
from inference import LlamaInterface
import os

# Resolve paths relative to this file so the script works from any CWD.
now_dir = os.path.dirname(os.path.abspath(__file__))
project_dir = os.path.dirname(now_dir)
# Tokenizer lives one level up, next to this package; the compiled .om
# model file lives under ./model inside this directory.
tokenizer_dir = os.path.join(project_dir, "chatglm3-6b")
model_dir = os.path.join(now_dir, "model", "chatglm3-6b.om")

config = InferenceConfig(
    tokenizer=tokenizer_dir,
    model=model_dir,
)
# Single shared inference engine, used by both the CLI and the Flask routes.
infer_engine=LlamaInterface(config)

def inference_cli():
    """Run an interactive terminal chat loop against the shared engine.

    Reads user input from stdin, streams the model reply token-by-token to
    stdout, prints timing statistics after each turn, and keeps the running
    conversation in ``history`` (role/content dicts) until the user types
    ``exit``/``quit`` or clears it with ``clear``.
    """
    print("\n欢迎使用ChatGLM聊天机器人，输入exit或者quit退出，输入clear清空历史记录")
    history = []
    while True:
        input_text = input("Input: ")
        if input_text in ["exit", "quit", "exit()", "quit()"]:
            break
        if input_text == 'clear':
            history = []
            print("Output: 已清理历史对话信息。")
            continue
        print("Output: ", end='')
        response = ""
        is_first = True
        # Initialize ALL three metrics: previously total_speed was left
        # unset, so an empty stream_predict() generator made the summary
        # print below raise NameError. (Also fixes the "lantency" typo.)
        first_token_latency, decode_speed, total_speed = 0, 0, 0
        for (
                new_text,
                first_token_latency,
                decode_speed,
                total_speed
            ) in infer_engine.stream_predict(input_text, history):
            # Suppress leading whitespace-only chunks so the reply starts
            # cleanly after the "Output: " prompt.
            if is_first:
                if len(new_text.strip()) == 0:
                    continue
                is_first = False
            print(new_text, end='', flush=True)
            response += new_text
        print("")
        print(
            "[INFO] first_token_latency: {:.4f}s,".format(first_token_latency),
            " decode_speed: {:.2f} token/s, ".format(decode_speed),
            " total_speed(prefill+decode): {:.2f} token/s".format(total_speed),
        )
        history.append({"role": "user", "content": input_text})
        history.append({"role": "assistant", "content": response})
        
def main():
    """Entry point: with any CLI argument, run the terminal chat loop;
    otherwise start a small Flask web API wrapping the shared engine.
    """
    # Any extra argv token selects the interactive CLI instead of the server.
    if len(sys.argv) > 1:
        inference_cli()
        return

    # Web-only dependencies are imported lazily so the CLI path never
    # requires Flask to be installed.
    from flask import Flask, request, jsonify
    from flask import render_template
    from flask_cors import CORS

    # Small worker pool: chat requests are handed off here so the HTTP
    # handler can return immediately while inference runs in background.
    worker_pool = ThreadPoolExecutor(max_workers=2)

    # Serve the pre-built front-end bundle from ./dist at the site root.
    app = Flask(
        __name__,
        static_folder='./dist',
        template_folder="./dist",
        static_url_path=""
    )
    # Allow cross-origin calls to every route (dev front-end convenience).
    CORS(app, resources=r'/*')

    @app.route('/')
    def index():
        # Single-page app shell.
        return render_template('index.html', name='index')

    @app.route("/api/chat", methods=["POST"])
    def getChat():
        # Fire-and-forget: queue the message and acknowledge right away;
        # the client polls /api/getMsg for the streamed state.
        msg = request.get_json(force=True)['message']
        if len(msg) == 0:
            return jsonify({"code": 404})
        print(msg)
        worker_pool.submit(infer_engine.predict, msg)
        return jsonify({"code": 200})

    @app.route("/api/getMsg", methods=["GET"])
    def getMsg():
        # Current generation state (partial reply, timing, etc.).
        return jsonify(infer_engine.getState())

    @app.route("/api/reset", methods=["GET"])
    def reset():
        # Drop the engine's conversation history.
        infer_engine.reset()
        return jsonify({"code": 200})

    # use_reloader=False: the reloader would re-import this module and
    # load the model twice.
    app.run(
        use_reloader=False,
        host="0.0.0.0",
        port=5000
    )

if __name__ == '__main__':
    # Dispatch through main(): with CLI args it runs the terminal chat,
    # without args it serves the Flask web API. Previously main() was
    # commented out and inference_cli() ran unconditionally, which made
    # the entire web-server branch unreachable.
    main()
