from flask import Flask, jsonify, request
from flask_cors import CORS
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM
from time import time
import os
import logging
from logging.handlers import RotatingFileHandler


# Project root — all relative paths below (e.g. 'logs/') resolve against it.
root_path = 'D:\\work\\code\\clark\\gitee\\py_llm'
os.chdir(root_path)

app = Flask(__name__)
CORS(app)  # allow cross-origin requests from browser front-ends

# Logging setup: rotating file log under logs/.
# makedirs(exist_ok=True) replaces the racy "mkdir if not exists" expression.
os.makedirs('logs', exist_ok=True)
logger = logging.getLogger('qwen2.5_app_log')
logger.setLevel(logging.INFO)  # logger level: INFO and above

# Rotate the log file at 10 MB and keep 5 backups.
max_bytes = 10 * 1024 * 1024  # 10MB
backup_count = 5
handler = RotatingFileHandler(filename='logs/app.log', maxBytes=max_bytes, backupCount=backup_count)
handler.setLevel(logging.INFO)  # handler level: INFO and above

formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)

logger.addHandler(handler)

# NOTE(review): the original assigned an *attribute* on app.config
# (app.config.KEEP_ALIVE_TIMEOUT = ...), which Flask never reads — config
# entries must be dict keys. Stored dict-style here, but KEEP_ALIVE_TIMEOUT is
# not a built-in Flask setting and the dev server does not honor it either;
# confirm whether the deployment server actually consumes this value.
app.config['KEEP_ALIVE_TIMEOUT'] = 6000

# Load the model and tokenizer once at import time from a local checkpoint.
model_name = "D:\\work\\program\\pytorch_models\\Qwen2.5-3B-Instruct"

model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype="auto",  # take the dtype from the checkpoint config
    device_map="auto"    # auto-place weights on the available device(s)
)
# model = model.bfloat16().cuda()

# padding_side="left" — presumably so batched prompts are left-padded and
# generation continues directly after the prompt; confirm against usage.
tokenizer = AutoTokenizer.from_pretrained(model_name, padding_side="left")

@app.route('/qwen_chat', methods=['POST'])
def qwen_chat():
    """Chat endpoint.

    Expects a JSON body {'text': <prompt>}, wraps the prompt in the Qwen chat
    template, generates up to 512 new tokens, and returns a JSON payload:
    {'code': 'SUCCESS', 'cost_time': '<secs> seconds', 'response': <text>} on
    success, or {'code': 'FAIL', 'error': <message>} on any failure.
    """
    try:
        # Raises TypeError (no/invalid JSON body) or KeyError (missing 'text');
        # both are handled by the boundary handler below.
        prompt = request.json['text']
        messages = [
            {"role": "system", "content": "你是人工智能助手！"},
            {"role": "user", "content": prompt},
        ]

        start_time = time()

        # Render the chat messages into a single prompt string.
        text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            add_generation_prompt=True,
        )
        model_inputs = tokenizer([text], return_tensors="pt").to(model.device)

        generated_ids = model.generate(
            **model_inputs,
            max_new_tokens=512,
        )
        # Drop the echoed prompt tokens, keeping only the newly generated ones.
        generated_ids = [
            output_ids[len(input_ids):]
            for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
        ]
        response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]

        cost = time() - start_time

        # Build the payload once and reuse it for both logging and the response
        # (the original duplicated this dict across print/logger/jsonify).
        result = {'code': 'SUCCESS', 'cost_time': f'{cost:.2f} seconds', 'response': response}
        logger.info(result)
        return jsonify(result)
    except Exception as e:
        # Top-level boundary: broad catch is deliberate so the route always
        # answers with JSON; logger.exception records the full traceback.
        result = {'code': 'FAIL', 'error': str(e)}
        logger.exception(result)
        return jsonify(result)


if __name__ == '__main__':
    # Listen on all interfaces at port 8800; threaded=True lets the dev
    # server handle concurrent requests.
    app.run(port=8800, host='0.0.0.0', threaded=True)
