# NOTE(review): this entire module is commented out — a disabled Flask-RESTful
# endpoint wrapping ChatGLM3 chat inference, apparently kept for reference.
# If it is ever re-enabled, note that cleanup() deletes the global
# model/tokenizer after the FIRST request, so any later request would raise
# NameError inside post() — confirm whether per-request teardown is intended.
#
# from flask import jsonify, request
# from flask_restful import Resource
# import os
# import torch
# from transformers import AutoTokenizer, AutoModel
# import gc
#
# # Manually trigger Python garbage collection and release cached CUDA memory.
# def cleanup():
#     global model, tokenizer
#     del model
#     del tokenizer
#     gc.collect()
#     torch.cuda.empty_cache()
# # Paths for the model and tokenizer (env-overridable; tokenizer path
# # defaults to the model path).
# MODEL_PATH = os.environ.get('MODEL_PATH', '../ChatGLM3/chatglm3-6b')
# TOKENIZER_PATH = os.environ.get("TOKENIZER_PATH", MODEL_PATH)
#
# # Load the tokenizer and model at import time (remote code trusted,
# # device placement handled by device_map="auto"), in eval mode.
# tokenizer = AutoTokenizer.from_pretrained(TOKENIZER_PATH, trust_remote_code=True)
# model = AutoModel.from_pretrained(MODEL_PATH, trust_remote_code=True, device_map="auto").eval()
#
# # Chat endpoint: POST {"prompt": ...} -> {"content": ..., "role": "assistant"}.
# class Chatglm3(Resource):
#     def post(self):
#         # Get the prompt from the JSON request body.
#         data = request.get_json()
#         prompt = data.get('prompt', '')
#
#         # Reject empty prompts with HTTP 400.
#         if not prompt:
#             return {"error": "Prompt cannot be empty."}, 400
#
#         # Run inference; model.chat returns a 2-tuple and the second
#         # element (presumably the chat history) is discarded.
#         base_response, _ = model.chat(tokenizer, prompt)
#
#         # Return the formatted response.
#         response = {
#             "content": base_response,
#             "role": "assistant"
#         }
#         cleanup()
#         return jsonify(response)
#
#
