import json
import os
import uuid

import requests
import logging
from flask import Flask, request, Response

logging.basicConfig(
    # Log level
    level=logging.INFO,
    # Log format:
    # timestamp, source filename, line number, level name, message
    format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',
)

# Upstream OpenAI-compatible LLM endpoint; overridable via the LLM_URL
# environment variable (e.g. 'http://ai-dx.wair.ac.cn/service_modelview/api/92/').
llm_url = os.environ.get('LLM_URL', 'http://localhost:8088')
# os.environ.get() with a default never returns None, so the previous
# `is None` check was dead code; guard against an *empty* LLM_URL instead
# (e.g. LLM_URL set to "" in the deployment environment).
if not llm_url:
    raise Exception('LLM URL is not set.')

app = Flask(__name__)


def extract_message(req):
    """Build an OpenAI-style chat ``messages`` list from a Taichu request.

    The request's ``context`` field encodes prior turns as ``###``-separated
    segments, each prefixed with ``问题：`` (a user turn) or ``答案：`` (an
    assistant turn); segments without either prefix are skipped.  The current
    ``input_text`` is appended as the final user message, after a fixed
    Chinese system prompt describing the model.

    :param req: request dict with optional ``context`` and ``input_text`` keys
    :return: list of ``{"role": ..., "content": ...}`` message dicts
    """
    system_prompt = ("你是“紫东太初”全模态大模型，由中科院自动化所和武汉人工智能研究院推出新一代大模型，支持多轮问答、文本创作、图像生成、3D"
                     "理解、信号分析等全面问答任务，拥有更强的认知、理解、创作能力，带来全新互动体验。 ")
    chat = [{"role": "system", "content": system_prompt}]

    history = req.get('context')
    if history:
        for segment in history.strip().split('###'):
            # Map each recognized prefix to its chat role; first match wins.
            for prefix, role in (("问题：", "user"), ("答案：", "assistant")):
                if segment.startswith(prefix):
                    chat.append({"role": role, "content": segment[len(prefix):]})
                    break

    chat.append({"role": "user", "content": req.get('input_text', '')})
    return chat


# @app.route('/', methods=['POST'])
# def taichu_proxy():
#     req = request.json
#     logging.info(f'req:{req}')

#     max_tokens = req.get('max_new_tokens')
#     stream = req.get('do_stream', True)
#     stop = req.get('additional_eos')
#     query = req.get('input_text', '')
#     context = req.get('context', '')

#     presence_penalty = req.get('presence_penalty')
#     frequency_penalty = req.get('frequency_penalty')
#     repetition_penalty = req.get('repetition_penalty')
#     temperature = req.get('temperature')
#     top_p = req.get('top_p')
#     top_k = req.get('top_k')

#     openai_req = {
#         "model": "gpt-3.5-turbo",
#         "messages": extract_message(req),
#         "max_tokens": max_tokens,
#         "stream": stream,
#         "stop": stop,
#         "presence_penalty": presence_penalty,
#         "frequency_penalty": frequency_penalty,
#         "repetition_penalty": repetition_penalty,
#         "temperature": temperature,
#         "top_p": top_p,
#         "top_k": top_k,
#     }

#     url = llm_url + '/v1/chat/completions'

#     logging.info(f"proxy to  url:{url}")
#     excluded_headers = ['content-encoding', 'content-length', 'transfer-encoding', 'connection', 'host',
#                         'x-modelarts-latency', 'x-modelarts-service-version', 'x-modelarts-trace-id']
#     req_headers = {}
#     for (name, value) in request.headers.items():
#         if name.lower() not in excluded_headers:
#             req_headers[name] = value

#     resp = requests.request(method='POST', url=url,
#                             headers=req_headers,
#                             json=openai_req,
#                             timeout=10 * 60, stream=True)
#     resp_headers = [(name, value) for (name, value) in resp.raw.headers.items()
#                     if name.lower() not in excluded_headers]

#     if resp.status_code != 200:
#         return Response(resp.iter_content(chunk_size=1024 * 4), resp.status_code, resp_headers)

#     if not stream:
#         ret = resp.json()
#         content = ret['choices'][0]['message']['content']
#         full_context = context + f"\n###问题：{query}\n\n###答案：{content}"

#         return {"full_context": full_context,
#                 "query": query,
#                 "answer": content, "token_nums": len(content)}

#     def convert_resp():
#         ans = ''
#         index = 0
#         prefix = "data:"
#         uid = str(uuid.uuid4())
#         for line in resp.iter_lines():
#             data = line.decode()
#             if len(data) == 0:
#                 continue
#             if data == 'data: [DONE]':
#                 break
#             if data.startswith(prefix):
#                 data = data[len(prefix):]
#             try:
#                 json_data = json.loads(data)
#                 delta = json_data['choices'][0]['delta']
#                 if 'content' in delta:
#                     content = delta['content']
#                     ans += content
#                     yield json.dumps({"created": uid,
#                                       "choices": [{"text": content, "index": index, "logprobs": None,
#                                                    "finish_reason": None}]}, ensure_ascii=False) + '\r\n'
#                     index += 1
#             except:
#                 pass
#         full_context = context + f"\n###问题：{query}\n\n###答案：{ans}"
#         yield json.dumps({"full_context": full_context,
#                           "query": query,
#                           "answer": ans, "token_nums": index}, ensure_ascii=False) + '\r\n'

#     return Response(convert_resp(), resp.status_code, resp_headers)


if __name__ == '__main__':
    # Run Flask's built-in server, listening on all interfaces so the proxy
    # is reachable from other hosts/containers; serves on port 8080.
    app.run(host='0.0.0.0', port=8080)
