import json
import logging
import warnings
from configparser import ConfigParser

from maas.constants.http_method_name import HttpMethodName
from maas.http.api_client import ApiClient
from maas.model.api_request import ApiRequest

# Configure module-wide logging and suppress noisy UserWarnings.
warnings.filterwarnings("ignore", category=UserWarning)
logging.basicConfig(format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S",
                    level=logging.INFO)
logger = logging.getLogger(__name__)


class LLM:
    """Thin client around an OpenAI-compatible chat-completions endpoint.

    Per-model settings are read from the ``config.ini`` section named by
    *model_chosen*; requests are issued through the project's ``ApiClient``.
    """

    def __init__(self, model_chosen: str):
        # Section name in config.ini holding this model's settings.
        self._model_chosen = model_chosen
        self._config = self._build_config()
        self._api_client = ApiClient()
        self._request = self._build_api_request()

    def _build_config(self) -> dict:
        """Load model settings from config.ini, using local defaults as fallback.

        ``ConfigParser.get(..., fallback=...)`` also covers a missing section,
        so an unknown *model_chosen* (or an absent config.ini) simply yields
        the defaults below.
        """
        config = ConfigParser()
        config.read('config.ini')
        section = self._model_chosen
        return {
            'base_url': config.get(section, 'base_url', fallback="http://127.0.0.1:10605/v1"),
            'api_key': config.get(section, 'api_key', fallback="anykey"),
            'api_secret': config.get(section, 'api_secret', fallback="anysecret"),
            'modelname': config.get(section, 'model_name', fallback="Qwen3-14B"),
            'modelpath': config.get(section, 'model_path', fallback="/data/jinke/workspace/models/qwen3_14b_org"),
            'context_window': config.getint(section, 'context_window', fallback=65536),
            'max_workers': config.getint(section, 'max_workers', fallback=10),
        }

    def _build_api_request(self):
        """Build the reusable POST request carrying the JSON header and credentials."""
        api_request = ApiRequest(HttpMethodName.POST, self._config['base_url'])
        api_request.add_headers("Content-Type", "application/json")
        api_request.set_credential(self._config['api_key'], self._config['api_secret'])
        return api_request

    def generate_response(self, user_input):
        """Single-turn, non-streaming chat completion.

        Args:
            user_input: the user message content.

        Returns:
            The assistant's content string, or ``None`` on HTTP failure,
            empty content, or any exception (which is logged).
        """
        # BUG FIX: the original nested double quotes inside a double-quoted
        # f-string (f"...{self._config.get("modelname")}"), a SyntaxError on
        # every Python before 3.12. Lazy %-args also defer formatting.
        logger.info("模型调用开始，当前模型为: %s", self._config['modelname'])

        # Thinking mode is disabled; response is returned in one piece.
        request_body = {
            "model": self._config['modelname'],
            "messages": [{
                "role": "user",
                "content": user_input
            }],
            "chat_template_kwargs": {"enable_thinking": False},
            "stream": False
        }
        self._request.set_json_body(json.dumps(request_body))

        # Expected response shape (OpenAI chat.completion):
        # {"choices":[{"message":{"role":"assistant","content":"..."}, ...}], "usage":{...}, ...}
        try:
            res = self._api_client.send_request(self._request)
            if res.ok:
                payload = res.json()
                message = payload['choices'][0].get('message', {})
                content = message.get('content', '')
                if content:
                    logger.info("模型调用结果:\n %s", content)
                    return content
            else:
                # Previously a non-ok response fell through silently.
                logger.warning("模型调用失败, HTTP状态码: %s", res.status_code)
        except Exception as e:
            logger.error("模型调用出错, 错误信息为: %s", e)
        return None

    def generate_response_stream(self, user_input):
        """Single-turn, streaming chat completion.

        Yields:
            Assistant content chunks (str) as they arrive; a single ``None``
            is yielded if an exception occurs (which is logged).
        """
        request_body = {
            "model": self._config['modelname'],
            "messages": [{
                "role": "user",
                "content": user_input
            }],
            "chat_template_kwargs": {"enable_thinking": False},
            "stream": True
        }
        self._request.set_json_body(json.dumps(request_body))

        try:
            response = self._api_client.send_request(self._request)
            # SSE framing: each payload line looks like "data: {...}" and the
            # sentinel "data: [DONE]" terminates the stream.
            for line in response.iter_lines(chunk_size=None, decode_unicode=True):
                if not line or not line.startswith("data:"):
                    continue
                if line == "data: [DONE]":
                    break

                chunk = json.loads(line[5:])
                if chunk['choices']:
                    delta = chunk['choices'][0].get('delta', {})
                    content = delta.get('content', '')
                    # BUG FIX: the API may send an explicit `"content": null`,
                    # in which case .get() returns None and the original
                    # `len(content) > 0` raised TypeError.
                    if content:
                        yield content
        except Exception as e:
            logger.error("模型调用出错, 错误信息为: %s", e)
            yield None

    def generate_multiturn_response(self, messages, model=None):
        """Multi-turn, non-streaming chat completion.

        Args:
            messages: full chat history as a list of {"role", "content"} dicts.
            model: optional model-name override. BUG FIX: the original accepted
                this parameter but silently ignored it; it now takes effect
                when provided (default falls back to the configured model).

        Returns:
            The assistant's content string, or ``None`` on failure.
        """
        request_body = {
            "model": model or self._config['modelname'],
            "messages": messages,
            "stream": False
        }
        self._request.set_json_body(json.dumps(request_body))
        try:
            res = self._api_client.send_request(self._request)
            if res.ok:
                payload = res.json()
                message = payload['choices'][0].get('message', {})
                content = message.get('content', '')
                if content:
                    logger.info("模型调用结果:\n %s", content)
                    return content
            else:
                logger.warning("模型调用失败, HTTP状态码: %s", res.status_code)
        except Exception as e:
            logger.error("模型调用出错, 错误信息为: %s", e)
        return None

    def get_config(self):
        """Return the resolved model configuration dict."""
        return self._config

if __name__ == "__main__":
    # BUG FIX: LLM.__init__ requires the config-section name; the original
    # called LLM() with no arguments, raising TypeError at startup. Any
    # section name works here thanks to the config fallbacks.
    llm = LLM("qwen3")
    # Consume the generator to print the streamed output in real time.
    for chunk in llm.generate_response_stream("你是谁？"):
        if chunk is not None:
            print(chunk, end='', flush=True)  # no newline between chunks
        else:
            print("\n[流式传输结束或出错]")
