import json
import os
from config import PROJ_TOP_DIR
import os
import time
from typing import Literal
from openai import  OpenAI,AzureOpenAI
# SECURITY(review): API keys are hardcoded in source. These should be rotated
# and loaded from environment variables or a secrets manager instead of being
# committed to the repository.
GPT4_AZURE_OPENAI_KEY='126b11dac9994145a291b9f0a37e53d7'
GPT35_AZURE_OPENAI_KEY='fb149defd77c469ea86e54dc2e34b794'
class OpenAI_LLM:
    '''
    Wrapper around several OpenAI-compatible chat-completion backends,
    selected by the suffix of ``model_name``:

        Microsoft Azure (https://portal.azure.com):
            'gpt-3.5-turbo-1106_azure': 'GPT-35',
            'gpt-4-1106-preview_azure': 'GPT4',
        API2D (https://api2d.com/wiki/doc):
            'gpt-3.5-turbo-1106_api2d': 'gpt-3.5-turbo-1106',
            'gpt-3.5-turbo-16k-0613_api2d': 'gpt-3.5-turbo-16k-0613',
            'gpt-3.5-turbo-16k_api2d': 'gpt-3.5-turbo-16k',
            'gpt-3.5-turbo-0613_api2d': 'gpt-3.5-turbo-0613',
            'gpt-3.5-turbo_api2d': 'gpt-3.5-turbo',
            'gpt-3.5-turbo-0301_api2d': 'gpt-3.5-turbo-0301',
            'gpt-4-1106-preview_api2d': 'gpt-4-1106-preview',
            'gpt-4-0613_api2d': 'gpt-4-0613',
            'gpt-4_api2d': 'gpt-4',
        Hefei development team:
            'qwen-72b-chat_hefei': 'qwen-72b-chat',
            'qwen-14b-chat_hefei': 'qwen-14b-chat',
        Moonshot Kimi:
            'moonshot-v1-8k_kimi', 'moonshot-v1-32k_kimi', 'moonshot-v1-128k_kimi'
    '''

    @staticmethod
    def _chatgpt_system_prompt(knowledge_cutoff):
        '''Build the stock ChatGPT system prompt for a given knowledge cutoff.

        The original code duplicated this date/cutoff construction in four
        places; the resulting string is byte-identical to the originals.
        '''
        current_date = time.strftime("%Y-%m", time.localtime())
        return (
            'You are ChatGPT, a large language model trained by OpenAI.'
            f'\nKnowledge cutoff: {knowledge_cutoff}\nCurrent date: {current_date}'
        )

    def __init__(self, model_name):
        '''
        Args:
            model_name: str, name of the model to use (see class docstring).
                Currently supported: gpt-3.5-turbo-1106_azure,
                gpt-4-1106-preview_azure, gpt-3.5-turbo-1106_api2d,
                gpt-4-1106-preview_api2d, gpt-3.5-turbo-16k-0613_api2d,
                gpt-3.5-turbo-16k_api2d, gpt-3.5-turbo-0613_api2d,
                gpt-3.5-turbo_api2d, gpt-4-0613_api2d, gpt-4_api2d,
                qwen-72b-chat_hefei, qwen-14b-chat_hefei,
                moonshot-v1-32k_kimi, moonshot-v1-128k_kimi, moonshot-v1-8k_kimi

        Raises:
            ValueError: if ``model_name`` does not match any supported backend.
        '''
        self.model_name = model_name
        # Generic fallback so _call(add_system_prompt=True) never raises
        # AttributeError. The original code left self.system_prompt unset for
        # the _hefei models (and for _api2d names that are neither gpt-4 nor
        # gpt-3.5), which crashed when a system prompt had to be prepended.
        self.system_prompt = 'You are a helpful assistant.'

        if model_name.endswith("_hefei"):
            self.client = OpenAI(
                base_url="https://dev.iai007.cloud/ai/api/v1",
                api_key="Hh8VazfByLx3fONHdrW_6muIBvfhZhfh",
            )
            self.model = model_name.lower().split("_")[0]

        elif model_name.endswith("_api2d"):
            self.client = OpenAI(
                base_url="https://openai.api2d.net/v1",
                api_key="fk204884-QxqDEvomnE6PRVe6WBxEnVcC8v88TxSL",
            )
            self.model = model_name.split("_")[0]
            if "gpt-4" in model_name:
                self.system_prompt = self._chatgpt_system_prompt('2023-04')
            elif "gpt-3.5" in model_name:
                self.system_prompt = self._chatgpt_system_prompt('2021-09')

        elif model_name.endswith("_azure"):
            if "gpt-4" in model_name:
                self.client = AzureOpenAI(
                    azure_endpoint="https://zhishenggpt40.openai.azure.com/",
                    api_key=GPT4_AZURE_OPENAI_KEY,
                    api_version="2024-02-15-preview",
                )
                self.model = "GPT4"  # Azure deployment name, not the OpenAI model id
                self.system_prompt = self._chatgpt_system_prompt('2023-04')
            elif "gpt-3.5" in model_name:
                self.client = AzureOpenAI(
                    azure_endpoint="https://zhishenggpt.openai.azure.com/",
                    api_key=GPT35_AZURE_OPENAI_KEY,
                    api_version="2024-02-15-preview",
                )
                self.model = "GPT-35"  # Azure deployment name
                self.system_prompt = self._chatgpt_system_prompt('2021-09')
            else:
                raise ValueError(f"Unsupported model name: {model_name}")

        elif model_name.endswith("_kimi"):
            # moonshot-v1-8k_kimi / moonshot-v1-32k_kimi / moonshot-v1-128k_kimi
            self.system_prompt = '你是 Kimi，由 Moonshot AI 提供的人工智能助手，你更擅长中文和英文的对话。你会为用户提供安全，有帮助，准确的回答。同时，你会拒绝一切涉及恐怖主义，种族歧视，黄色暴力等问题的回答。Moonshot AI 为专有名词，不可翻译成其他语言。'
            self.client = OpenAI(
                base_url="https://api.moonshot.cn/v1",
                api_key="sk-mCiXwCJ3etuPIAP188RebYQiYSHeoayj9HDWk5cQP0n0sNWr",
            )
            self.model = model_name.split("_")[0]

        else:
            raise ValueError(f"Unsupported model name: {model_name}")

    def _call(
            self,
            messages,
            generation_config=None,  # accepted for interface compatibility; currently unused
            temperature=0.7,
            max_tokens=4096,
            top_p=0.95,
            frequency_penalty=0,
            presence_penalty=0,
            stop=None,
            stream=False,
            add_system_prompt=False,
    ):
        '''Send ``messages`` to the configured backend and return the completion.

        Args:
            messages: list of {'role': ..., 'content': ...} chat messages.
            add_system_prompt: when True, prepend ``self.system_prompt`` unless
                the caller already supplied a leading system message.

        Returns:
            The raw completion object (or a stream iterator when ``stream=True``).
        '''
        if add_system_prompt:
            # The original code repeated this identical check once per backend
            # suffix; a single check suffices. Also guard against an empty
            # messages list, which previously raised IndexError.
            if not messages or messages[0]["role"] != "system":
                messages = [{"role": "system", "content": self.system_prompt}] + messages

        completion = self.client.chat.completions.create(
            model=self.model,
            messages=messages,
            temperature=temperature,
            max_tokens=max_tokens,
            top_p=top_p,
            frequency_penalty=frequency_penalty,
            presence_penalty=presence_penalty,
            stop=stop,
            stream=stream  # streamed response when True
        )

        return completion


if __name__ == "__main__":
    # Batch job: for every JSON document under docs/json, ask GPT-4 (Azure)
    # to summarize its "knowledges" values and store the result back into the
    # same file under the "abstract" key.
    llm = OpenAI_LLM('gpt-4-1106-preview_azure')
    json_dir = os.path.join(PROJ_TOP_DIR, "docs", "json")
    abstract_dir = os.path.join(PROJ_TOP_DIR, "docs", "abstract")  # NOTE(review): computed but unused
    for k, json_name in enumerate(os.listdir(json_dir), start=1):
        json_path = os.path.join(json_dir, json_name)
        # Read once (the original opened and parsed each file twice).
        with open(json_path, "r", encoding="utf-8") as f:
            data = json.load(fp=f)
        # Join at C speed instead of quadratic string concatenation.
        content = "".join(data["knowledges"].values())
        abstract = llm._call(
            [{"role": "system", "content": '请总结以下内容：' + content}]
        ).choices[0].message.content
        data.update({"abstract": abstract})
        # Rewrite the file from scratch. The original used "r+" + seek(0)
        # without truncate(), which leaves trailing garbage (corrupt JSON)
        # whenever the new serialization is shorter than the old one.
        with open(json_path, "w", encoding="utf-8") as f:
            json.dump(data, fp=f, indent=4, ensure_ascii=False)
        print(k)