import json
import requests
import time

"""
docker run -it -d --init --name kimi-free-api -p 8000:8000 -e TZ=Asia/Shanghai mirror.ccs.tencentyun.com/vinlic/kimi-free-api
docker pull mirror.ccs.tencentyun.com/vinlic/kimi-free-api:latest
"""


class ChatMetaData:
    """Shared configuration, state and transport for a kimi-free-api server.

    Holds the refresh token, the target server address, conversation state,
    and a simple client-side rate limiter; performs the actual HTTP POST on
    behalf of the Kimi* wrapper classes.
    """

    def __init__(self, refresh_token: str, use_conversation_id: bool = False, use_search: bool = False) -> None:
        self.refresh_token = refresh_token       # bearer token sent with every request
        self.use_search = use_search             # ask the server to enable web search
        self.stream = False                      # non-streaming responses only
        self.model = "kimi"
        self.conversation_id = "none"            # sentinel until the first reply supplies a real id
        self.use_conversation_id = use_conversation_id
        self.IP = ""                             # set via setKimiServer()
        self.port = 8000
        # Rate-limiter state, unit: seconds.
        self.last_req_unix_time = 0
        self.time_interval = 5

    def setKimiServer(self, IP: str, port: int = 8000):
        """Point this client at a kimi-free-api instance."""
        self.IP = IP
        self.port = port

    def _get_headers(self):
        """Build the Authorization / Content-Type headers for a JSON API call."""
        return {
            "Authorization": f"Bearer {self.refresh_token}",
            "Content-Type": "application/json",
        }

    def _ensure_req_interval(self):
        """Block until at least ``time_interval`` seconds have passed since the last request.

        Bug fix: the previous version only recorded the request time when a
        sleep happened, and ``last_req_unix_time`` started at 0, so the delta
        was always huge and the limiter never engaged.  It also slept the full
        interval instead of only the remaining time.  Now the timestamp is
        updated on every call and only the shortfall is slept.
        """
        now = time.time()
        delta = now - self.last_req_unix_time
        if delta < self.time_interval:
            time.sleep(self.time_interval - delta)
        self.last_req_unix_time = time.time()

    def request(self, data, api: str):
        """POST ``data`` as JSON to ``api`` on the configured server.

        Returns the assistant message content on HTTP 200 (raising if the
        response payload has an unexpected shape); on any other status code,
        prints a warning and returns the raw response text.
        """
        self._ensure_req_interval()

        api_url = f"http://{self.IP}:{self.port}{api}"
        # Serialize ourselves so the Content-Type header stays authoritative.
        response = requests.post(
            api_url,
            headers=self._get_headers(),
            data=json.dumps(data),
        )

        if response.status_code != 200:
            print(f"Request failed with status code {response.status_code}")
            return response.text

        result = response.json()
        try:
            content = result["choices"][0]["message"]["content"]
        except (KeyError, IndexError, TypeError) as e:
            raise Exception(f"get content error: {str(e)}, origin result: {result}")
        # Remember the conversation id from the first successful reply so later
        # requests can continue the same conversation.
        if self.use_conversation_id and self.conversation_id == "none":
            response_id = result.get("id")
            print("Response ID:", response_id)
            self.conversation_id = response_id
        return content



# 文档解读
class KimiDoc:
    """Document Q&A: sends a file URL plus a question to the chat endpoint."""

    def __init__(self, meta: "ChatMetaData") -> None:
        self.meta = meta
        self.api = "/v1/chat/completions"

    def _build_doc_req(self, doc_url: str, text: str):
        # Web search tends to interfere with pure document reading.
        if self.meta.use_search:
            print("Warning: 您在文档询问中启动了联网搜索,可能会干扰文档的解读结果")

        # Request body: one user message carrying the file reference and the question.
        file_part = {
            "type": "file",
            "file_url": {"url": str(doc_url)},
        }
        question_part = {
            "type": "text",
            "text": str(text),
        }
        self.req = {
            "model": self.meta.model,  # free-form, or a specific model ID
            "messages": [{"role": "user", "content": [file_part, question_part]}],
            "use_search": self.meta.use_search,
            "stream": self.meta.stream,
        }
        # Attach the conversation id when continuity across calls is requested.
        if self.meta.use_conversation_id:
            self.req["conversation_id"] = self.meta.conversation_id

    def Chat(self, doc_url: str, text: str):
        """Ask ``text`` about the document at ``doc_url``; returns the reply text."""
        self._build_doc_req(doc_url, text)
        return self.meta.request(self.req, self.api)


class KimiChat:
    """Plain text chat against the chat-completions endpoint."""

    def __init__(self, meta: "ChatMetaData") -> None:
        self.meta = meta
        self.api = "/v1/chat/completions"

    def _build_chat_req(self, text: str):
        # Single user message with a plain string payload.
        message = {"role": "user", "content": str(text)}
        self.req = {
            "model": self.meta.model,  # free-form, or a specific model ID
            "messages": [message],
            "use_search": self.meta.use_search,
            "stream": self.meta.stream,
        }
        # Attach the conversation id when continuity across calls is requested.
        if self.meta.use_conversation_id:
            self.req["conversation_id"] = self.meta.conversation_id

    def Chat(self, text: str):
        """Send ``text`` as a user message; returns the reply text."""
        self._build_chat_req(text)
        return self.meta.request(self.req, self.api)


class KimiOCR:
    """Image Q&A / OCR: sends an image URL plus a question to the OCR endpoint."""

    def __init__(self, meta: "ChatMetaData") -> None:
        self.meta = meta
        self.api = "/v1/ocr/completions"

    def _build_ocr_req(self, img_url: str, text: str):
        """Build ``self.req`` for an image question.

        Fix: the warning previously said search may interfere with *document*
        (文档) interpretation — a copy-paste from KimiDoc; it now correctly
        refers to the image (图片).
        """
        if self.meta.use_search:
            print("Warning: 您在图片询问中启动了联网搜索,可能会干扰图片的解读结果")
        # Request body: one user message carrying the image reference and the question.
        self.req = {
            "model": self.meta.model,  # free-form, or a specific model ID
            "messages": [
                {
                    "role": "user",
                    "content": [
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": str(img_url),  # image URL
                            },
                        },
                        {
                            "type": "text",
                            "text": str(text),  # the user's question
                        },
                    ],
                }
            ],
            "use_search": self.meta.use_search,
            "stream": self.meta.stream,
        }
        # Attach the conversation id when continuity across calls is requested.
        if self.meta.use_conversation_id:
            self.req["conversation_id"] = self.meta.conversation_id

    def Chat(self, img_url: str, text: str):
        """Ask ``text`` about the image at ``img_url``; returns the reply text."""
        self._build_ocr_req(img_url, text)
        return self.meta.request(self.req, self.api)


if __name__ == "__main__":
    # NOTE(review): hard-coded refresh token — this is a credential committed to
    # source and should be rotated and loaded from an environment variable or a
    # secrets store instead.
    token = "eyJhbGciOiJIUzUxMiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJ1c2VyLWNlbnRlciIsImV4cCI6MTczMzQ3NDQ3NiwiaWF0IjoxNzI1Njk4NDc2LCJqdGkiOiJjcmUxM2I2Ymk3c2V0bDM5cTExMCIsInR5cCI6InJlZnJlc2giLCJzdWIiOiJjcW1ndXZ1Y3A3ZjFocmY4Y24xMCIsInNwYWNlX2lkIjoiY3FtZ3V2dWNwN2YxaHJmOGNuMGciLCJhYnN0cmFjdF91c2VyX2lkIjoiY3FtZ3V2dWNwN2YxaHJmOGNuMDAifQ.1RCp2YGziSbI6Yd3IwxhNrbPNT1Up4mFHRvPlb9qoC-azw3EPUk8WRmMBBFCc_6DC5twr0Ea6oE0SwP59Ok19g"

    # Shared config: conversation continuity enabled, web search disabled.
    meta = ChatMetaData(
        refresh_token=token,
        use_conversation_id=True,
        use_search=False
    )
    # NOTE(review): hard-coded server address — presumably a private test host;
    # consider making it configurable.
    meta.setKimiServer("81.70.84.40",8000)

    # Prompt (Chinese): asks the model to read the paper and summarize its main
    # innovations, other notable points, and any observation-driven motivations.
    text1 = """
请阅读这篇论文，然后依次完成下面的工作：
1. 请使用3-5个要点说明本文的主要创新点。如果创新点较多，可以使用更多的要点。
2. 除开主要创新点外，这篇论文是否提供或说明了其他重要的要点。包括但不限于：实验的深入分析、方法或算法的不足和限制、可供发展或应用推广的角度等。
3. 本文提出的算法或方法，是否是通过观察到某些模型行为或特征而引出的。比如，因观察模型表现出某些现象而提出了具体的改进方法。如果存在这种情况，请你说明被观察到的现象是什么？现象产生了怎样的影响？
"""

    # 1) Document Q&A against a paper PDF.
    print("1. 文档解读")
    resp = KimiDoc(meta).Chat(
        doc_url="https://arxiv.org/pdf/2409.03271",
        text=text1,
    )
    print(resp)

    # Passing the same `meta` object keeps the conversation id, so the next
    # call continues the same LLM conversation.

    # 2) Follow-up question in the same conversation.
    print("2. 对话继续")
    resp = KimiChat(meta).Chat("这篇文章的核心创新点是什么？")
    print(resp)
