import json
import logging
import sys
from typing import Dict, List, Optional

import tiktoken
from zhipuai import ZhipuAI

# The path hack must run before the project-local import below.
sys.path.append('/workspace/qanything_local')
from qanything_kernel.connector.llm.base import BaseAnswer, AnswerResult

# Offline tiktoken configuration: cache directory and cache key for the
# pre-downloaded BPE file (avoids a network fetch at first use).
tiktoken_cache_dir = r'/opt/tiktoken_cache'
cache_key = '9b5ad71b2ce5302211f9c61530b329a4922fc6a4'

# zhipu_api_key = [
#     '431a84b9f95ce1b1232ad80745e87cd7.U4ZpEonkOQV20mhq',
#     "a8e4c210a4ef7ca308ff70dbb1e16025.3vwICkC7sCUyc8OM"
#     ]
zhipu_api_key = 'b54acea28ec93bf29a3f263197e35cdd.Z3RNc7rcmhWaPVB9'

class ZhipuAILLM(BaseAnswer):
    """LLM backend that wraps the ZhipuAI chat-completion API.

    Streaming helpers yield SSE-style strings of the form
    ``"data: {\"answer\": ...}"``; :meth:`generatorAnswer` wraps those
    chunks into ``AnswerResult`` objects for the QAnything pipeline.
    API errors are reported as answer text rather than raised.
    """

    model: str = "glm-4-flash"  # alternatives: "glm-3-turbo", "glm-4"
    token_window: int = 8000    # context-window budget, in tokens
    offcut_token: int = 50
    truncate_len: int = 50
    max_token: int = 512        # max tokens to generate per reply
    temperature: float = 0.2
    top_p: float = 0.9
    # Comma-separated stop sequences; None disables stopping.
    stop_words: Optional[str] = None
    # Kept for interface compatibility, but re-initialised per instance in
    # __init__ — the original shared this mutable list between instances.
    history: List[List[str]] = []
    history_len: int = 2        # number of past turns to keep

    def __init__(self):
        super().__init__()
        # Fresh per-instance history (fixes the shared-class-attribute bug).
        self.history = []
        self.client = ZhipuAI(api_key=zhipu_api_key)

    @property
    def _llm_type(self) -> str:
        return "using ZhipuAI API serve as LLM backend"

    @property
    def _history_len(self) -> int:
        return self.history_len

    def set_history_len(self, history_len: int = 10) -> None:
        self.history_len = history_len

    @staticmethod
    def _encoding():
        # Single code path for the tokenizer used by both counters.
        # NOTE(review): counts with a GPT-3.5 tokenizer, which only
        # approximates GLM tokenization — confirm this is intentional.
        return tiktoken.encoding_for_model("gpt-3.5-turbo-0613")

    def num_tokens_from_messages(self, message_texts) -> int:
        """Return the total token count of an iterable of plain strings."""
        encoding = self._encoding()
        return sum(
            len(encoding.encode(text, disallowed_special=()))
            for text in message_texts
        )

    def num_tokens_from_docs(self, docs) -> int:
        """Return the total token count of documents exposing ``page_content``."""
        encoding = self._encoding()
        return sum(
            len(encoding.encode(doc.page_content, disallowed_special=()))
            for doc in docs
        )

    def _call(self, prompt: str, history: List[List[str]], streaming: bool = False):
        """Send one chat request; yield SSE-formatted JSON answer chunks.

        :param prompt: the current user message.
        :param history: prior [question, answer] pairs, oldest first.
        :param streaming: if True, yield one chunk per delta; else one chunk.

        Exceptions from the API are logged and yielded as a chunk whose
        ``answer`` is the error text (original contract preserved).
        """
        messages: List[Dict[str, str]] = []
        # Bug fix: history must come BEFORE the current prompt.  The
        # original appended the pairs after it, presenting the
        # conversation to the model out of order.
        for question, answer in history or []:
            messages.append({"role": "user", "content": question})
            messages.append({"role": "assistant", "content": answer})
        messages.append({"role": "user", "content": prompt})

        try:
            response = self.client.chat.completions.create(
                model=self.model,
                messages=messages,
                stream=streaming,
                max_tokens=self.max_token,
                temperature=self.temperature,
                top_p=self.top_p,
                stop=self.stop_words.split(',') if self.stop_words is not None else None,
            )

            if streaming:
                for event in response:
                    # SDK objects are pydantic models; normalise to dict.
                    if not isinstance(event, dict):
                        event = event.model_dump()
                    choices = event.get('choices')
                    # builtin `list` here — isinstance against typing.List
                    # (as the original did) is deprecated.
                    if isinstance(choices, list) and len(choices) > 0:
                        event_text = choices[0]['delta']['content']
                        if isinstance(event_text, str) and event_text != "":
                            delta = {'answer': event_text}
                            yield "data: " + json.dumps(delta, ensure_ascii=False)
            else:
                event_text = response.choices[0].message.content if response.choices else ""
                delta = {'answer': event_text}
                yield "data: " + json.dumps(delta, ensure_ascii=False)

        except Exception as e:
            logging.error(f"Error calling ZhipuAI API: {e}")
            delta = {'answer': f"{e}"}
            yield "data: " + json.dumps(delta, ensure_ascii=False)

    def generatorAnswer(self, prompt: str,
                        history: Optional[List[List[str]]] = None,
                        streaming: bool = False):
        """Yield ``AnswerResult`` objects for *prompt*.

        ``history`` defaults to ``None`` instead of the original shared
        mutable ``[]`` (classic default-argument bug); ``None`` means a
        fresh history list.
        """
        if history is None:
            history = []
        # NOTE(review): the original deliberately sends an EMPTY history
        # to the API and uses `history` only for result bookkeeping;
        # preserved as-is — confirm whether history[-self.history_len:]
        # should be forwarded instead.
        response = self._call(prompt, history=[], streaming=streaming)

        complete_answer = ""
        if streaming:
            history.append([])
            for response_text in response:
                if response_text:
                    chunk_str = response_text[6:]  # strip the "data: " prefix
                    if not chunk_str.startswith("[DONE]"):
                        chunk_js = json.loads(chunk_str)
                        complete_answer += chunk_js["answer"]

                # Keep the running answer in the last history slot.
                history[-1] = [prompt, complete_answer]
                answer_result = AnswerResult()
                answer_result.history = history
                answer_result.llm_output = {"answer": response_text}
                answer_result.prompt = prompt
                yield answer_result
        else:
            for response_text in response:
                if response_text:
                    chunk_str = response_text[6:]  # strip the "data: " prefix
                    if not chunk_str.startswith("[DONE]"):
                        chunk_js = json.loads(chunk_str)
                        complete_answer += chunk_js["answer"]
                        history.append([prompt, complete_answer])

                answer_result = AnswerResult()
                answer_result.history = history
                answer_result.llm_output = {"answer": response_text}
                answer_result.prompt = prompt
                yield answer_result

    def _chat_once(self, prompt: str, model_name: str, streaming: bool) -> str:
        """Single-shot completion shared by chat/non_stream_chat.

        Returns the answer text, or the stringified exception on failure
        (matching the original error-as-text contract).
        """
        messages = [{"role": "user", "content": prompt}]
        try:
            response = self.client.chat.completions.create(
                model=model_name,
                messages=messages,
                stream=streaming,
                max_tokens=self.max_token,
                temperature=self.temperature,
                top_p=self.top_p,
                stop=self.stop_words.split(',') if self.stop_words is not None else None,
            )
            return response.choices[0].message.content if response.choices else ""
        except Exception as e:
            logging.error(f"Error calling ZhipuAI API: {e}")
            return f'{e}'

    async def non_stream_chat(self, prompt: str,
                              history: Optional[List[List[str]]] = None,
                              model_name: str = 'glm-3-turbo',
                              streaming: bool = False):
        """Async-signature wrapper around :meth:`_chat_once`.

        ``history`` is accepted for interface compatibility but unused,
        exactly as in the original.  The body is synchronous (the SDK
        call blocks) — preserved from the original.
        """
        return self._chat_once(prompt, model_name, streaming)

    def chat(self, prompt: str,
             history: Optional[List[List[str]]] = None,
             model_name: str = 'glm-3-turbo',
             streaming: bool = False):
        """Single-turn completion; returns answer text or the error string.

        ``history`` is accepted for interface compatibility but unused,
        exactly as in the original.
        """
        return self._chat_once(prompt, model_name, streaming)



if __name__ == "__main__":
    # Ad-hoc data-generation script: read a hotline knowledge-base CSV,
    # ask the LLM to distill each narrative entry into standard Q/A
    # pairs, and dump the first 20 processed rows to a JSON file.

    llm = ZhipuAILLM()

    streaming = False
    chat_history = []  # NOTE(review): defined but never used below
    
    import pandas as pd
    from html2text import html2text
    df = pd.read_csv(r'/workspace/qanything_local/qanything_kernel/row_data/poc_1.7w+_data/2024-03-29_市热线-知识库点点通信息_17603.csv', encoding='utf-8')

    # Write the generated Q/A pairs to a JSON file.
    n = 0
    with open('/workspace/qanything_local/qanything_kernel/row_data/poc_1.7w+_data/standard_qa_pair.json', 'w', encoding='utf-8') as json_file:
        QA = []
        for i in range(len(df)):
            # Skip rows whose category column already marks them as Q/A
            # content; only non-Q/A entries get converted.
            if df.iloc[i]['内容分类'] == '问答类':
                continue
            else:
                if n >= 20:
                    break  # cap this run at 20 generated entries
                # Strip HTML markup from the raw content before prompting.
                content = html2text(df.iloc[i]['内容'])
                print(content)
            
                # Prompt (Chinese): generate standard Q/A pairs whose answers
                # are verbatim excerpts of the content, output as JSON list.
                prompt = """根据以下参考信息中的内容部分，帮我生成若干个标准问答对，其中生成问题对应的答案必须忠于内容部分，找到答案不需要总结，直接从内容部分截取出来。
                # 参考信息:
                {content}
                # 回答格式要求:
                将`问题`和`答案`放到json中，最后放到列表中输出，其他额外信息不要输出。
                """
                response = llm.chat(prompt=prompt.format(content=content), model_name='glm-3-turbo')
                print(response)
                try:
                    # SECURITY: eval() executes arbitrary code coming back
                    # from the LLM — should be json.loads/ast.literal_eval.
                    qa_list = eval(response.split('```json\n')[-1].split('\n```')[0])
                    for qa in qa_list:
                        qa['内容'] = content
                except Exception as e:
                    # Fallback: reply was not a clean ```json block; scrape
                    # every flat {...} object out of the raw text instead.
                    qa_list = []
                    pattern = r'\{([^{}]*)\}'
                    import re
                    # Use findall to collect every brace-delimited substring.
                    qa_text = re.findall(pattern, response)
                    for qa in qa_text:
                        dic_qa = json.loads('{' + qa + '}')
                        dic_qa['内容'] = content  # attach the source content
                        qa_list.append(dic_qa)
                QA.extend(qa_list)
                n += 1
        json.dump(QA, json_file, ensure_ascii=False, indent=4)