import requests
import json
import time
from utils.code_parser import Parser
from llm.call_llm_default_prompt import default_format_latex_prompt, default_combine_rule_prompt, default_requirement
from configs.settings import REPLICATE_SETTINGS, SERVER_SETTINGS

import utils.html_parser as html_parser
import utils.pre_process as pre_process
##############
# LLM API call wrappers
##############
def call_tgi_llm(inp, settings):
    """
    TGI LLM Engine (not implemented yet).

    Args:
        inp: Prompt string to send to the TGI server.
        settings: Engine settings dict (e.g. max_tokens).

    Raises:
        NotImplementedError: always — this engine is a placeholder.
    """
    # Fail loudly instead of silently returning None, so a mis-configured
    # engine selection is caught immediately.
    raise NotImplementedError("TGI LLM engine is not implemented")

def call_fake_llm(inp, settings=None):
    """Fake LLM engine: ignores the prompt and returns a canned reply.

    Useful for exercising the agent pipeline without a live model server.
    """
    canned_reply = "this is a fake llm output"
    return canned_reply


def call_aixcoderServer_llm_streaming(inp, is_stream=True, settings=None):
    """
    Call the aiXcoder server through its OpenAI-compatible chat endpoint.

    Args:
        inp: User prompt string.
        is_stream: If True, return a generator *function* yielding response
            chunks; if False, return the full response string.
        settings: Optional dict; only "max_tokens" is used. Falls back to
            SERVER_SETTINGS['default_output_length'] when absent or invalid.

    Returns:
        A generator function (streaming), the full string (non-streaming),
        or None on an HTTP error.
    """
    if not isinstance(settings, dict) or "max_tokens" not in settings:
        settings = {
            "max_tokens": SERVER_SETTINGS['default_output_length'],
        }
    headers = {
        'Content-Type': 'application/json',
    }
    json_data = {
        'model': 'tgi',
        'messages': [
            {'role': 'system', 'content': ''},
            {'role': 'user', 'content': inp},
        ],
        'stream': is_stream,
        'max_tokens': settings['max_tokens'],
    }
    response = requests.post(SERVER_SETTINGS['llm_url'] + '/v1/chat/completions',
                             headers=headers, json=json_data, stream=is_stream)
    # Shared error handling for both streaming and non-streaming modes.
    if response.status_code != 200:
        print("ERROR: Server Error with status code " + str(response.status_code))
        print(response.text)
        return None
    if is_stream:
        # Streaming output: parse server-sent events ("data: {...}" lines).
        def generate():
            response.encoding = 'utf-8'
            for chunk in response.iter_lines(decode_unicode=True, chunk_size=1):
                fieldname, _, value = chunk.partition(":")
                if fieldname == "data":
                    # copy from https://github.com/openai/openai-python/blob/main/src/openai/_streaming.py#L353
                    if value.startswith(" "):
                        value = value[1:]
                    if value.strip() == "[DONE]":
                        # OpenAI-style streams terminate with "data: [DONE]";
                        # json.loads would raise on it, so stop cleanly here.
                        return None
                    value = json.loads(value.strip())
                    if "error" in value:
                        print(value)
                        yield str(value)
                        return None
                    yield value['choices'][0]['delta']['content']
        return generate
    # Non-streaming output: whole message arrives in one JSON body.
    response.encoding = 'utf-8'
    data = response.json()
    return data['choices'][0]['message']['content']
    



def call_aixcoderServer_generate_stream_7_1(inp, is_stream=True, settings=None):
    """
    Call the aiXcoder server's raw TGI generate_stream endpoint.

    The model endpoint only supports streaming; when is_stream is False the
    stream is drained into a single string before returning.

    Args:
        inp: Prompt string.
        is_stream: If True, return a generator *function* yielding token
            texts; if False, return the concatenated output string.
        settings: Optional dict; only "max_tokens" is used, defaulting to
            SERVER_SETTINGS['default_output_length'].

    Returns:
        A generator function, the full string, or None on an HTTP error.
    """
    if not isinstance(settings, dict) or "max_tokens" not in settings:
        settings = {
            "max_tokens": SERVER_SETTINGS['default_output_length'],
        }
    headers = {
        'Content-Type': 'application/json',
    }
    json_data = {
        "inputs": inp,
        "parameters": {"max_new_tokens": settings['max_tokens'], "temperature": 0.001},
        "stream": True,
    }
    response = requests.post(SERVER_SETTINGS['llm_url_7_1'], headers=headers, json=json_data, stream=True)
    # The model endpoint only supports streaming output.
    if response.status_code != 200:
        print("ERROR: Server Error with status code " + str(response.status_code))
        print(response.text)
        return None

    def generate():
        # Parse server-sent events and yield token texts until end-of-turn.
        response.encoding = 'utf-8'
        for chunk in response.iter_lines(decode_unicode=True, chunk_size=1):
            fieldname, _, value = chunk.partition(":")
            if fieldname == "data":
                # copy from https://github.com/openai/openai-python/blob/main/src/openai/_streaming.py#L353
                if value.startswith(" "):
                    value = value[1:]
                value = json.loads(value.strip())
                if "error" in value:
                    print(value)
                    yield str(value)
                    return None
                if value['token']['text'] == "<|eot_id|>":
                    # Llama-3 end-of-turn token terminates the stream.
                    return None
                yield value['token']["text"]

    # TODO: hook skip_ngram_generate into the pipeline to drop unwanted token
    # runs (e.g. markdown code fences). Currently unused.
    def skip_ngram_generate(g_f, ngram=(['```', 'c'], ['```'])):
        # NOTE(review): this only drops the buffer when the *entire* buffer
        # equals one of the ngrams — a suffix match slips through; confirm
        # the intended semantics before enabling.
        cache_num = max(len(e) for e in ngram)
        cache = []
        for e in g_f():
            cache.append(e)
            if cache in ngram:
                cache = []
            if len(cache) > cache_num:
                yield cache.pop(0)
        for e in cache:
            yield e
    # generate = skip_ngram_generate(generate)

    if is_stream:
        return generate
    # Drain the stream into one string for non-streaming callers.
    return "".join(generate())
# def call_aixcoderServer_llm_old(inp, settings=None):
#     """
#     # Fake LLM Engine
#     """
#     default_url = SERVER_SETTINGS['llm_url']
#     inp = {'text': inp}
#     response = requests.post(default_url, json=inp, timeout=3000)
#     if response.status_code != 200:
#         return "ERROR: Server Error with status code " + str(response.status_code)
#     try:
#         out = response.json()['pred_text']
#         return out
#     except Exception as e:
#         return "ERROR: Server Error: failed to parse response " + str(e)


def call_cog_llm(inp, settings=None):
    """
    Cog LLM Engine via the Replicate HTTP predictions API.

    https://github.com/replicate/cog
    https://replicate.com/

    Creates a prediction, then polls its URL until a terminal state.

    Args:
        inp: Prompt string.
        settings: Optional dict of model parameters; defaults to greedy
            decoding with max_tokens=4096.

    Returns:
        The generated text on success, or None on failure/cancellation.
    """
    if settings is None:
        # Default LLM settings.
        settings = {
            "top_k": 10,
            "top_p": 0.95,
            "max_tokens": 4096,
            "temperature": 0.01,  # greedy decoding
            "system_prompt": "",
            "repeat_penalty": 1.1,
            "presence_penalty": 0,
            "frequency_penalty": 0,
        }
    url = "https://api.replicate.com/v1/predictions"
    headers = {
        "Authorization": f"Token {REPLICATE_SETTINGS['REPLICATE_API_TOKEN']}",
        "Content-Type": "application/json",
    }
    data = {
        "version": REPLICATE_SETTINGS["REPLICATE_MODEL_VERSION"],
        "input": {**settings, "prompt": inp},
    }
    response = requests.post(url, headers=headers, data=json.dumps(data))
    prediction_url = response.json()["urls"]["get"]
    # Poll the prediction until it reaches a terminal state.
    while True:
        response = requests.get(prediction_url, headers=headers)
        status = response.json()["status"]
        if status == "succeeded":
            return "".join(response.json()["output"])
        if status in ("failed", "canceled"):
            # "canceled" is also terminal — without this the loop spun forever.
            return None
        time.sleep(0.5)



def call_cog_llama3_llm(inp, settings=None, is_stream=False):
    """
    Cog LLM Engine (Llama-3-70B-Instruct) via the Replicate client library.

    https://github.com/replicate/cog
    https://replicate.com/

    Args:
        inp: Prompt string.
        settings: Unused; kept for interface parity with the other engines.
        is_stream: If True, return a generator *function* yielding text
            chunks; otherwise return the concatenated output string.

    Returns:
        A generator function or the full output string.
    """
    import os
    os.environ["REPLICATE_API_TOKEN"] = REPLICATE_SETTINGS['REPLICATE_API_TOKEN']
    import replicate  # imported lazily so the module loads without the package
    # Renamed from "input" to avoid shadowing the builtin.
    model_input = {
        "prompt": inp,
        "prompt_template": "{prompt}\n",
        "presence_penalty": 0,
        "frequency_penalty": 0,
        "max_tokens": 4096,
    }

    def generate():
        for event in replicate.stream(
            "meta/meta-llama-3-70b-instruct",
            input=model_input
        ):
            yield str(event)

    if is_stream:
        return generate
    return "".join(generate())
    
##############
# Prompt construction
##############
def format_latex_prompt_func(inp_string, prompt=""):
    """Fill the LaTeX-formatting prompt template with the input LaTeX.

    Args:
        inp_string: Raw LaTeX pseudo-code to embed.
        prompt: Template containing the "<INPUT_LATEX>" placeholder; falls
            back to default_format_latex_prompt when empty or None.

    Returns:
        The template with "<INPUT_LATEX>" replaced by inp_string.
    """
    if prompt is None or prompt.strip() == "":
        prompt = default_format_latex_prompt
    # str.replace (not an f-string) because the input LaTeX may contain "{}".
    return prompt.replace("<INPUT_LATEX>", inp_string)

def combine_rule_prompt_func(inp_dict, prompt=""):
    """Build the code-combination prompt from table / pseudo-code / draft.

    Args:
        inp_dict: Dict with keys "table_str", "pseudo_code", "c_code_draft".
        prompt: Extra requirement text; falls back to default_requirement
            when empty or None.

    Returns:
        default_combine_rule_prompt with all placeholders substituted.
    """
    if prompt is None or prompt.strip() == "":
        prompt = default_requirement
    # str.replace (not f-strings) because the fragments may contain "{}".
    return (default_combine_rule_prompt
            .replace("<TABLE>", inp_dict['table_str'])
            .replace("<pseudo_code>", inp_dict['pseudo_code'])
            .replace("<c_code_draft>", inp_dict['c_code_draft'])
            .replace("<REQUIREMENT>", prompt))



def tabletxt2md(table_txt):
    """Convert a tab-separated table string to markdown (placeholder).

    NOTE(review): the original ``replace("\\t", "\\t")`` was a no-op, so the
    text is currently passed through unchanged; this version makes that
    explicit.
    TODO: actually reformat the table into markdown.
    """
    return table_txt

def merge_streaming(out_f, prefix_list, suffix_list):
    """Wrap a streaming generator function with fixed prefix/suffix items.

    Args:
        out_f: Zero-argument callable returning an iterable of chunks.
        prefix_list: Items to yield before the stream.
        suffix_list: Items to yield after the stream.

    Returns:
        A zero-argument generator function yielding prefix_list, then the
        items of out_f(), then suffix_list.
    """
    def merged():
        yield from prefix_list
        yield from out_f()
        yield from suffix_list
    return merged

##############
# Agent main entry point
##############
def rule_based_agent(inp, is_stream=False):
    """
    Main LLM agent entry point.

    Combines the rule-based tool output with the data-dictionary table and
    pseudo-code into one prompt, then asks the LLM for the final code.

    Args:
        inp: Dict with keys 'pseudo_code', 'table_str', 'first_tool_out',
            'format_latex_prompt', 'combine_prompt' (plus 'config_path' and
            'key_words', used by the tool step when it is enabled).
        is_stream: If True, final_out is a generator function; otherwise
            it is the full output string.

    Returns:
        (final_out, history) where history records each LLM/tool exchange.
    """
    # Choose the LLM functions (non-streaming and streaming variants).
    # Inputs are truncated to the configured character limit.

    llm_func = lambda x: call_aixcoderServer_generate_stream_7_1(x[:SERVER_SETTINGS['default_input_charlength']], is_stream=False)
    llm_func_streaming = lambda x: call_aixcoderServer_generate_stream_7_1(x[:SERVER_SETTINGS['default_input_charlength']], is_stream=True)

    # llm_func = lambda x: call_cog_llama3_llm(x[:SERVER_SETTINGS['default_input_charlength']], is_stream=False)
    # llm_func_streaming = lambda x: call_cog_llama3_llm(x[:SERVER_SETTINGS['default_input_charlength']], is_stream=True)

    # llm_func = call_cog_llm
    # llm_func = call_fake_llm
    
    # Tool function: rule-based pseudo-code -> C code conversion.
    def tool_func(latex_str, other_info):
        # Returns generated code, or an "ERROR:..." string on any failure.
        try:
            pseudoCode = pre_process.pre_process(latex_str)
            pseudoCode = pre_process.add_semicolon(pseudoCode)
            print("预处理后的伪代码：\n{}".format(pseudoCode)+"\n"+"-"*50)
            parser = Parser(pseudoCode,other_info['config_path'],key_words=other_info['key_words'], llm_mode=True)
            parser.parse()
            code = parser.print_codes()
        except Exception as e:
            code = "ERROR:" + str(e)

        return code

    prompts = {
        "format_latex_prompt": inp['format_latex_prompt'],
        "combine_prompt": inp['combine_prompt'],
    }


    def format_latex_prompt_parser(out):
        # Extract the content of the first ``` fenced block, if present.
        # import ipdb; ipdb.set_trace()
        if "```" in  out:
            out = out.split("```")[1]
            if "```" in out:
                out = out.split("```")[0]
        return out
    history = []
    # import ipdb; ipdb.set_trace()
    
    # Step 1: let the LLM format the latex (currently disabled).
    # llm_inp = format_latex_prompt_func(inp['pseudo_code'], prompts['format_latex_prompt'])
    
    # formated_latex = llm_func(llm_inp)
    # history.append({"LLMinput": llm_inp, "LLMoutput": formated_latex})

    # Step 2: run the tool function (currently disabled).
    # formated_latex = format_latex_prompt_parser(formated_latex)
    # tool_out = tool_func(formated_latex, other_info=inp)
    # history.append({"tool_input": formated_latex, "tool_output": tool_out})

    # Instead, use the tool output produced from the raw pseudo_code upstream.
    tool_out = inp['first_tool_out']

    # Step 3: LLM merges all information and generates the final code.
    llm_inp = combine_rule_prompt_func({"pseudo_code": inp['pseudo_code'], "table_str": tabletxt2md(inp['table_str']), "c_code_draft": tool_out}, prompts['combine_prompt'])

    print("###################\nllm_inp:\n", llm_inp)

    print("###################\n")
    
    if is_stream:
        final_out_generator = llm_func_streaming(llm_inp)
        history.append({"LLMinput": llm_inp, "LLMoutput": "streaming output..."})
        
        # TODO: change here to include input in the editor
        # final_out = merge_streaming(final_out_generator, [llm_inp], []) 
        final_out = final_out_generator
    
    else:
        final_out = llm_func(llm_inp)
        history.append({"LLMinput": llm_inp, "LLMoutput": final_out})

    # print(history)
    # for e in history:
    #     print(e)
    #     print("\n")

    return final_out, history


def test_rule_based_agent():
    """Manual end-to-end smoke test for rule_based_agent.

    Loads recorded request params from a local JSON file, rebuilds the
    parser config, runs the agent in streaming mode, and prints the stream.
    Requires a reachable LLM server and the hard-coded params file below.
    """
    import time
    def test_get_llm_code_streaming(params):
        # Drive the full pipeline from one recorded `params` payload.
        start_time=time.time()
        format_latex_prompt = params['format_latex_prompt'] if 'format_latex_prompt' in params else ""
        combine_prompt = params['prompt'] if 'prompt' in params else ""



        # copy from get_code
        pseudoCode = ""
        config_json = ""
        config_path = ""
        table_str = ""
        tool_out = ""
        key_words = []

        config_path='./configs/config_tmp_for_llm_code.json' # temporary file to save the json config
        module_dict_str=pre_process.pre_process(params["dict"],True)
        if module_dict_str=="":
            module_dict_json=[{},{},{},{},"数据字典为空"]
        else:
            module_dict_json = html_parser.str_to_json_502_new(module_dict_str)
        project_dict_str=pre_process.pre_process(params["project_dict"])
        if project_dict_str!="":
            project_dict_json=html_parser.str_to_json_502_new(project_dict_str)
            module_dict_json = html_parser.merge_json(project_dict_json,module_dict_json) # merge json

        # Merge domain knowledge into the module dictionary.
        config_dict_str=params["configure_dict"]
        under_func_res,macro_res,key_word_config=html_parser.domain_to_json_502(config_dict_str)
        under_func_res.update(module_dict_json[1])
        module_dict_json[1]=under_func_res
        macro_res.update(module_dict_json[2])
        module_dict_json[2]=macro_res

        # default_dict_str=params["default_dict"]
        # under_func_res,macro_res,key_word_default=html_parser.domain_to_json_502(default_dict_str)
        # under_func_res.update(module_dict_json[1])
        # module_dict_json[1]=under_func_res
        # macro_res.update(module_dict_json[3])
        # module_dict_json[3]=macro_res      
        
        # Merge keywords (default-dict merge currently disabled).
        # if key_word_default!={}:
        #     for key in key_word_default.keys():
        #         if key in key_word_config.keys():
        #             s=set(key_word_default[key])
        #             s.update(key_word_config[key])
        #             key_word_config[key]=list(s)
        #         else:
        #             key_word_config[key]=key_word_default[key]
        key_words=key_word_config
        config_json=module_dict_json
        # table_str=config_dict_str+project_dict_str+module_dict_str
        table_str = project_dict_str+module_dict_str # TODO: adjust this to shape the data dictionary fed to the LLM
        with open(config_path, 'w',encoding="utf-8") as f:
            json.dump(config_json, f,ensure_ascii=False)
        pseudoCode = pre_process.pre_process(params["pseudoCode"])
        pseudoCode = pre_process.add_semicolon(pseudoCode)
        print("预处理后的伪代码：\n{}".format(pseudoCode)+"\n"+"-"*50)
        try:
            parser = Parser(pseudoCode,config_path,key_words=key_words, llm_mode=True)
            parser.parse()
            code = parser.print_codes()
        except Exception as e:
            code = "ERROR:" + str(e)

        # Use the first tool output as tool_out.
        tool_out = code
        try:
            # Build the agent input.
            agent_inp = {
                "pseudo_code": pseudoCode,
                "table_json": config_json,
                "config_path": config_path,
                "table_str": table_str,
                "format_latex_prompt": format_latex_prompt,
                "combine_prompt": combine_prompt,
                "first_tool_out": tool_out,
                "key_words": key_words
            }
            code, history = rule_based_agent(agent_inp, is_stream=True)
            end_time=time.time()
            print("streaming time:",end_time-start_time)
            # print("history:", history)
            # https://segmentfault.com/q/1010000043848847
            return code
        except Exception as e:
            return "ERROR:" + str(e)

    # NOTE(review): hard-coded local path — adjust for your machine.
    with open("/Users/zkcpku/Documents/seke/pretrain/latex2code/example_params/1719652066.0037792.get_code.params.json",'r') as f:
        params = json.load(f)
    tmp = test_get_llm_code_streaming(params)
    print(tmp)
    # Streaming mode returns a generator function; iterate it to print tokens.
    for c in tmp():
        print(c, end="", flush=True)

    

if __name__ == "__main__":
    # Manual smoke test; requires a reachable LLM server and the local
    # example-params file hard-coded in test_rule_based_agent().
    # test_call_aixcoderServer_generate_stream_7_1()
    test_rule_based_agent()
    