import json
import logging
from typing import Optional
from openai import OpenAI
import os
import re
import requests
import time
from random import choice
from redis import Redis
from uuid import uuid4
import hashlib
import numpy as np

# Shared Redis connections on the internal host 192.168.1.92.
# r  (db=8): holds async LLM responses keyed by request UUID — polled by call_model_rx.
# r1 (db=7): prompt -> response cache keyed by an md5 digest — read/written by call_model.
# NOTE(review): hardcoded host/port — presumably an internal LAN service; confirm before reuse.
r = Redis(host='192.168.1.92', port=6379, db=8)
r1 = Redis(host='192.168.1.92', port=6379, db=7)


def getjson(x):
    """Extract the first JSON-looking fragment from raw LLM output text.

    Tries a sequence of patterns from most specific (fenced ```json blocks)
    to most permissive (any {...} span). Returns the captured substring of
    the first pattern that matches, or None (after printing the raw text
    for debugging) when nothing matches.
    """
    # NOTE(review): the first pattern ends with two backticks, not three —
    # it still matches a normal ```-closed fence (the third backtick is
    # simply left unconsumed), so behavior is preserved here.
    patterns = [
        r'```json\n(.*?)\n``',
        r'```\n(.*?)\n```',
        r'^(\{.*?\})\n\n',
        r'^(\{.*?\})$',
        # (a duplicate of the '\n\n' pattern was removed — it could never
        # match when the first copy had already failed)
        r'(\{.*?\})',
    ]
    for pattern in patterns:
        matches = re.findall(pattern, x, re.DOTALL)
        if matches:
            return matches[0]
    # Nothing matched: dump the raw text so the failure is visible in logs.
    print(x, '\n\n||\n\n')
    return None


def checkjson(x):
    """Progressively strip junk from LLM output and try to parse it as a dict.

    After each cleanup substitution (line comments, then newlines/tabs, then
    a trailing ', "explanation": ...}' tail) the intermediate text is handed
    to eval(); the first successful parse is returned. Returns None when no
    stage parses.
    """
    # (A dead first assignment of the pattern list was removed — it was
    # immediately overwritten in the original.)
    cleanup_patterns = [
        r"//.*",                        # strip // line comments
        r'[\n\t]*',                     # strip newlines and tabs
        r'\,\s+\"explanation".+?\}',    # drop an '"explanation": ...' tail
    ]
    txt = x
    for pattern in cleanup_patterns:
        txt = re.sub(pattern, "", txt)
        try:
            # SECURITY(review): eval() on model output can execute arbitrary
            # expressions — ast.literal_eval would be safer; kept as eval to
            # preserve existing parsing behavior.
            return eval(txt)
        except Exception:
            continue
    return None


def call_qwen_72b(prompt, max_retries, max_tokens):
    """Call Qwen2-72B-Instruct via the Together.ai OpenAI-compatible API.

    Accepts either a raw string (wrapped as a single user message) or a
    messages list. Retries up to max_retries times with a 10s back-off.
    Returns the response text, or None when every attempt fails.
    max_tokens is currently not forwarded (kept for interface compatibility).
    """
    if isinstance(prompt, str):
        prompt = [{"role": "user", "content": prompt}]
    # SECURITY(review): API key hardcoded in source — should come from an
    # environment variable.
    TOGETHER_API_KEY = '8c44a484e6d4a9c45b7ed694b688df7664d6ecd972b3cfdee699056c44f187fd'
    # Build the client once instead of on every retry.
    client = OpenAI(
        api_key=TOGETHER_API_KEY,
        base_url='https://api.together.xyz/v1',
    )
    for _ in range(max_retries):
        try:
            chat_completion = client.chat.completions.create(
                messages=prompt,
                model="Qwen/Qwen2-72B-Instruct",
                temperature=0.0001,
                # max_tokens=max_tokens
            )
            response = chat_completion.choices[0].message.content
            if response:
                return response
            # Empty response: dump diagnostics, then retry via the handler.
            print('='*100)
            print(chat_completion)
            print('='*100)
            print(prompt)
            print('='*100)
            # Original used a bare `raise` with no active exception, which
            # itself raised RuntimeError; make that intent explicit.
            raise RuntimeError('empty response from model')
        except Exception:
            time.sleep(10)
    return None


def call_model_rx(prompt, max_retries, response_json_schema: dict = None, max_tokens=2000):
    """Submit an async chat request to the rx endpoint, then poll Redis for it.

    The request is tagged with a fresh UUID; the remote service is expected
    to deposit the completed response in Redis (db=8) under that UUID.
    Polls every 2s for up to 5 minutes; returns the decoded response, or
    the sentinel string 'no kafka result' on timeout.
    """
    if isinstance(prompt, str):
        prompt = [{"role": "user", "content": prompt}]
    url = 'http://api.unicdata.cn:32125/v1/chat/completions/xuji4000'
    request_id = str(uuid4())
    data = {
        "messages": prompt,
        "requestId": request_id,
        "if_async": True,
        # BUG FIX: was hard-coded to 2000, silently ignoring the parameter.
        "max_tokens": max_tokens,
        "temperature": 0.001,
        "skip_retrieval": True,
        "stop_token_ids": [151643, 151644, 151645],
    }
    if response_json_schema:
        data['guided_json'] = response_json_schema
        data['response_format'] = {"type": "json_object"}

    # Best-effort submission: retry transport errors, give up after
    # max_retries (the Redis poll below will then simply time out).
    for _ in range(max_retries):
        try:
            requests.post(url, json=data)
            break
        except Exception:
            continue

    start_time = time.time()
    while True:
        result = r.get(request_id)
        if result:
            return result.decode('UTF-8')
        time.sleep(2)
        if time.time() - start_time > 300:  # 5 minutes
            return 'no kafka result'


def call_model_bak(prompt: list, response_json_schema: dict = None, max_tokens=2000):
    """Call the Qwen2-7B server at 192.168.1.92 and log the exchange.

    Optionally constrains the output with vLLM guided JSON decoding when a
    schema is supplied. Appends prompt and response to a per-LOCALTIME log
    file, then returns the response text.
    """
    model = "Qwen2-7B"
    client = OpenAI(
        base_url="http://192.168.1.92:40435/v1",
        api_key="EMPTY",
    )

    request_kwargs = {
        'model': model,
        'messages': prompt,
        'max_tokens': max_tokens,
    }
    if response_json_schema:
        # Ask the server for guided JSON decoding against the schema.
        request_kwargs['extra_body'] = {
            "response_format": {"type": "json_object"},
            "guided_json": response_json_schema
        }

    completion = client.chat.completions.create(**request_kwargs)
    response = completion.choices[0].message.content

    log_path = f'/home/jupyter/CZP/gpt-researcher/log/prompt_{os.environ.get("LOCALTIME")}.log'
    with open(log_path, 'a+', encoding='UTF-8') as log_file:
        log_file.write(f'''
                {'='*10}{model}{'='*10}
                {prompt}
                {'='*20}
                {response}
                {'*'*20}
                
                ''')
    return response


def call_model_low(prompt: list, response_json_schema: dict = None, max_tokens=2000):
    """Call the Qwen2-7B server on the 3090 host (192.168.2.53) and log it.

    When a schema is given, guided JSON decoding is requested via the vLLM
    extra_body fields. The prompt/response pair is appended to the
    per-LOCALTIME log file before the response text is returned.
    """
    model = "Qwen2-7B"

    payload = dict(model=model, messages=prompt, max_tokens=max_tokens)
    if response_json_schema:
        # Constrain generation to JSON matching the supplied schema.
        payload['extra_body'] = {
            "response_format": {"type": "json_object"},
            "guided_json": response_json_schema
        }

    llm_client = OpenAI(
        base_url="http://192.168.2.53:30013/v1",
        api_key="EMPTY",
    )
    response = llm_client.chat.completions.create(**payload).choices[0].message.content

    with open(f'/home/jupyter/CZP/gpt-researcher/log/prompt_{os.environ.get("LOCALTIME")}.log', 'a+', encoding='UTF-8') as f:
        f.write(f'''
                {'='*10}{model}{'='*10}
                {prompt}
                {'='*20}
                {response}
                {'*'*20}
                
                ''')

    return response


def predbailian(prompt, max_retries, response_json_schema: dict = None, max_tokens=2000):
    """Call DashScope qwen-plus, optionally forcing a JSON schema via a tool.

    A string prompt is wrapped as a single user message. Retries up to
    max_retries times with a 10s back-off; returns the response text (or the
    tool-call arguments when a schema is used), or None when all attempts
    fail.
    """
    # SECURITY(review): API keys hardcoded in source; the duplicated entries
    # make choice() a no-op — should come from configuration.
    api_key_list = [
        "sk-fc0b61726c1d45b2a11946e8caa5ec62",
        "sk-fc0b61726c1d45b2a11946e8caa5ec62",
        "sk-fc0b61726c1d45b2a11946e8caa5ec62",
    ]

    if isinstance(prompt, str):
        prompt = [{"role": "user", "content": prompt}]
    for i in range(max_retries):
        key = choice(api_key_list)
        client = OpenAI(
            api_key=key,
            base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",
        )
        try:
            if response_json_schema:
                # Expose the schema as a single "function" so the model
                # returns structured arguments matching it.
                completion = client.chat.completions.create(
                    model="qwen-plus",
                    messages=prompt,
                    temperature=0.001,
                    # max_tokens=max_tokens,
                    tools=[
                        {
                            "type": "function",
                            "function": {
                                "name": "schema",
                                "description": "format_output",
                                "parameters": response_json_schema
                            }
                        }
                    ]
                )
                try:
                    content = completion.choices[0].message.content
                    # BUG FIX: when the model answers via a tool call,
                    # message.content is None and len(None) raised TypeError,
                    # which cascaded into an endless retry loop.
                    if content is None or len(content) < 2:
                        content = completion.choices[0].message.tool_calls[0].function.arguments
                except Exception:
                    # Legacy response shape fallback.
                    content = completion.choices[0].messages[0]['plugin_call']['arguments']
                return content
            completion = client.chat.completions.create(
                model="qwen-plus",
                messages=prompt,
                temperature=0.001,
                # max_tokens=max_tokens,
            )
            return completion.choices[0].message.content
        except Exception as e:
            print(i, '='*10, key)
            print(e)
            time.sleep(10)
    return None

def predbailian_turbo(prompt, max_retries, response_json_schema: dict = None, max_tokens=2000):
    """Call DashScope qwen-turbo, optionally forcing a JSON schema via a tool.

    Mirrors predbailian but targets the cheaper qwen-turbo model. Retries up
    to max_retries times with a 10s back-off; returns the response text (or
    tool-call arguments when a schema is used), or None on total failure.
    """
    # SECURITY(review): API keys hardcoded in source; duplicated entries make
    # choice() a no-op. (A block of commented-out spare keys was removed.)
    api_key_list = [
        "sk-79c4febbe6854062ae2eb64c87251453",
        "sk-79c4febbe6854062ae2eb64c87251453",
        "sk-79c4febbe6854062ae2eb64c87251453",
    ]

    if isinstance(prompt, str):
        prompt = [{"role": "user", "content": prompt}]
    for i in range(max_retries):
        key = choice(api_key_list)
        client = OpenAI(
            api_key=key,
            base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",
        )
        try:
            if response_json_schema:
                completion = client.chat.completions.create(
                    model="qwen-turbo",
                    messages=prompt,
                    temperature=0.001,
                    # max_tokens=max_tokens,
                    tools=[
                        {
                            "type": "function",
                            "function": {
                                "name": "schema",
                                "description": "format_output",
                                "parameters": response_json_schema
                            }
                        }
                    ]
                )
                try:
                    content = completion.choices[0].message.content
                    # BUG FIX: content is None for tool-call answers;
                    # len(None) raised TypeError and forced endless retries.
                    if content is None or len(content) < 2:
                        content = completion.choices[0].message.tool_calls[0].function.arguments
                except Exception:
                    # Legacy response shape fallback.
                    content = completion.choices[0].messages[0]['plugin_call']['arguments']
                return content
            # BUG FIX: this branch called "qwen-plus" (copy-paste from
            # predbailian) despite this being the turbo variant.
            completion = client.chat.completions.create(
                model="qwen-turbo",
                messages=prompt,
                temperature=0.001,
                # max_tokens=max_tokens,
            )
            return completion.choices[0].message.content
        except Exception as e:
            print(i, '='*10, key)
            print(e)
            time.sleep(10)
    return None
def call_model(prompt, model: str, max_retries: int = 10, response_json_schema: dict = None, max_tokens=2000, use_cache=True):
    """Dispatch a prompt to the backend selected by `model`, with Redis caching.

    The cache key is the md5 of prompt + schema + model name (Redis db=7).
    Known model names: "rx-7b"/"small", "3090-7b", "tongji-7b",
    "together-72b", "medium", "bailian_turbo". Returns the response text, or
    None for an unknown model name / total backend failure.
    """
    prompt_md5 = hashlib.md5(
        (str(prompt) + str(response_json_schema) + model).encode('utf-8')).hexdigest()
    cache_key = 'prompt_cache:' + prompt_md5

    if use_cache:
        cached = r1.get(cache_key)
        if cached:
            print('get result from redis cache')
            return cached.decode()

    # Dispatch: original used independent `if`s; elif makes exclusivity clear.
    if model in ("rx-7b", "small"):
        result = call_model_rx(prompt, max_retries=max_retries,
                               response_json_schema=response_json_schema, max_tokens=max_tokens)
    elif model == "3090-7b":
        result = call_model_low(
            prompt, response_json_schema=response_json_schema, max_tokens=max_tokens)
    elif model == "tongji-7b":
        result = call_model_bak(
            prompt, response_json_schema=response_json_schema, max_tokens=max_tokens)
    elif model == "together-72b":
        result = call_qwen_72b(
            prompt, max_retries=max_retries, max_tokens=max_tokens)
    elif model == "medium":
        result = predbailian(
            prompt, max_retries=max_retries, response_json_schema=response_json_schema, max_tokens=max_tokens)
    elif model == "bailian_turbo":
        result = predbailian_turbo(
            prompt, max_retries=max_retries, response_json_schema=response_json_schema, max_tokens=max_tokens)
    else:
        result = None  # unknown model name

    # BUG FIX: r1.set(key, None) raises redis.exceptions.DataError; only
    # cache real results.
    if result is not None:
        r1.set(cache_key, result)

    return result


def text2vec(text):
    """Embed `text` with bge-large-zh-v1.5 served at 192.168.2.53:9997.

    Returns the embedding as a 1-D numpy array.
    """
    client = OpenAI(
        # Fixed typo 'EMPYT' -> 'EMPTY' for consistency with the other
        # local endpoints in this file; the server ignores the key.
        api_key='EMPTY',
        base_url="http://192.168.2.53:9997/v1",
    )
    embedding = client.embeddings.create(
        model="bge-large-zh-v1.5",
        input=text,
    ).data[0].embedding
    return np.array(embedding)