import os
import time
import json
import requests
import pandas as pd
from app.client.OpenaiClient import openai_client
from feapder.utils.tools import get_file_list, get_md5
import nb_log

# Module-level logger for this kit; the "fastapi" and "uvicorn" loggers are
# also initialized here — presumably so nb_log configures their handlers at
# import time (TODO confirm intent).
logger = nb_log.get_logger("ParallelKit")
nb_log.get_logger("fastapi")
nb_log.get_logger("uvicorn")


# Project root: two directory levels above this file.
root_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# Shared HTTP session (connection pooling) for the whole module.
session = requests.Session()


# Bit layout of the 64-bit snowflake ID
WORKER_ID_BITS = 5
DATACENTER_ID_BITS = 5
SEQUENCE_BITS = 12

# Maximum values derived from the bit widths
MAX_WORKER_ID = -1 ^ (-1 << WORKER_ID_BITS)  # 2**5-1 0b11111
MAX_DATACENTER_ID = -1 ^ (-1 << DATACENTER_ID_BITS)

# Bit-shift offsets used when assembling an ID
# NOTE: "WOKER" is a typo for "WORKER"; kept as-is for compatibility.
WOKER_ID_SHIFT = SEQUENCE_BITS
DATACENTER_ID_SHIFT = SEQUENCE_BITS + WORKER_ID_BITS
TIMESTAMP_LEFT_SHIFT = SEQUENCE_BITS + WORKER_ID_BITS + DATACENTER_ID_BITS

# Mask used to wrap the per-millisecond sequence counter
SEQUENCE_MASK = -1 ^ (-1 << SEQUENCE_BITS)

# Twitter epoch (milliseconds) — the custom epoch of the snowflake algorithm
TWEPOCH = 1288834974657


class Singleton(object):
    """Class decorator caching a single instance of the wrapped callable.

    The first call constructs the instance with the supplied arguments;
    every subsequent call returns that same cached instance (any new
    arguments are silently ignored).
    """

    def __init__(self, cls):
        self._cls = cls
        self._instance = {}

    def __call__(self, *args, **kwargs):
        try:
            return self._instance[self._cls]
        except KeyError:
            obj = self._cls(*args, **kwargs)
            self._instance[self._cls] = obj
            return obj


@Singleton
class IdWorker(object):
    """Snowflake-style unique ID generator.

    Produces 64-bit integers composed of a millisecond timestamp (relative
    to TWEPOCH), a datacenter ID, a worker ID and a per-millisecond
    sequence counter.  Decorated with @Singleton, so only the arguments of
    the very first construction take effect process-wide.
    """

    def __init__(self, datacenter_id, worker_id, sequence=0):
        """
        :param datacenter_id: datacenter (machine region) ID
        :param worker_id: machine ID
        :param sequence: initial sequence number
        :raises ValueError: if either ID is outside its allowed bit range
        """
        # sanity check
        if worker_id > MAX_WORKER_ID or worker_id < 0:
            raise ValueError("worker_id值越界")

        if datacenter_id > MAX_DATACENTER_ID or datacenter_id < 0:
            raise ValueError("datacenter_id值越界")

        self.worker_id = worker_id
        self.datacenter_id = datacenter_id
        self.sequence = sequence

        self.last_timestamp = -1  # timestamp used by the previous get_id()

    def _gen_timestamp(self):
        """Return the current time as an integer millisecond timestamp."""
        return int(time.time() * 1000)

    def get_id(self):
        """Generate and return a new unique ID.

        :raises RuntimeError: if the system clock moved backwards —
            continuing would allow IDs from the replayed time window to
            collide with already-issued ones, so the request is rejected
            instead of merely logged.
        """
        timestamp = self._gen_timestamp()

        # Clock moved backwards: refuse to generate.  (Previously this only
        # logged and fell through, which could produce duplicate IDs.)
        if timestamp < self.last_timestamp:
            logger.error(
                f"clock is moving backwards. Rejecting requests until {self.last_timestamp}"
            )
            raise RuntimeError(
                f"clock is moving backwards. Rejecting requests until {self.last_timestamp}"
            )

        if timestamp == self.last_timestamp:
            # Same millisecond: bump the sequence; when it wraps to 0 the
            # millisecond is exhausted, so spin until the next one.
            self.sequence = (self.sequence + 1) & SEQUENCE_MASK
            if self.sequence == 0:
                timestamp = self._til_next_millis(self.last_timestamp)
        else:
            self.sequence = 0

        self.last_timestamp = timestamp

        return (
            ((timestamp - TWEPOCH) << TIMESTAMP_LEFT_SHIFT)
            | (self.datacenter_id << DATACENTER_ID_SHIFT)
            | (self.worker_id << WOKER_ID_SHIFT)
            | self.sequence
        )

    def _til_next_millis(self, last_timestamp):
        """Busy-wait until the clock passes *last_timestamp*, then return it."""
        timestamp = self._gen_timestamp()
        while timestamp <= last_timestamp:
            timestamp = self._gen_timestamp()
        return timestamp


def get_snow_flake(worker_id: int = 2):
    """Return a fresh snowflake ID from the process-wide IdWorker.

    NOTE: IdWorker is a singleton, so the datacenter_id/worker_id passed
    here only take effect on the very first call in the process.
    """
    return IdWorker(1, worker_id, 0).get_id()


# Snowflake ID generated once at import time — presumably identifies this
# process/batch run; confirm against consumers of batch_id.
batch_id = get_snow_flake()


def transform_kb_data_with_pandas(kb_data):
    """Group knowledge-base records by name.

    :param kb_data: records with "kb_name" and "kb_id" fields, in any form
        pandas.DataFrame accepts (e.g. a list of dicts).
    :return: dict mapping each kb_name to the list of its kb_ids
        (keys appear in groupby's sorted order).
    """
    frame = pd.DataFrame(kb_data)
    return {
        name: list(group["kb_id"])
        for name, group in frame.groupby("kb_name")
    }


def read_config(knowledge_name):
    """Load and parse the bot configuration for *knowledge_name*.

    Reads bot_conf/<knowledge_name>_BOT.json under the project root.

    :param knowledge_name: base name of the knowledge base / bot.
    :return: the parsed configuration object.
    :raises Exception: if the file is missing or contains invalid JSON;
        the original error is preserved as __cause__ via exception chaining.
    """
    file_path = os.path.join(root_dir, "bot_conf", f"{knowledge_name}_BOT.json")
    try:
        with open(file_path, "r", encoding="utf-8") as f:
            # json.load parses the stream directly; no need for f.read().
            config = json.load(f)
    except (FileNotFoundError, json.JSONDecodeError) as e:
        # Chain the original exception so the root cause stays visible.
        raise Exception(f"读取配置文件出错: {e}") from e
    return config


def generate_chat_completion(prompt, system_message=None, model="gpt-4o", temperature=0, top_p=1):
    """Generate a chat completion via the shared openai_client.

    :param prompt: the user's input/question, sent as the "user" message.
    :param system_message: optional message prepended to the conversation.
        NOTE(review): it is inserted as-is, so callers appear to be expected
        to pass a full message dict like {"role": "system", "content": ...};
        a bare string would yield an invalid message — confirm with callers.
    :param model: model name to use (default "gpt-4o").
    :param temperature: sampling temperature; 0 = least random (default 0).
    :param top_p: nucleus-sampling probability cutoff (default 1).
    :return: the chat-completion response object returned by the API.
    """
    messages = [{"role": "user", "content": prompt}]
    if system_message:
        messages.insert(0, system_message)
    return openai_client.chat.completions.create(
        messages=messages,
        model=model,
        temperature=temperature,
        top_p=top_p,
    )
