from openai import OpenAI
from multiprocessing import cpu_count

# DeepSeek exposes an OpenAI-compatible HTTP API, so the official OpenAI
# client works against it once `base_url` points at DeepSeek's endpoint.
base_url = "https://api.deepseek.com"
# NOTE(review): hardcoded placeholder credential — load from an environment
# variable (e.g. DEEPSEEK_API_KEY) instead of committing a real key here.
api_key = "xxxxx"
model_name = 'deepseek-chat'

# Single shared client instance, reused by every request helper below.
client = OpenAI(
    base_url=base_url,
    api_key=api_key,
)


def _generate_deepseek(
        query_str,
        model_name=model_name,
        n=1,
        temperature=0.7,
        top_p=1,
        top_k=-1,  # accepted but not forwarded: the OpenAI chat API has no `top_k`
        max_new_tokens=2048,
        stop_token_ids=None,  # accepted but not forwarded: OpenAI uses `stop` strings, not token ids
        stop_str=None,
        include_stop_str_in_output=False,  # accepted but not forwarded: no OpenAI equivalent
):
    """Issue one chat-completion request for ``query_str`` and return the text.

    Sends ``query_str`` as a single user message through the module-level
    ``client``. Returns a single string when the API yields exactly one
    choice, otherwise a list of strings (one per choice).
    """
    # Normalize the stop condition into the list-or-None shape the API expects.
    if isinstance(stop_str, list):
        stop = stop_str
    elif stop_str:
        stop = [stop_str]
    else:
        stop = None

    completion = client.chat.completions.create(
        model=model_name,
        messages=[{"role": "user", "content": query_str}],
        n=n,
        temperature=temperature,
        top_p=top_p,
        max_tokens=max_new_tokens,
        stop=stop,
        logprobs=True,  # request token-level log-probabilities alongside the text
    )
    texts = [choice.message.content for choice in completion.choices]
    return texts[0] if len(texts) == 1 else texts



import concurrent.futures
from typing import List, Any

def generate_deepseek(query_str,
        model_name=model_name,
        n=1,
        temperature=0.7,
        top_p=1,
        top_k=-1,  # accepted for signature parity; the OpenAI chat API has no `top_k`
        max_new_tokens=2048,
        stop_token_ids=None,  # accepted for parity; OpenAI uses `stop` strings
        stop_str=None,
        include_stop_str_in_output=False,  # accepted for parity; no OpenAI equivalent
):
    """Generate ``n`` completions for ``query_str`` via a thread pool.

    Fans out ``n`` independent single-completion calls to
    ``_generate_deepseek`` (one thread per request, pool sized to
    ``cpu_count()``). A request that raises is logged and contributes
    ``None`` to the output instead of aborting the batch.

    Args:
        query_str: The user prompt sent with every request.
        model_name: Chat model identifier passed through to the API.
        n: Number of completions to request (one API call each).
        temperature, top_p, max_new_tokens, stop_str: Sampling controls
            forwarded to ``_generate_deepseek``.
        top_k, stop_token_ids, include_stop_str_in_output: Accepted for
            interface parity only; ``_generate_deepseek`` does not forward
            them to the OpenAI-compatible API.

    Returns:
        A single result (str, or None on failure) when exactly one result
        was produced; otherwise a list of results in completion order
        (not submission order).
    """
    def task(prompt):
        # Each worker asks for exactly one completion (n=1); the requested
        # multiplicity comes from fanning out across the pool. Keyword
        # forwarding keeps this robust against parameter reordering.
        return _generate_deepseek(
            prompt,
            model_name=model_name,
            n=1,
            temperature=temperature,
            top_p=top_p,
            top_k=top_k,
            max_new_tokens=max_new_tokens,
            stop_token_ids=stop_token_ids,
            stop_str=stop_str,
            include_stop_str_in_output=include_stop_str_in_output,
        )

    results = []
    with concurrent.futures.ThreadPoolExecutor(max_workers=cpu_count()) as executor:
        # Submit all n requests up front; every future carries the same prompt.
        futures = [executor.submit(task, query_str) for _ in range(n)]

        # Collect results as they finish (completion order, not submission order).
        for future in concurrent.futures.as_completed(futures):
            try:
                results.append(future.result())
            except Exception as e:
                # Best effort: record the failure and keep the remaining results.
                print(f'生成 generate_deepseek({query_str}) 时出错: {e}')
                results.append(None)
    if len(results) == 1:
        results = results[0]
    return results


