# from transformers import AutoTokenizer, AutoModel
import os 
import json
from chunck import LangChain_Chunck
"""
 Alpaca 格式：
{
  "dataset_name": {
    "file_name": "dataset.json",
    "columns": {
      "prompt": "instruction",
      "query": "input",
      "response": "output",
      "system": "system",
      "history": "history"
    }
  }
}

ShareGPT 格式
{
  "dataset_name": {
    "file_name": "dataset.json",
    "formatting": "sharegpt",
    "columns": {
      "messages": "conversations",
      "system": "system",
      "tools": "tools"
    },
    "tags": {
      "role_tag": "from",
      "content_tag": "value",
      "user_tag": "human",
      "assistant_tag": "gpt"
    }
  }
}



"""


# Pin CUDA to GPU 0 — leftover from the commented-out local ChatGLM path
# below; harmless when only the remote DeepSeek API is used.
os.environ["CUDA_VISIBLE_DEVICES"] = "0"

# System prompt (Chinese): instructs the model to generate as many unique
# QA pairs as possible from the supplied text, with detailed answers in
# Simplified Chinese, emitted as a JSON array of
# {"question": ..., "answer": ...} objects. The chunk text is appended to
# this prompt at call time in main().
SYSTEM_PROMPT = """
    你是一个能根据提供的文本内容生成QA对的机器人。以下是你的任务要求：
    1. 生成尽可能多的QA对。
    2. 每个QA对包含一个问题和一个尽可能详细的答案。
    3. 答案必须用简体中文。
    4. 生成的QA对不能重复。
    5. 使用json格式将QA对包裹起来，问题用"question"表示，答案用"answer"表示。
    
    示例格式：
    [{"question": "...","answer": "..."},{"question": "...","answer": "..."}]
    以下是给定的文本内容：
    """

 
# # 加载模型和分词器
# model_dir = "../../../../../chatglm3-6b"
# tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
# model = AutoModel.from_pretrained(model_dir, trust_remote_code=True).half().cuda()
# # 设置模型为评估模式
# model = model.eval()


# Please install OpenAI SDK first: `pip3 install openai`
# DeepSeek exposes an OpenAI-compatible endpoint, so the OpenAI SDK is reused.

from openai import OpenAI

# SECURITY NOTE(review): the API key was hard-coded in source. Prefer the
# DEEPSEEK_API_KEY environment variable; the original literal is kept only as
# a backward-compatible fallback, but it is exposed in version control and
# should be revoked and rotated.
client = OpenAI(
    api_key=os.environ.get("DEEPSEEK_API_KEY", "sk-96f4037c33bf48fe8a56c0f253dc78fa"),
    base_url="https://api.deepseek.com",
)


import re
        
def _parse_qa_json(raw_text):
    """Extract and parse the JSON array of QA dicts from a model reply.

    The model is asked for a bare JSON array but often wraps it in markdown
    fences (```json ... ```), so the outermost ``[...]`` span is located and
    parsed with ``json.loads``. ``eval`` is never used: the reply is
    untrusted remote content and evaluating it would execute arbitrary
    expressions.

    Returns the parsed list, or ``None`` when no valid JSON array is found.
    """
    match = re.search(r'\[.*\]', raw_text, re.DOTALL)
    if match is None:
        print(f"JSON 解析失败: 未找到 JSON 数组")
        print(f"尝试解析的内容: {raw_text[:200]}")
        return None
    try:
        return json.loads(match.group(0))
    except json.JSONDecodeError as e:
        print(f"JSON 解析失败: {e}")
        print(f"尝试解析的内容: {match.group(0)[:200]}")
        return None


def main():
    """Chunk ./data, request QA pairs from the DeepSeek API per chunk, and
    save the accumulated pairs to selfQA_extension.json next to this script.
    """
    langchain_chunck = LangChain_Chunck("./data")
    chuncks = langchain_chunck(200, 20)
    qa_pairs = []

    # One API call per text chunk; unparseable replies are skipped, not fatal.
    # (The original `break` after the first chunk — a debug leftover — and the
    # eval() of the raw reply have been removed.)
    for chunck in chuncks:
        prompt = SYSTEM_PROMPT + f"{chunck.page_content} 请开始生成 QA 对:"
        print("开始调用deepseek api")
        response = client.chat.completions.create(
            model="deepseek-chat",
            messages=[
                {"role": "system", "content": "You are a helpful assistant"},
                {"role": "user", "content": prompt},
            ],
            stream=False,
        )
        data = _parse_qa_json(response.choices[0].message.content)
        if data is None:
            # Skip instead of crashing: the original referenced `data` even
            # after a failed parse, raising NameError.
            continue
        print("解析成功，JSON 数据：")
        print(data)
        qa_pairs.extend(data)

    # Resolve the output path relative to this file so the result does not
    # depend on the current working directory.
    script_dir = os.path.dirname(__file__)
    file_path = os.path.join(script_dir, "selfQA_extension.json")
    with open(file_path, "w", encoding="utf-8") as f:
        json.dump(qa_pairs, f, ensure_ascii=False, indent=4)
    print("QA对已保存至selfQA_extension.json文件")


if __name__ == '__main__':
    main()