import json
import os
import re
import time

from chonkie import RecursiveChunker, RecursiveRules
from openai import OpenAI
from transformers import AutoTokenizer


class qac_generate:
    """Generate question/answer/chunk (QAC) data from markdown documents.

    Pipeline: clean extraction artifacts -> split on top-level headings ->
    greedily merge small chunks up to ``max_length`` characters -> re-split
    oversized chunks with a token-aware recursive chunker -> request QA
    pairs per chunk from an OpenAI-compatible chat model -> append results
    to a JSON file.
    """

    def __init__(
        self,
        tokenizer_config,
        openai_config,
        generate_config,
        chunk_size=512,
        max_length=1000,
    ):
        """
        :param tokenizer_config: dict with key "model" (HF tokenizer name/path)
        :param openai_config: dict with keys "api_key" and "base_url"
        :param generate_config: dict with key "question_num" (questions per
            chunk); may optionally carry "model" (chat model, default "gpt-4o")
        :param chunk_size: token budget for the recursive chunker
        :param max_length: character budget when merging heading chunks
        """
        self.TOKENIZER_CONFIG = tokenizer_config
        self.OPENAI_CONFIG = openai_config
        self.GENERATE_CONFIG = generate_config
        # Load the tokenizer used by the recursive chunker.
        self.tokenizer = AutoTokenizer.from_pretrained(self.TOKENIZER_CONFIG["model"])
        self.max_length = max_length
        self.chunk_size = chunk_size
        # Token-aware chunker used to re-split oversized text blocks.
        self.chunker = RecursiveChunker(
            self.tokenizer, chunk_size=self.chunk_size, rules=RecursiveRules()
        )
        # OpenAI-compatible API client.
        self.client = OpenAI(
            api_key=self.OPENAI_CONFIG["api_key"],
            base_url=self.OPENAI_CONFIG["base_url"],
        )

    def _text_clean(self, text: str) -> str:
        """Strip markdown/PDF-extraction artifacts from *text*.

        Returns "" when the text is dominated by unreadable ``(cid:N)``
        glyphs (more than one per ~20 input characters), i.e. the source
        extraction is too broken to be worth generating questions from.
        """
        # Drop HTML-ish tags, then figure stubs / colspan residue /
        # "Page N" page breaks / markdown '#' markers.
        clean_text = re.sub(r"<[^>]+>", " ", text)
        clean_text = re.sub(
            r"!\[Figure\]\(\)|colspan=\d+>|\s+Page \d+\s*\n---|#", "", clean_text
        )
        # Drop standalone page-number lines.
        clean_text = re.sub(r"\n\d+\n", "\n", clean_text)
        # (cid:N) marks glyphs the PDF extractor could not map to text.
        matches = re.findall(r"\(cid:\d+\)", clean_text)
        if len(matches) > int(len(text) / 20):
            return ""
        clean_text = re.sub(r"\(cid:\d+\)", "", clean_text)
        # Normalize double quotes to single quotes (avoids breaking the
        # JSON the model is asked to emit around quoted excerpts).
        clean_text = clean_text.replace('"', "'")
        return clean_text

    def _chunk_text(self, text: str) -> list:
        """Split cleaned markdown into blocks, one per top-level heading.

        A new block starts at every line beginning with "# ". Blank lines,
        bare section-number artifacts (e.g. "3. 2"), image references and
        other "#"-prefixed lines are dropped.
        """
        chunks = []
        current_chunk = []
        for line in text.split("\n"):
            line = line.strip()
            # Skip blanks and stray "N. M" section-number lines.
            if not line or re.match(r"^\d+\.\s*\d+\s*$", line):
                continue
            if line.startswith("# "):
                # A heading opens a new block; flush the one in progress.
                if current_chunk:
                    chunks.append("\n".join(current_chunk))
                    current_chunk = []
                current_chunk.append(line)
            elif not line.startswith("![](images/") and not line.startswith("#"):
                current_chunk.append(line)
        # Flush the trailing block, if any.
        if current_chunk:
            chunks.append("\n".join(current_chunk))
        return chunks

    def _concatenate_chunks_to_max_length(self, chunks: list, max_length=1000) -> list:
        """Greedily merge consecutive chunks (joined by "\\n") into strings
        of at most *max_length* characters.

        A single chunk already at or over the budget is emitted on its own.
        """
        result = []
        temp_str = ""
        for chunk in chunks:
            if not temp_str:
                temp_str = chunk
            # +1 accounts for the "\n" joiner (the original check omitted it,
            # letting merged strings exceed max_length by one character).
            elif len(temp_str) + 1 + len(chunk) <= max_length:
                temp_str += "\n" + chunk
            else:
                result.append(temp_str)
                temp_str = chunk
            # Flush immediately once the budget is reached.
            if len(temp_str) >= max_length:
                result.append(temp_str)
                temp_str = ""
        # Flush whatever remains after the loop.
        if temp_str:
            result.append(temp_str)
        return result

    def _get_chunker_chunks(self, chunks: list, max_length=512) -> list:
        """Re-split any chunk longer than *max_length* characters using the
        token-aware recursive chunker; shorter chunks pass through as-is.
        """
        result = []
        for chunk in chunks:
            if len(chunk) > max_length:
                # Use a fresh name here: the original rebound the iterated
                # `chunks`, which only worked because the iterator was
                # already captured.
                for sub_chunk in self.chunker.chunk(chunk):
                    result.append(sub_chunk.text)
            else:
                result.append(chunk)
        return result

    def _call_gpt_with_retry(
        self, client, model, chunk, k, max_retries=3, retry_delay=1
    ):
        """Ask the chat model for *k* QA pairs about *chunk*, retrying when
        the call fails or the reply is not valid JSON.

        :param client: OpenAI-compatible client instance
        :param model: chat model name
        :param chunk: text chunk to generate questions from
        :param k: number of questions to request
        :param max_retries: maximum number of attempts
        :param retry_delay: seconds to sleep between attempts
        :return: parsed JSON list of {"question", "answer"} dicts, or None
        """
        # Pre-bind so the error path below can always print it; it stays
        # None when the API call itself raised (the original crashed with
        # UnboundLocalError in that case).
        res = None
        retries = 0
        while retries < max_retries:
            try:
                completion = client.chat.completions.create(
                    model=model,
                    messages=[
                        {
                            "role": "system",
                            "content": f"""You are given a chunk of text. Your task is to read the chunk and generate a list of question-answer pairs based on the content. Each question should be relevant to the information provided in the text, and the answer should be a concise response derived directly from the chunk. The output should be formatted as a list of dictionaries, where each dictionary contains a 'question' and an 'answer' key.
1. Based on the above chunks, propose {k} questions.
2. The question must be in Chinese.
3. The subject of the problem must clearly indicate the content referred to to avoid ambiguity, and if not, ignore it.
4. For example, 'this research', 'this project', 'applicant', etc. are all unclear subjects. To avoid similar situations and avoid 'this', it is necessary to specify what research, project, or applicant it is.
The output format is as follows:
[
{{"question": "", "answer": ""}},
...
{{"question": "", "answer": ""}}
]
                        """,
                        },
                        {"role": "user", "content": chunk},
                    ],
                )
                res = completion.choices[0].message.content
                res = self._str_process(res)
                # Return the parsed JSON on success.
                return json.loads(res)
            except Exception as e:
                print(f"chunk:\n{chunk}")
                print(f"gpt返回结果:\n{res}")
                print(f"错误：{e}，正在重试...")
                retries += 1
                time.sleep(retry_delay)  # back off before the next attempt
        print("达到最大重试次数，放弃重试。")
        return None  # all attempts exhausted

    def _str_process(self, text: str) -> str:
        r"""Post-process a raw model reply into a JSON-parseable string.

        Removes markdown code fences and a leading "json" language tag, then
        doubles every backslash once so literal escape sequences such as
        ``\t``/``\n``/``\r`` survive ``json.loads`` as text.

        The original chained per-sequence replaces *before* the global
        doubling, which re-doubled the freshly added escapes
        (``\t`` -> ``\\\\t``); a single global doubling covers them all.
        """
        text = text.replace("```", "").strip()
        # Drop the "json" language tag left over from a ```json fence.
        if text.startswith("json"):
            text = text[4:].lstrip()
        text = text.replace("\\", "\\\\")
        return text

    def qa_process(self, file: str, json_path: str):
        """Generate QA pairs for markdown *file* and append them to
        *json_path* (merging with any existing content there).

        :return: the full merged list written to *json_path*
        """
        print(f"{file}开始解析")
        with open(file, "r", encoding="utf-8") as f:
            text = f.read()
        text = self._text_clean(text)
        chunks = self._chunk_text(text)
        chunks = self._concatenate_chunks_to_max_length(chunks, self.max_length)
        chunks = self._get_chunker_chunks(chunks)
        # Model may be overridden via generate_config; the default matches
        # the previously hard-coded value, so existing configs still work.
        model = self.GENERATE_CONFIG.get("model", "gpt-4o")
        k = self.GENERATE_CONFIG["question_num"]
        result = []
        for chunk in chunks:
            res = self._call_gpt_with_retry(self.client, model=model, chunk=chunk, k=k)
            if not res:
                continue  # chunk failed after all retries; skip it
            for item in res:
                # Keep only well-formed entries.
                if "question" in item and "answer" in item:
                    result.append(
                        {
                            "file": file,
                            "chunk": chunk,
                            "question": item["question"],
                            "answer": item["answer"],
                        }
                    )
        # Merge with prior output so repeated runs accumulate.
        if os.path.exists(json_path):
            with open(json_path, "r", encoding="utf-8") as f:
                data = json.load(f)
        else:
            data = []
        data.extend(result)
        with open(json_path, "w", encoding="utf-8") as f:
            json.dump(data, f, ensure_ascii=False, indent=4)
        print(f"{file} 解析完成，共有 {len(chunks)} 个chunks")
        return data
def find_md_files(directory, extension=".md"):
    """Recursively collect files under *directory* whose names end in *extension*.

    :param directory: root directory to walk
    :param extension: file-name suffix to match (default ".md", preserving
        the original behavior)
    :return: list of full paths, in ``os.walk`` traversal order
    """
    return [
        os.path.join(root, name)
        for root, _dirs, names in os.walk(directory)
        for name in names
        if name.endswith(extension)
    ]

# Usage example
def _main():
    """Demo driver: list the markdown files under a sample docs directory."""
    directory = '/Users/yuan/Downloads/navy_poc/docs'
    md_file_list = find_md_files(directory)
    print(md_file_list)
    print(len(md_file_list))
    # Full QA-generation run (requires tokenizer/openai/generate configs):
    #   qa = qac_generate(tokenizer_config, openai_config, generate_config)
    #   qa.qa_process("data/all_md/GB 6249-2011.md", "qa_json_test.json")


# Guard so importing this module no longer walks the hard-coded path
# and prints as a side effect.
if __name__ == "__main__":
    _main()