import argparse
import json
import os
import re
import time
from typing import List

import jsonlines

from chatGPT import ChatGPT
from deepseek import DeepSeek
from logger import Logger

# Command-line interface: --book_id selects which book file in the books
# directory this process handles (and which API key, via modulo rotation).
parser = argparse.ArgumentParser(description="Generate Q&A pairs for one book.")

parser.add_argument(
    "--book_id",
    default=0,
    type=int,
    help="index of the book to process within the books directory listing",
)


class MetaData:
    """Metadata shared by every generated Q&A pair of one text block.

    Fields:
     - index: id of the source text block in the original text
     - sub_index: position of the Q&A pair within the block (one block
       may yield several pairs)
     - text: raw text content sent to the language model
     - bookname: name of the book the text comes from
     - book_type: origin of the book, e.g. "code", "textbook" or "wiki"
    """

    def __init__(self) -> None:
        # Delegate to clear() so the empty state is defined in one place
        # (the original duplicated the five assignments verbatim).
        self.clear()

    def merge_into_dict(self, data):
        """Copy the metadata fields into ``data`` and return it.

        Note: ``data`` is mutated in place, and ``text`` is deliberately
        not copied (the raw text is not stored in the result items).
        """
        data["index"] = self.index
        data["sub_index"] = self.sub_index
        data["bookname"] = self.bookname
        data["book_type"] = self.book_type

        return data

    def clear(self):
        """Reset every metadata field to None."""
        self.index = None
        self.sub_index = None
        self.text = None
        self.bookname = None
        self.book_type = None


class DataCreator:
    """Drive Q&A dataset generation for one book.

    Reads text blocks from a book JSON/JSONL file, queries the language
    model for Q&A pairs, and appends the results to a per-book JSONL file
    under ``result_dir`` (so runs are resumable).
    """

    def __init__(self, result_dir, api_key) -> None:
        """
        Args:
            result_dir: directory for per-book result ``.jsonl`` files
                (created if missing).
            api_key: API key handed to the DeepSeek client.
        """
        self.logger = Logger("data_creator.log")
        # self.chatgpt = ChatGPT(self.logger)
        self.lang_model = DeepSeek(self.logger, api_key)
        self.meta_data = MetaData()
        os.makedirs(result_dir, exist_ok=True)
        self.result_dir = result_dir

    def curate_data_item(self, qa_pairs, item_id):
        """Turn one model response into result dicts.

        Args:
            qa_pairs: list of Q&A pairs in format ``[{"Q": ..., "A": ...}]``.
            item_id: id of the source text block; stored as ``index``.

        Returns:
            On success one dict per pair, each tagged with the current
            metadata; otherwise a single failure placeholder.
        """
        data = []
        if len(qa_pairs) > 0:
            for i, qa_pair in enumerate(qa_pairs):
                self.meta_data.index = item_id
                self.meta_data.sub_index = i
                data_item = {"input": qa_pair["Q"], "output": qa_pair["A"], "status": "success"}
                data_item = self.meta_data.merge_into_dict(data_item)
                data.append(data_item)
            print(">>> Item ID: ", item_id, "  success：", len(qa_pairs), " pairs")
        else:
            data_item = {"id": item_id, "input": "生成失败", "output": "生成失败", "status": "failed"}
            data.append(data_item)
            print(">>> Item ID: ", item_id, "  failed")
        return data

    def prepare_todo_list(self, json_path: str):
        """Build the list of text blocks still to be processed.

        Blocks that already produced a ``"success"`` result in a previous
        run (recorded in the per-book result file) are filtered out.

        Raises:
            ValueError: if ``json_path`` is neither ``.json`` nor ``.jsonl``.
        """
        if json_path.endswith(".json"):
            with open(json_path, "r", encoding="utf-8") as f:
                raw_text_list = json.load(f)
        elif json_path.endswith(".jsonl"):
            with jsonlines.open(json_path, "r") as reader:
                raw_text_list = [item for item in reader.iter()]
        else:
            # Previously this fell through with raw_text_list unbound and
            # crashed later with a confusing NameError.
            raise ValueError(f"Unsupported input file type: {json_path}")

        book_name = os.path.splitext(os.path.basename(json_path))[0]
        # avoid different back slash in windows and linux
        res_jsonl_path = os.path.join(self.result_dir, f"{book_name}.jsonl").replace("\\", "/")
        if os.path.exists(res_jsonl_path):
            with jsonlines.open(res_jsonl_path, "r") as reader:
                done_list = {item["index"] for item in reader.iter() if item["status"] == "success"}
        else:
            # was [] -- use a set so membership tests below stay O(1)
            done_list = set()

        todo_list = [item for item in raw_text_list if item["id"] not in done_list]
        return todo_list

    def process_single(self, json_path, book_type="textbook"):
        """Process a single json file end to end.

        Each json file is a list of text blocks extracted from one book;
        results are appended to the book's jsonl file after every block.
        """
        todo_list = self.prepare_todo_list(json_path)
        book_name = os.path.splitext(os.path.basename(json_path))[0]
        # remove the leading numbers (file names are prefixed like "0002...")
        clean_book_name = re.sub(r"^\d+", "", book_name)

        # set the metadata shared by every pair generated from this book
        self.meta_data.bookname = clean_book_name
        self.meta_data.book_type = book_type

        # start processing
        self.logger.log("\n\n" + "=" * 50)
        self.logger.log(f"Start processing book: {book_name} at {time.ctime()}")

        # was an index-based while loop; a plain for is equivalent here
        for text_block in todo_list:
            item_id = text_block["id"]
            text = text_block["content"]
            chapter = text_block["chapter"]
            section = text_block["section"]
            sub_section = text_block["sub_section"]

            material = f"以下是材料：取自《{clean_book_name}》:{chapter} {section} {sub_section}\n{text}"
            qa_pairs = self.lang_model.avoid_eqn_nums(material)
            # qa_pairs = self.lang_model.multi_round(material, text_block)
            if qa_pairs:
                dataset = self.curate_data_item(qa_pairs, item_id)
                self.save_result(dataset, book_name)
            else:
                print(">>> Item ID: ", item_id, "  failed: no qa_pairs")

        self.logger.log(f"Finished processing book: {book_name} at {time.ctime()}")
        self.logger.log("=" * 50, 2)

    def save_result(self, data, book_name):
        """Append result items to the book's jsonl file."""
        # normalize separators like prepare_todo_list so both methods
        # always address the same file on windows and linux
        save_path = os.path.join(self.result_dir, f"{book_name}.jsonl").replace("\\", "/")
        with jsonlines.open(save_path, "a") as writer:
            for item in data:
                writer.write(item)


if __name__ == "__main__":
    args = parser.parse_args()
    book_id = args.book_id
    # number of API keys to rotate through (one per concurrent worker)
    all_api_num = 11

    # directory containing the per-book JSON files of extracted text blocks
    book_path = r"E:\Common\Desktop\LLM\Codes\FineTuneData\text_blocks\books"

    with open("deepseek_keys.txt", "r", encoding="utf-8") as f:
        api_keys = f.readlines()

    # Prefer a relative result path to keep runs portable.
    result_dir = "finetune_dataset/from_deepseek/books"
    book_list = os.listdir(book_path)
    book = book_list[book_id]
    # each book id maps to one of the available keys via modulo rotation
    api_key = api_keys[book_id % all_api_num]
    print("book id:{} api_key id:{} book name:{}".format(book_id, book_id % all_api_num, book))

    # NOTE: when processing other material, replace "textbook" below with the
    # appropriate source tag; the generation prompt itself lives in the
    # language-model module (ChatGPT/DeepSeek).
    # was "{}\\{}".format(book_path, book) -- hard-coded backslash breaks on linux
    json_path = os.path.join(book_path, book)
    print(">>> Processing: ", json_path)
    creator = DataCreator(result_dir, api_key.strip())
    creator.process_single(json_path, "textbook")
