import random
import json
import time
import os
from datetime import datetime

from META import USER_SETTINGS, model_config, llm_apis, model, llm_histories, local_llm, local_llm_tz, model_config
from layout_parser import parse_outline_hier, outline_to_markdown
from utlis import gen_str_codes, use_llm_api
from txt2doc import generate_doc_paths, rewrite_
from QA_moduled import checkerboard_find, checkerboard_answer, answer_stream, checkerboard_integrate_contents


# def gen_subtitles(self, ref_texts, path, doc_type, api_name='ds_api'):
#     num_subs = random.randint(2, 3) # ****UNDER-DEVELOPMENT**** should reasoned by model
#     full_path = "-->".join(path)
#     print('\tcurrent path ', full_path)
#     res_content, _ = use_llm_api(self.llm_apis[api_name],
#                     histories = self.llm_histories,
#                     paras={'task':'gen-titles-dfs',
#                             'texts':ref_texts, 
#                             'query':'', 
#                             'topic':full_path,
#                             'down_level':1,
#                             'num_subs':num_subs,
#                             'doc_type':doc_type,
#                             'model':model,
#                             'local_model_name':self.USER_SETTINGS['LOCAL_LLM_NAME'],
#                             'local_model':self.local_llm,
#                             'local_tz': self.local_llm_tz
#                             },
#                     config=self.model_config
#                     )
#     structure = self.parse_outline_flat(res_content)
#     return structure, res_content

# def gen_outlines_dfs(root_nodes, max_depth, topic, doc_type, USER_SETTINGS, llm_apis, llm_histories, local_llm, local_llm_tz, model_config, api_name='ds_api'):
#     """:function 递归生成每个标题的子标题。{'chapter': 当前标题, 'children': [...子标题结构...] }"""
#     def dfs_outline(node, level, path, ref_texts):
#         node["level"] = level  # 加这一行
#         if level >= max_depth:
#             return node, ref_texts
        
#         if node.get("children") == []:  # 只有在 children 为空时才扩展
#             node["children"], res_content = gen_subtitles(ref_texts, 
#                                                 path + [node["chapter"]], 
#                                                 doc_type, 
#                                                 USER_SETTINGS, 
#                                                 llm_apis, 
#                                                 llm_histories, 
#                                                 local_llm, 
#                                                 local_llm_tz, 
#                                                 model_config, 
#                                                 api_name=api_name)
#             header = f"\n## {' --> '.join(path + [node['chapter']])}\n"
#             ref_texts += header + res_content.strip() + '\n'

#         if level + 1 <= max_depth:
#             new_children = []
#             for child in node["children"]:
#                 updated_child, ref_texts = dfs_outline(child, level + 1, path + [node["chapter"]], ref_texts)
#                 new_children.append(updated_child)
#             node["children"] = new_children
#         return node, ref_texts
    
#     # 将纯标题列表转为统一格式
#     if all(isinstance(item, str) for item in root_nodes):
#         root_nodes = [{"chapter": t, "children": []} for t in root_nodes]
#     # 从第1级开始遍历
#     structured = []
#     ref_texts = ''
#     for node in root_nodes:
#         updated_node, ref_texts = dfs_outline(node, 1, [topic], ref_texts)
#         structured.append(updated_node)
#     return structured

# def parse_outline_flat(markdown_text):
#     """将 markdown 列表转换为扁平列表形式，适合dfs策略"""
#     lines = markdown_text.strip().splitlines()
#     result = []
#     for line in lines:
#         stripped = line.strip()
#         match = re.match(r"[-*+] (.+)", stripped)
#         if match:
#             title = match.group(1).strip()
#             result.append({"chapter": title, "children": []})
#     return result


class JsonlDB:
    """Tiny append-only JSONL database keyed by the 'bid' field.

    One JSON object per line on disk; an in-memory cache (``record_cache``)
    mirrors the file so lookups and updates never re-read the disk.
    """

    def __init__(self, db_path="E:/project/gitee/checkerboard/Checkerboard/db.jsonl"):
        self.db_path = db_path      # backing .jsonl file
        self.uuid_index = set()     # known 'bid' values, for O(1) duplicate checks
        self.record_cache = []      # every record, in file order
        self._load_index()

    def _load_index(self):
        """Load all existing records (and their 'bid's) from disk into memory."""
        if os.path.exists(self.db_path):
            # utf-8-sig tolerates a BOM left at the start of the file by other tools
            with open(self.db_path, "r", encoding="utf-8-sig") as f:
                for line in f:
                    line = line.strip()
                    if not line:
                        continue  # skip blank lines
                    try:
                        record = json.loads(line)
                    except json.JSONDecodeError:
                        continue  # skip corrupted lines rather than abort the load
                    uid = record.get("bid")
                    if uid:
                        self.uuid_index.add(uid)
                    self.record_cache.append(record)
        print(f"[初始化] 加载记录数: {len(self.record_cache)}")

    def save_record(self, data):
        """Append a single record, skipping duplicates.

        :param data: dict that must carry a 'bid' key.
        :return: True if written, False if the 'bid' already exists.
        :raises ValueError: when 'bid' is missing.
        """
        uid = data.get("bid")
        if uid is None:
            # Bug fix: the message used to say 'uuid' although the required key is 'bid'.
            raise ValueError("记录缺少 'bid' 字段")

        if uid in self.uuid_index:
            print(f"[跳过] uuid={uid} 已存在")
            return False

        # Bug fix: appending with encoding="utf-8-sig" emits a BOM at the start of
        # every appended write, i.e. in the MIDDLE of the file; those lines then
        # fail json.loads() on the next reload and are silently dropped.
        with open(self.db_path, "a", encoding="utf-8") as f:
            f.write(json.dumps(data, ensure_ascii=False) + "\n")
        self.uuid_index.add(uid)
        self.record_cache.append(data)
        print(f"[写入] uuid={uid}")
        return True

    def save_records_batch(self, data_list):
        """Append many records at once, silently dropping duplicates."""
        new_records = []
        for data in data_list:
            uid = data.get("bid")
            if uid and uid not in self.uuid_index:
                new_records.append(data)
                self.uuid_index.add(uid)

        if new_records:
            # utf-8 (not utf-8-sig) for the same mid-file-BOM reason as save_record.
            with open(self.db_path, "a", encoding="utf-8") as f:
                for record in new_records:
                    f.write(json.dumps(record, ensure_ascii=False) + "\n")
            self.record_cache.extend(new_records)
            print(f"[批量写入] {len(new_records)} 条新记录")
        else:
            print("[批量写入] 无新记录")

    def search_record(self, id_value, key="bid"):
        """Return the first cached record whose *key* equals *id_value*, else None."""
        for record in self.record_cache:
            if record.get(key) == id_value:
                return record
        return None

    def update_record(self, new_data):
        """Replace, in the cache only, the record whose 'bid' matches new_data's.

        Call flush_to_disk() afterwards to persist the change.
        """
        bid = new_data.get("bid")
        if not bid:
            print("[更新失败] new_data 中缺少 'bid'")
            return False

        updated = False
        for i, record in enumerate(self.record_cache):
            if record.get("bid") == bid:
                self.record_cache[i] = new_data  # full replacement, not a merge
                updated = True
                break

        if not updated:
            print(f"[更新失败] 未找到 bid={bid} 的记录")
            return False
        print(f"[更新成功] bid={bid} 的记录已替换")
        return True

    def find_by_path(self, nodes, path):
        """Locate a nested chapter dict by its serial-number path.

        :param nodes: chapter list at the current level (dicts with chapter/serial/children)
        :param path: list of serial numbers, e.g. [1, 2, 2]
        :return: the matched chapter dict, or None when the path does not resolve
        """
        found = None  # bug fix: an empty path used to raise NameError on the final return
        current_level = nodes
        for serial in path:
            found = None
            for chapter in current_level:
                if chapter.get("serial") == serial:
                    found = chapter
                    break
            if not found:
                return None
            current_level = found.get("children", [])
        return found

    def update_nested_chapter(self, nodes, serial, update_title):
        """Rename, in place, the chapter addressed by the serial path *serial*."""
        target_node = self.find_by_path(nodes, serial)
        if target_node:
            print(f"[匹配成功] 修改前: {target_node['chapter']}")
            target_node["chapter"] = update_title
            print(f"[修改后] 修改为: {target_node['chapter']}")
            return True
        else:
            print("[未找到匹配路径] 修改失败")
            return False

    def flush_to_disk(self):
        """Overwrite the file with the whole cache (cache is already deduplicated by bid)."""
        with open(self.db_path, "w", encoding="utf-8") as f:
            for record in self.record_cache:
                f.write(json.dumps(record, ensure_ascii=False) + "\n")
        print(f"[写入完成] 共写入 {len(self.record_cache)} 条记录")


class OutlineGenerator:
    """Builds document outlines via LLM calls.

    NOTE(review): a class with the same name and more methods is defined again
    later in this module and shadows this one at import time — keep/merge one copy.
    """

    def __init__(self, db, USER_SETTINGS, llm_apis, llm_histories, local_llm, local_llm_tz, model_config):
        self.db = db
        self.USER_SETTINGS = USER_SETTINGS
        self.llm_apis = llm_apis
        self.llm_histories = llm_histories
        self.local_llm = local_llm
        self.local_llm_tz = local_llm_tz
        self.model_config = model_config

    def load_title_templates(self, type):  # **** UNDER-DEVELOPMENT: pre-defined templates or search?
        """Return the outline template for a document *type* (all empty for now)."""
        root_template = ''
        if type == 'goverment':  # NOTE(review): looks like a typo for 'government' — kept so existing callers still match
            root_template = ''
        elif type == 'PPP':
            root_template = ''
            # ...
        # Bug fix: the return used to sit inside the final else branch, so the
        # matched types above fell through and returned None.
        return root_template

    def gen_outline_full(self, root_titles, max_depth, require_txts, topic, doc_type, max_tokens, num_titles, api_name='ds_api'):
        """Generate the complete multi-level outline in a single LLM call.

        :return: parsed hierarchical outline structure, or None on any failure.
        """
        try:
            num_subs = random.randint(3, 5)  # ****UNDER-DEVELOPMENT**** should be reasoned by the model
            ref_texts = '\n初始提纲\n' + outline_to_markdown(root_titles) + '\n招投标要求\n' + require_txts
            res_content, _ = use_llm_api(self.llm_apis[api_name],
                            histories=self.llm_histories,
                            paras={'task':'gen-titles-oneoff',
                                    'texts':ref_texts,
                                    'query':'',
                                    'topic':topic,
                                    'max_depth':max_depth,
                                    'num_subs':num_subs,
                                    'doc_type':doc_type,
                                    'model':model,
                                    'max_tokens':max_tokens,
                                    'num_titles':num_titles,
                                    'local_model_name':self.USER_SETTINGS['LOCAL_LLM_NAME'],
                                    'local_model':self.local_llm,
                                    'local_tz':self.local_llm_tz
                                    },
                            config=self.model_config
                            )
            structure = parse_outline_hier(res_content)
            return structure
        except Exception as e:
            print('generate full titles fail, because: ', e)
            return None
        

class OutlineGenerator:
    """Generates and revises bid-document outlines through LLM calls, persisting
    results in a JsonlDB instance."""

    def __init__(self, db, USER_SETTINGS, llm_apis, llm_histories, local_llm, local_llm_tz, model_config):
        self.db = db
        self.USER_SETTINGS = USER_SETTINGS
        self.llm_apis = llm_apis
        self.llm_histories = llm_histories
        self.local_llm = local_llm
        self.local_llm_tz = local_llm_tz
        self.model_config = model_config

    def load_title_templates(self, type):  # **** UNDER-DEVELOPMENT: pre-defined templates or search?
        """Return the outline template for a document *type* (all empty for now)."""
        root_template = ''
        if type == 'goverment':  # NOTE(review): looks like a typo for 'government' — kept so existing callers still match
            root_template = ''
        elif type == 'PPP':
            root_template = ''
            # ...
        # Bug fix: the return used to sit inside the final else branch, so the
        # matched types above fell through and returned None.
        return root_template

    def gen_outline_full(self, root_titles, max_depth, require_txts, topic, doc_type, max_tokens, num_titles, api_name='ds_api'):
        """Generate the complete multi-level outline in a single LLM call.

        :return: parsed hierarchical outline structure, or None on any failure.
        """
        try:
            num_subs = random.randint(3, 5)  # ****UNDER-DEVELOPMENT**** should be reasoned by the model
            ref_texts = '\n初始提纲\n' + outline_to_markdown(root_titles) + '\n招投标要求\n' + require_txts
            res_content, _ = use_llm_api(self.llm_apis[api_name],
                            histories=self.llm_histories,
                            paras={'task':'gen-titles-oneoff',
                                    'texts':ref_texts,
                                    'query':'',
                                    'topic':topic,
                                    'max_depth':max_depth,
                                    'num_subs':num_subs,
                                    'doc_type':doc_type,
                                    'model':model,
                                    'max_tokens':max_tokens,
                                    'num_titles':num_titles,
                                    'local_model_name':self.USER_SETTINGS['LOCAL_LLM_NAME'],
                                    'local_model':self.local_llm,
                                    'local_tz':self.local_llm_tz
                                    },
                            config=self.model_config
                            )
            structure = parse_outline_hier(res_content)
            return structure
        except Exception as e:
            print('generate full titles fail, because: ', e)
            return None

    def gen_root_titles(self, json_input, api_name, exist_bid=None, remake=False):
        """Generate first-level chapter titles for a new (or re-made) document.

        :param json_input: request dict (title/content/type/style/pages/words/...)
        :param api_name: which entry of self.llm_apis to call
        :param exist_bid: bid of the existing record when remaking
        :param remake: True to overwrite the existing record instead of minting a new bid
        :return: result dict; 'issuccess' is 'ok' on success, 'failure' otherwise
        """
        bid_id = exist_bid  # bug fix: the failure payloads below used to hit NameError on bid_id
        try:
            # Templates per type may need to be prepared ahead of time:
            # government bid / corporate bid / PPP bid ...
            fields = ['title', 'content', 'type', 'style', 'pages', 'words', 'num_titles', 'tables', 'quote']
            para_dic = {field: json_input.get(field, '') for field in fields}
        except Exception as e:
            res_json = {
                'bid':bid_id,
                # bug fix: str(e) — 'str' + Exception used to raise TypeError here
                'exception':'key input parameters missing: ' + str(e),
                'issuccess':'failure'
            }
            return res_json

        try:
            example_texts = self.load_title_templates(para_dic['type'])
            res_content, _ = use_llm_api(self.llm_apis[api_name],
                            histories=self.llm_histories,
                            paras={'task':'gen-root-titles',
                                    'texts':example_texts,
                                    'query':'',
                                    'title':para_dic['title'],
                                    'content':para_dic['content'],
                                    'num_titles':para_dic['num_titles'],
                                    'style':para_dic['style'],
                                    'doc_type':para_dic['type'],
                                    'local_model_name':self.USER_SETTINGS['LOCAL_LLM_NAME'],
                                    'local_model':self.local_llm,
                                    'local_tz':self.local_llm_tz
                                    },
                            config=self.model_config
                            )
            structure = parse_outline_hier(res_content)
            res_json = {
                'title':para_dic['title'],
                'pages':para_dic['pages'],
                'words':para_dic['words'],
                'chapters':structure,
                'content':para_dic['content'],
                'style':para_dic['style'],
                'type':para_dic['type'],
                'tables':para_dic['tables'],
                'quote':para_dic['quote'],
                'code':200,
                'issuccess':'ok'
            }
            if remake:
                res_json.update({'bid':exist_bid})
                self.db.update_record(res_json)  # cache-only update; flush separately
            else:
                # a fresh bid is derived from the generated structure itself
                bid_id = gen_str_codes(json.dumps(structure, ensure_ascii=False, indent=4))
                res_json.update({'bid':bid_id})
        except Exception as e:
            res_json = {
                'bid':bid_id,
                'exception':e,
                'issuccess':'failure'
            }
        return res_json

    def modify_titles(self, json_input, api_name, target_depth=1):
        """Rewrite (remake=True) or keep the title of one chapter addressed by its serial path."""
        bid_id = json_input.get('bid')
        try:
            serial = json_input['serial']  # serial-number path to the chapter
            record = self.db.search_record(bid_id)
            # Bug fix: record/node lookups used to run outside the try, so a missing
            # bid or path raised straight at the caller instead of returning a failure dict.
            title_node = self.db.find_by_path(record['chapters'], serial)
            if title_node is None:
                raise ValueError('no chapter found for serial path {}'.format(serial))

            remake = json_input['remake']
            thoughts = json_input['thoughts']
            if remake:
                title_modified, _ = use_llm_api(self.llm_apis[api_name],
                                histories=self.llm_histories,
                                paras={'task':'rewrite-sentence',
                                        'texts':title_node['chapter'],
                                        'query':'',
                                        'add_req':thoughts,
                                        'local_model_name':self.USER_SETTINGS['LOCAL_LLM_NAME'],
                                        'local_model':self.local_llm,
                                        'local_tz':self.local_llm_tz
                                        },
                                config=self.model_config
                                )
            else:
                title_modified = title_node['chapter']

            self.db.update_nested_chapter(record['chapters'], serial, title_modified)
            res_json = {
                'bid':bid_id,
                'serial':serial,
                'update_rec':record, # only updates the db cache, not the local file
                'chapter':title_modified,
                'thoughts':thoughts,
                'code':200,
                'issuccess':'ok'
            }
        except Exception as e:
            res_json = {
                'bid':bid_id,
                'exception':e,
                'issuccess':'failure'
            }
        return res_json

    def regen_root_titles(self, json_input, api_name):
        """Re-run gen_root_titles for an existing record, merging in update_reqs overrides."""
        remake = json_input['remake']
        record = self.db.search_record(json_input['bid'])
        regen_json = {'title':record['title'],
                    'content':record['content'],
                    'type':record['type'],
                    'style':record['style'],
                    'pages':record['pages'],
                    'words':record['words'],
                    }
        # caller-supplied overrides win over the stored values
        regen_json.update(json_input['update_reqs'])
        json_output = self.gen_root_titles(regen_json, api_name, json_input['bid'], remake)
        return json_output

    def gen_detailed_titles(self, json_input, api_name='ds_api', max_depth=3, trys=1, max_tokens=3000, num_titles=50, mode='oneoff', words_per_page=600):
        """Generate `trys` candidate detailed outlines for record json_input['bid'],
        assign word/page budgets to each, and persist the result."""
        bid = json_input['bid']
        record = self.db.search_record(bid)
        structure = record['chapters']
        total_words = record['words']
        total_pages = record['pages']

        res_json = {
            'total':trys,
            'code':200,
            'bid':bid,
            'type':record['type'],
            'style':record['style'],
            'words':total_words,
            'pages':total_pages,
            'tables':record['tables'], # table-usage preference
            'quote':record['quote'], # tracks quoted materials
            'data':[]
        }
        try:
            for i in range(0, trys):
                outline = self.gen_outline_full(structure, max_depth, json_input['thoughts'], record['title'], record['type'], max_tokens, num_titles, api_name)
                time_ = datetime.now().isoformat()

                if outline is not None:
                    # Bug fix: word/page assignment used to run BEFORE the None check,
                    # so a failed generation crashed the loop and the code-300 branch
                    # below was unreachable.
                    outline, preTotalWordCnt, pageCnt = self.assign_words_pages(outline, total_words, words_per_page)
                    version = 'ver_000' + str(i)
                    res_json['data'].append({
                        'version':version,
                        'gmtModified':time_,
                        'catalog':outline,
                        'preTotalWordCnt':preTotalWordCnt,
                        'pageCnt':pageCnt,
                        'code':200
                    })
                else:
                    res_json['data'].append({
                        'version':i,
                        'gmtModified':time_,
                        'catalog':'Null',
                        'preTotalWordCnt':-1,
                        'pageCnt':-1,
                        'code':300 # TODO: define the failure code properly
                    })
            self.db.update_record(res_json)
            self.db.flush_to_disk()
            return res_json
        except Exception as e:
            res_json = {
                'bid':bid,
                'exception':e,
                'issuccess':'failure'
            }
            return res_json

    def assign_words_pages(self, chapters, total_words, words_per_page=600):
        """Distribute total_words over the outline and back-fill per-chapter totals.

        1. Split total_words evenly across all leaf chapters (flat split for now).
        2. Roll word counts up bottom-to-top so every parent carries the sum.
        3. Estimate page counts per chapter (ceiling division by words_per_page).

        :return: (chapters, total word count, total page count)
        :raises ValueError: when the outline has no leaf chapters.
        """
        # Step 1: collect all leaf nodes
        leaf_nodes = []
        def find_leaves(nodes):
            for node in nodes:
                if not node.get("children"):
                    leaf_nodes.append(node)
                else:
                    find_leaves(node["children"])

        find_leaves(chapters)
        if not leaf_nodes:
            raise ValueError("没有叶子节点，无法分配字数")

        # Step 2: assign words/pages to each leaf
        words_per_leaf = total_words // len(leaf_nodes)
        for node in leaf_nodes:
            node["words"] = words_per_leaf
            node["pages"] = -(-words_per_leaf // words_per_page)  # ceiling division

        # Step 3: roll up words and pages bottom-to-top
        def compute_counts(nodes):
            for node in nodes:
                children = node.get("children", [])
                if children:
                    compute_counts(children)
                    node["words"] = sum(child["words"] for child in children)
                    node["pages"] = sum(child["pages"] for child in children)
        compute_counts(chapters)

        preTotalWordCnt = sum(ch["words"] for ch in chapters)
        pageCnt = sum(ch["pages"] for ch in chapters)
        return chapters, preTotalWordCnt, pageCnt


class ContentGenerator:
    """Generates chapter contents for a stored outline record via LLM calls."""

    def __init__(self, db, USER_SETTINGS, llm_apis, llm_histories, local_llm, local_llm_tz, model_config):
        self.db = db
        self.USER_SETTINGS = USER_SETTINGS
        self.llm_apis = llm_apis
        self.llm_histories = llm_histories
        self.local_llm = local_llm
        self.local_llm_tz = local_llm_tz
        self.model_config = model_config
        # fields forwarded into rewrite_() when re-writing paragraphs
        self.rewrite_fields = ['words', 'topic', 'avoid_topics', 'type', 'style', 'pages']

    def build_tree(self, nodes):
        """Convert a chapter list into a nested dict keyed by chapter title, ordered by serial."""
        result = {}
        for node in sorted(nodes, key=lambda x: x.get("serial", 0)):
            chapter = node["chapter"]
            serial = node["serial"]
            words = node.get("words", 0)
            pages = node.get("pages", 0)
            children = node.get("children", [])

            result[chapter] = {
                "serial": serial,
                "words": words,
                "pages": pages,
                "children": self.build_tree(children) if children else {}
            }
        return result

    def extract_topics(self, inte_contents, doc_type, words_per_page, split_char='-', api_name='ds_api'):
        """Ask the LLM for roughly len(text)//words_per_page sub-topics.

        Bug fix: api_name used to be read as an undefined global (NameError on
        every call); it is now an explicit keyword parameter.

        :return: (list of topic strings, number of topics)
        """
        num_topics = int(len(inte_contents)//words_per_page)
        topics, _ = use_llm_api(self.llm_apis[api_name],
                        histories=self.llm_histories,
                        paras={'task':'extract-topics',
                                'texts':inte_contents,
                                'query':'',
                                'doc_type':doc_type,
                                'num_topics':num_topics,
                                'local_model_name':self.USER_SETTINGS['LOCAL_LLM_NAME'],
                                'local_model':self.local_llm,
                                'local_tz':self.local_llm_tz
                                },
                        config=self.model_config
                        )
        topics = [t.strip() for t in topics.split(split_char) if t.strip() != ""]
        return topics, len(topics)

    def extract_refcontents(self, inte_contents, doc_type, topic, avoid_topics, api_name='ds_api'):
        """Extract from *inte_contents* the passages relevant to *topic* while
        steering clear of *avoid_topics*.

        Bug fix: api_name used to be an undefined global; now a keyword parameter.
        """
        ref_contents, _ = use_llm_api(self.llm_apis[api_name],
                        histories=self.llm_histories,
                        paras={'task':'extract-contents',
                                'texts':inte_contents,
                                'query':'',
                                'doc_type':doc_type,
                                'topic':topic,
                                'avoid_topics':avoid_topics,
                                'local_model_name':self.USER_SETTINGS['LOCAL_LLM_NAME'],
                                'local_model':self.local_llm,
                                'local_tz':self.local_llm_tz
                                },
                        config=self.model_config
                        )
        return ref_contents

    def insert_leaf_contents(self, tree, content_items, link='-->'):
        """Fill generated leaf contents back into the original chapter tree.

        Each content_items entry must carry 'parents' (the link-joined path) and
        'content'.
        """
        for item in content_items:
            keys = item["parents"].split(link)
            content = item["content"]
            current = tree

            broken = False
            for key in keys[:-1]:
                matched = None
                for node in current:
                    if node["chapter"] == key:
                        matched = node
                        break
                if matched is None:
                    print(f"[警告] 未找到路径：{' → '.join(keys)}, 中断")
                    broken = True
                    break
                current = matched.get("children", [])
            if broken:
                # Bug fix: the original fell through and tried to match the leaf
                # at whatever level the walk stopped, potentially attaching the
                # content to the wrong chapter.
                continue

            leaf_key = keys[-1]
            for node in current:
                if node["chapter"] == leaf_key and (not node.get("children") or isinstance(node["children"], list)):
                    node["children"] = content
                    break
            else:
                # Bug fix: was item['path'], a key that never exists (KeyError).
                print(f"[未匹配叶子] {item['parents']}")

    def gen_contents(self, json_input, api_name, limit=-1, topk=4, link='-->',  words_per_topic=600, mode='naive'):
        """Generate contents for every leaf of one outline version.

        :param limit: when > 0, only the first *limit* leaf paths are processed.
        :param mode: 'naive' uses a placeholder rewrite; anything else runs the
                     full search → topic split → per-topic rewrite pipeline.
        """
        record = self.db.search_record(json_input['bid'])
        res_json = {
            'bid':json_input['bid']
        }

        try:
            st = time.time()
            for data in record['data']:
                if data['version']==json_input['version']: # only one version may be selected
                    catalog = data['catalog']
                    tree = self.build_tree(catalog)
                    search_keys = generate_doc_paths(tree, link=link)
                    if limit>0:
                        search_keys = search_keys[:limit]

                    content_items = []
                    for i, key in enumerate(search_keys):
                        inner_topic_input = {'topic':key['path'],
                                            'words':int(key['words']),
                                            'pages':key['pages'],
                                            'style':record['style'],
                                            'type':record['type'],
                                            'thoughts':json_input['thoughts']}

                        if mode=='naive':
                            inner_contents = 'TEST ONLY' #rewrite_('', self.llm_apis[api_name], self.USER_SETTINGS['LOCAL_LLM_NAME'], self.local_llm, self.local_llm_tz, self.llm_histories, self.model_config, task='rewrite-paras', add_paras=inner_topic_input, rewrite_fields=self.rewrite_fields)
                            inner_words = len(inner_contents)
                        else:
                            inner_contents = ""
                            inner_words = 0
                            res_search = checkerboard_find(key['path'], topk, data_type=1) # thread 1: retrieval
                            inte_contents, _, _ = checkerboard_integrate_contents(key['path'], res_search['sim_contents'], show_image=False)
                            # thread 2: split the reference material into sub topics
                            inner_topics, num_t = self.extract_topics(inte_contents, record['type'], words_per_topic, api_name=api_name)

                            for topic in inner_topics:
                                avoid_topics = '; '.join([at for at in inner_topics if at!=topic])
                                # thread 3: pull the passages for this topic only
                                ref_contents = self.extract_refcontents(inte_contents, record['type'], topic, avoid_topics, api_name=api_name)
                                inner_topic_input.update({'avoid_topics':avoid_topics, 'topic':(key['path'] + '>' + topic), 'words':int(key['words']//num_t)})
                                # thread 4 (should be streamed)
                                res_content = rewrite_(ref_contents, self.llm_apis[api_name], self.USER_SETTINGS['LOCAL_LLM_NAME'], self.local_llm, self.local_llm_tz, self.llm_histories, self.model_config, task='rewrite-paras', add_paras=inner_topic_input, rewrite_fields=self.rewrite_fields)
                                inner_contents = inner_contents + '\n' + res_content
                                inner_words += len(res_content)

                        current_res = {'parents':key['path'],
                                       'content':inner_contents.strip(),
                                       'preWordCnt':inner_words,
                                       'pageCnt':-1, # TODO: actual page count still needs checking
                                       'docId':gen_str_codes(key['path']),
                                       'code':200}
                        content_items.append(current_res)
                    # fill the generated contents back into the catalog
                    self.insert_leaf_contents(catalog, content_items)
                    res_json.update({
                        'data_with_contents':catalog,
                        'code':200,
                        'time_consumed':time.time()-st
                    })
                else:
                    continue
        except Exception as e:
            res_json = {
                # bug fix: was the undefined name `bid` (NameError masking the real error)
                'bid':json_input.get('bid'),
                'exception':e,
                'issuccess':'failure'
            }
        return res_json
    
    
if __name__ == "__main__":
    # Smoke-test driver: walks the five user interactions end to end against a
    # local JSONL db and the configured LLM API. Order matters — each step uses
    # the record produced by the previous one.
    model = 'deepseek-chat'
    db_record_path = r'D:\OneDrive\Code Warehouse\Prototype\db.jsonl' # **** path of the jsonl file that maintains all records
    
    db = JsonlDB(db_record_path)
    og = OutlineGenerator(db, USER_SETTINGS, llm_apis, llm_histories, local_llm, local_llm_tz, model_config)
    cg = ContentGenerator(db, USER_SETTINGS, llm_apis, llm_histories, local_llm, local_llm_tz, model_config)

    if USER_SETTINGS['USE_LOCAL_LLM']:
        api_name = 'local_api'
    else:
        api_name = 'ds_api'

    print('TEST FIRST INTERACTION: generate root tiles (first-level titles)') # PASSED
    json_input = {
        'title': '深圳市南山区实验小学地基（桩基）施工项目',
        'content': '''
            测量放线→基础土方开挖→基础石方开挖→模板支设（原槽时无模板）→基础钢筋绑扎（墙、柱插筋）→基础混凝土浇筑→短柱、墙浇筑→基础转序验收→回填。
        ''',
        'type': '建筑工程投标',
        'style': '正式商务',
        'num_titles':random.randint(4,5),
        'pages':100,
        'words':100000,
        'tables':'more',
        'quote':''
    }
    json_output = og.gen_root_titles(json_input, api_name=api_name)
    db.save_record(json_output)

    # pick any stored record to drive the remaining interactions
    bid = random.choice([r for r in db.record_cache if "bid" in r])["bid"]
    print('TEST SECOND INTERACTION: change one of the titles based on id {}'.format(bid)) # PASSED
    json_input = {
        'bid':bid,
        'serial':[1],
        'remake':True,
        'thoughts':'更详细专业一些' # the user's idea for the revision
    }
    json_output = og.modify_titles(json_input, api_name, target_depth=1)

    print('TEST THIRD INTERACTION: re-generate all root-level titles') # PASSED
    json_input = {
        'bid':bid,
        'remake':True,
        'update_reqs':{'thoughts':'整体都更简短一些。', 'num_titles':random.randint(4,5), 'pages':100, 'words':10000}, # can be expanded
    }
    json_output = og.regen_root_titles(json_input, api_name)

    print('FOURTH INTERACTION: genearte detailed outlines') # PASSED
    json_input = {'bid':bid,
                  'thoughts':'整体简短。'}
    json_output = og.gen_detailed_titles(json_input, api_name)

    print('FIFTH INTERACTION: generate contents by dfs')
    json_input = {'bid':bid,
                  'version': 'ver_000'+ str(0),
                  'thoughts':'生成的标题要具体，不要笼统。'
                  }
    
    json_output = cg.gen_contents(json_input, api_name, limit=30, mode='naive')
    db.flush_to_disk()
    print(json_output)