import os
import numpy as np
# import tiktoken
from tqdm import trange,tqdm
import time
# import torch
from src.model import APIModel
# from src.database import database
from src.utils import tokenCounter,sanitize_folder_name
from src.prompt import MERGING_OUTLINE_PROMPT, SUBSECTION_OUTLINE_PROMPT, EDIT_FINAL_OUTLINE_PROMPT,JUDGE_RELEVANT_PROMPT,ROUGH_STRUCTURE_PROMPT, SUBSECTION_OUTLINE_PROMPT_INTRO,SUBSECTION_OUTLINE_PROMPT_OTHERS,SUBSECTION_OUTLINE_PROMPT_NOTATION
import concurrent.futures
from chatglm_api import *
from bs4 import BeautifulSoup
import re
def process_chunks(chunks):
    """Filter a paper's chunks, keeping chunk 0 plus the first chunks that
    mention "introduction" or "related work".

    The two patterns are tracked independently, but each chunk is appended
    at most once, and a pattern already satisfied by chunk 0 (or any earlier
    kept chunk) is not collected again from later chunks.

    :param chunks: iterable of dicts with 'chunk_id' and 'chunk_text' keys.
    :return: list of the kept chunk dicts, in input order.
    """
    processed_chunks = []

    # Compile once up front; whole-word, case-insensitive matching.
    intro_pattern = re.compile(r'\b(introduction)\b', re.IGNORECASE)
    related_work_pattern = re.compile(r'\b(related work)\b', re.IGNORECASE)

    intro_seen = False
    related_work_seen = False

    for chunk in chunks:
        chunk_text = chunk['chunk_text']
        has_intro = bool(intro_pattern.search(chunk_text))
        has_related = bool(related_work_pattern.search(chunk_text))

        # Chunk 0 is always kept; record BOTH patterns it already covers so
        # the same section is not collected again from a later chunk.
        if chunk['chunk_id'] == 0:
            processed_chunks.append(chunk)
            intro_seen = intro_seen or has_intro
            related_work_seen = related_work_seen or has_related
            continue

        # Append once even when a chunk satisfies both patterns.
        if (has_intro and not intro_seen) or (has_related and not related_work_seen):
            processed_chunks.append(chunk)
        intro_seen = intro_seen or has_intro
        related_work_seen = related_work_seen or has_related

    return processed_chunks

def fetch_and_process_paper(paper_info):
    """Fetch one paper's chunks by id, filter them, and join the kept text.

    :param paper_info: dict with at least 'paper_id' and 'paper_title'.
    :return: dict with 'paper_id', 'paper_title', and 'merged_text' (the kept
        chunk texts concatenated, each followed by a newline).
    """
    pid = paper_info['paper_id']
    result = query_paper_by_id(pid, top_k=100)

    # Keep only the leading chunk plus intro / related-work chunks.
    kept_chunks = process_chunks(result.get('data', []))

    # Concatenate the surviving chunk texts, newline-terminated, in order.
    merged_text = ''.join(chunk['chunk_text'] + '\n' for chunk in kept_chunks)

    return {
        'paper_id': pid,
        'paper_title': paper_info['paper_title'],
        'merged_text': merged_text,
    }

def process_relevant_papers(unique_paper_list, judge_relevant_res, max_workers=5):
    """Select the papers whose LLM relevance score is greater than 3.

    :param unique_paper_list: list of paper-info dicts.
    :param judge_relevant_res: list of LLM responses, one per paper; each is
        expected to contain an integer relevance score (e.g. "4", " 4 ",
        or "Score: 4").
    :param max_workers: kept for interface compatibility; currently unused
        (the parallel chunk-fetching path was removed).
    :return: the paper-info dicts judged relevant, in their original order.
    :raises ValueError: when the two lists differ in length.
    """
    if len(judge_relevant_res) != len(unique_paper_list):
        raise ValueError("The lengths of judge_relevant_res and unique_paper_list must match.")

    def _score(response):
        # LLM replies are not always a bare integer; take the first integer
        # found anywhere in the text, or 0 (not relevant) when none exists.
        match = re.search(r'-?\d+', str(response))
        return int(match.group()) if match else 0

    return [paper
            for paper, response in zip(unique_paper_list, judge_relevant_res)
            if _score(response) > 3]


def process_search_results(rough_search_data):
    """Deduplicate search hits by paper_id.

    Keeps one entry per paper_id — the first hit encountered — recording its
    paper_title and that first hit's chunk_text. Hits whose 'entity' lacks
    'paper_id' or 'paper_title' are skipped entirely.
    """
    seen = {}

    for hit in rough_search_data['data']:
        if 'entity' not in hit:
            continue
        entity = hit['entity']
        if 'paper_id' not in entity or 'paper_title' not in entity:
            continue

        pid = entity['paper_id']
        if pid in seen:
            # Only the first occurrence's chunk_text is retained.
            continue

        seen[pid] = {
            'paper_id': pid,
            'paper_title': entity['paper_title'],
            'chunk_text': entity['chunk_text'],
        }

    # Materialize as a list for downstream iteration.
    unique_paper_list = list(seen.values())
    print("unique_paper_list have been obtained, the length is {}.".format(len(unique_paper_list)))

    return unique_paper_list

class outlineWriter():
    """Builds a survey outline for a topic via LLM calls over retrieved papers.

    Pipeline (driven by ``draft_outline``): search candidate papers, judge
    their relevance, draft rough section structures in parallel, merge them,
    expand each section into subsections, then edit the merged outline.
    """

    def __init__(self, model:str, api_key:str, api_url:str, database) -> None:
        
        self.model, self.api_key, self.api_url = model, api_key, api_url 
        self.api_model = APIModel(self.model, self.api_key, self.api_url)
        # Lighter model used for bulk, highly parallel calls.
        self.api_model_flash = APIModel("glm-4-flash", self.api_key, self.api_url)

        self.db = database
        self.token_counter = tokenCounter()
        # Running totals consumed by compute_price().
        self.input_token_usage, self.output_token_usage = 0, 0

    def draft_outline(self, topic, reference_num, chunk_size = 30000, section_num = 6):
        """Run the full outline pipeline for ``topic`` and return the edited outline.

        :param topic: survey topic string.
        :param reference_num: top-k papers to retrieve per search query.
        :param chunk_size: NOTE(review): currently unused in this method — confirm.
        :param section_num: number of top-level sections to request.
        """
        rough_search_data = search_papers(topic, top_k=reference_num)
        rough_search_data_2 = search_papers(topic+' survey', top_k=reference_num) # guard against an existing ready-made survey for the contest topic
        rough_search_data['data'].extend(rough_search_data_2['data'])

        unique_paper_list = process_search_results(rough_search_data)
        prompts = []
        for unique_paper in unique_paper_list:
            prompt = self.__generate_prompt(JUDGE_RELEVANT_PROMPT, paras={'TOPIC': topic,
            'TITLE': unique_paper['paper_title'],\
            'SEGMENT':BeautifulSoup(unique_paper['chunk_text'], 'html.parser').get_text(separator='|').strip()})
            prompts.append(prompt)
        print('JUDGE RELEVANT has began.')

        judge_relevant_res = self.api_model_flash.batch_chat(prompts, temperature=0.3,validate_response=True,max_threads=500)  # Time cost: 1k paper ~ 20s
        
        # print('Filt RELEVANT has began. (No LLM)')
        relevant_paper_list = process_relevant_papers(unique_paper_list, judge_relevant_res)
        print('Generate rough structure has began.')
        # import ipdb;ipdb.set_trace()
        outlines = self.generate_rough_structure(topic=topic, papers_list = relevant_paper_list, client_num = 5, section_num=section_num)

        section_outline = self.merge_outlines(topic=topic, outlines=outlines) # merging strategy follows AutoSurvey

        # Writing the conclusion is not handled for now.
        print('Generate subsection structure has began.')
        subsection_outlines = self.generate_subsection_outlines(topic=topic, section_outline= section_outline,rag_num= 50) # intro: no retrieval; conclusion: not split; others: retrieval-augmented with a shared prompt
        print('Merged_outline has began.')
        merged_outline = self.process_outlines(section_outline, subsection_outlines)
        # import ipdb;ipdb.set_trace()
        # edit final outline
        final_outline = self.edit_final_outline(merged_outline)
        # BeautifulSoup(final_outline, 'html.parser')
        return final_outline


    def compute_price(self):
        """Return the cost implied by the input/output tokens recorded so far."""
        return self.token_counter.compute_price(input_tokens=self.input_token_usage, output_tokens=self.output_token_usage, model=self.model)

    def generate_rough_structure(self, topic, papers_list, client_num, section_num):
        """Draft ``client_num`` rough outlines, each from a random share of the papers.

        NOTE(review): section_num is accepted but not referenced below —
        confirm whether ROUGH_STRUCTURE_PROMPT hard-codes the section count.
        """
        import random
        # Shuffle so each share gets a varied mix of papers.
        shuffled_papers_list = random.sample(papers_list, len(papers_list))
        
        # 2. Split papers_list into client_num shares.
        split_papers_lists = np.array_split(shuffled_papers_list, client_num)
        prompts = []
        # 3. Build one prompt per share and collect them into the prompts list.
        for i in trange(len(split_papers_lists), desc="Generating prompts"):
            split_papers = split_papers_lists[i]
            paper_texts = []

            for paper in split_papers:
                titles = paper['paper_title']
                # papers = BeautifulSoup(paper['merged_text'], 'html.parser')
                papers = BeautifulSoup(paper['chunk_text'], 'html.parser')
                paper_texts.append(f'---\npaper_title: {titles}\n\npaper_content:\n\n{papers}\n')
            # Merge all paper texts of this share into one string.
            combined_paper_texts = ''.join(paper_texts)

            # Build the prompt.
            prompt = self.__generate_prompt(
                ROUGH_STRUCTURE_PROMPT,
                paras={'PAPER LIST': combined_paper_texts, 'TOPIC': topic}
            )
            # import ipdb;ipdb.set_trace()
            # Add it to the prompts list.
            prompts.append(prompt)

        # self.input_token_usage += self.token_counter.num_tokens_from_list_string(prompts)
        # import ipdb;ipdb.set_trace()
        outlines = self.api_model_flash.batch_chat(text_batch=prompts, temperature=0.3)
        self.output_token_usage += self.token_counter.num_tokens_from_list_string(outlines)
        return outlines

    
    def merge_outlines(self, topic, outlines):
        """Merge several rough outlines into one via a single LLM call."""
        
        outline_texts = '' 
        for i, o in zip(range(len(outlines)), outlines):
            outline_texts += f'---\noutline_id: {i}\n\noutline_content:\n\n{o}\n'
        outline_texts+='---\n'
        prompt = self.__generate_prompt(MERGING_OUTLINE_PROMPT, paras={'OUTLINE LIST': outline_texts, 'TOPIC':topic})
        # self.input_token_usage += self.token_counter.num_tokens_from_string(prompt)

        outline = self.api_model.chat(prompt, temperature=0.3)
        self.output_token_usage += self.token_counter.num_tokens_from_string(outline)
        return outline
    
    def generate_subsection_outlines(self, topic, section_outline, rag_num):
        """Expand each section of ``section_outline`` into a subsection outline.

        Sections are prompted by position: index 0 (introduction) gets no
        retrieval; index 1 (notation) and later sections are augmented with
        papers retrieved for the section description. For the generic case
        the retrieved text is truncated to keep the prompt within MAX_TOKENS.

        NOTE(review): rag_num is accepted but the searches below hard-code
        top_k=40 — confirm which is intended.
        """

        survey_title, survey_sections, survey_section_descriptions = self.extract_title_sections_descriptions(section_outline)
        # import ipdb;ipdb.set_trace()
        prompts = []
        MAX_TOKENS = 100000  # budget kept below the 128k context limit
        for index, (section_name, section_description) in enumerate(zip(survey_sections, survey_section_descriptions)):
            if index == 0: # introduction
                prompt = self.__generate_prompt(SUBSECTION_OUTLINE_PROMPT_INTRO, 
                paras={'OVERALL OUTLINE': section_outline,
                        'SECTION NAME': section_name,\
                        'SECTION DESCRIPTION':section_description,'TOPIC':topic})
            elif index == 1: # notation
                paper_texts = '' 
                search_data = search_papers(section_description, top_k=40)
                for item in search_data['data']:
                    if 'entity' in item and 'paper_id' in item['entity'] and 'paper_title' in item['entity']:
                        paper_id = item['entity']['paper_id']
                        paper_title = item['entity']['paper_title']
                        chunk_text = item['entity']['chunk_text'] # chunk_text of the current hit

                        paper_texts += f'---\npaper_title: {paper_title}\n\npaper_content:\n\n{chunk_text}\n'
                        paper_texts+='---\n'
                prompt = self.__generate_prompt(SUBSECTION_OUTLINE_PROMPT_NOTATION, 
                                                paras={'OVERALL OUTLINE': section_outline,
                                                       'SECTION NAME': section_name,\
                                                        'SECTION DESCRIPTION':section_description,'TOPIC':topic,'PAPER LIST':paper_texts})
            else: # Notation, Method, Free section, Future section
                paper_texts = '' 
                search_data = search_papers(section_description, top_k=40)
                for item in search_data['data']:
                    if 'entity' in item and 'paper_id' in item['entity'] and 'paper_title' in item['entity']:
                        paper_id = item['entity']['paper_id']
                        paper_title = item['entity']['paper_title']
                        chunk_text = item['entity']['chunk_text'] # chunk_text of the current hit

                        paper_texts += f'---\npaper_title: {paper_title}\n\npaper_content:\n\n{chunk_text}\n'
                        paper_texts+='---\n'
                # Render the template with everything except PAPER LIST, so its
                # token count can be measured before fitting the papers in.
                fixed_template = self.__generate_prompt(SUBSECTION_OUTLINE_PROMPT_OTHERS, 
                                                paras={'OVERALL OUTLINE': section_outline,
                                                       'SECTION NAME': section_name,\
                                                        'SECTION DESCRIPTION':section_description,'TOPIC':topic})
                fixed_tokens = len(self.token_counter.encoding.encode(fixed_template))
                remaining_tokens = MAX_TOKENS - fixed_tokens
                # Truncate the retrieved text to the remaining token budget.
                paper_text_str = BeautifulSoup(paper_texts, 'html.parser').get_text()
                paper_tokens = len(self.token_counter.encoding.encode(paper_text_str))
                if paper_tokens > remaining_tokens:
                    tokens = self.token_counter.encoding.encode(paper_text_str)[:remaining_tokens]
                    # Cut back to the last sentence boundary so the text ends cleanly.
                    truncated_text = self.token_counter.encoding.decode(tokens).rsplit('. ', 1)[0] + '. '
                else:
                    truncated_text = paper_text_str
                # Fill PAPER LIST into the pre-rendered template to get the final prompt.
                prompt = self.__generate_prompt(
                    fixed_template,
                    paras={
                        'PAPER LIST': truncated_text
                    }
                )
                # prompt = self.__generate_prompt(SUBSECTION_OUTLINE_PROMPT_OTHERS, 
                #                                 paras={'OVERALL OUTLINE': section_outline,
                #                                        'SECTION NAME': section_name,\
                #                                         'SECTION DESCRIPTION':section_description,'TOPIC':topic,'PAPER LIST':paper_texts})
                # def truncate_paper_list_binary(paper_text, max_tokens):
                #         """ 使用二分法截断 PAPER LIST，确保 prompt 在 token 限制内 """
                #         low, high = 0, len(paper_text)
                #         best_fit = paper_text  # 记录最优截断版本
                #         while low <= high:
                #             mid = (low + high) // 2
                #             truncated_text = paper_text[:mid]
                #             # 计算截断后的 token 数量
                #             prompt = self.__generate_prompt(SUBSECTION_OUTLINE_PROMPT_OTHERS, 
                #                                 paras={'OVERALL OUTLINE': section_outline,
                #                                        'SECTION NAME': section_name,\
                #                                         'SECTION DESCRIPTION':section_description,'TOPIC':topic,'PAPER LIST':paper_texts})
                #             token_count = self.token_counter.num_tokens_from_string(prompt)
                #             if token_count > max_tokens:
                #                 high = mid - 1000  # 继续缩小 paper_list
                #             else:
                #                 best_fit = truncated_text
                #                 low = mid + 1000  # 试图保留更多内容
                #         return best_fit
                # if self.token_counter.num_tokens_from_string(prompt) > MAX_TOKENS:
                #         paper_texts = truncate_paper_list_binary(paper_texts, MAX_TOKENS)
                #         # 重新生成符合 token 限制的 prompt
                #         prompt = self.__generate_prompt(SUBSECTION_OUTLINE_PROMPT_OTHERS, 
                #                                 paras={'OVERALL OUTLINE': section_outline,
                #                                        'SECTION NAME': section_name,\
                #                                         'SECTION DESCRIPTION':section_description,'TOPIC':topic,'PAPER LIST':paper_texts})
            prompts.append(prompt)
        # self.input_token_usage += self.token_counter.num_tokens_from_list_string(prompts)

        sub_outlines = self.api_model_flash.batch_chat(prompts, temperature=0.3)

        self.output_token_usage += self.token_counter.num_tokens_from_list_string(sub_outlines)
        return sub_outlines

    def edit_final_outline(self, outline):
        """Polish the merged outline with the main model; strip <format> wrapper tags."""
        
        prompt = self.__generate_prompt(EDIT_FINAL_OUTLINE_PROMPT, paras={'OVERALL OUTLINE': outline})
        # self.input_token_usage += self.token_counter.num_tokens_from_string(prompt)
        outline = self.api_model.chat(prompt, temperature=0.3)
        self.output_token_usage += self.token_counter.num_tokens_from_string(outline)
        return outline.replace('<format>\n','').replace('</format>','')
 
    def __generate_prompt(self, template, paras):
        """Substitute each '[KEY]' placeholder in ``template`` with paras[KEY]."""
        prompt = template
        for k in paras.keys():
            prompt = prompt.replace(f'[{k}]', paras[k])
        return prompt
    
    def extract_title_sections_descriptions(self, outline):
        """Parse 'Title: ...', 'Section i: ...' and 'Description i: ...' lines
        (i up to 100) out of an outline string; returns (title, sections, descriptions)."""
        title = outline.split('Title: ')[1].split('\n')[0]
        sections, descriptions = [], []
        for i in range(100):
            if f'Section {i+1}' in outline:
                sections.append(outline.split(f'Section {i+1}: ')[1].split('\n')[0])
                descriptions.append(outline.split(f'Description {i+1}: ')[1].split('\n')[0])
        return title, sections, descriptions
    
    def extract_subsections_subdescriptions(self, outline):
        """Parse 'Subsection i: ...' / 'Description i: ...' pairs (i up to 100)."""
        subsections, subdescriptions = [], []
        for i in range(100):
            if f'Subsection {i+1}' in outline:
                subsections.append(outline.split(f'Subsection {i+1}: ')[1].split('\n')[0])
                subdescriptions.append(outline.split(f'Description {i+1}: ')[1].split('\n')[0])
        return subsections, subdescriptions
    
    def chunking(self, papers, titles, chunk_size = 14000):
        """Split parallel lists of papers/titles into roughly equal token chunks.

        Boundaries are chosen greedily: a split point is recorded whenever the
        running token count exceeds the per-chunk average.
        """
        paper_chunks, title_chunks = [], []
        total_length = self.token_counter.num_tokens_from_list_string(papers)
        num_of_chunks = int(total_length / chunk_size) + 1
        avg_len = int(total_length / num_of_chunks) + 1
        split_points = []
        l = 0
        for j in range(len(papers)):
            l += self.token_counter.num_tokens_from_string(papers[j])
            if l > avg_len:
                l = 0
                split_points.append(j)
                continue
        start = 0
        for point in split_points:
            paper_chunks.append(papers[start:point])
            title_chunks.append(titles[start:point])
            start = point
        paper_chunks.append(papers[start:])
        title_chunks.append(titles[start:])
        return paper_chunks, title_chunks
       
    def process_outlines(self, section_outline, sub_outlines):
        """Assemble the section outline plus per-section subsection outlines into
        one markdown document ('# title', '## i section', '### i.j subsection')."""
        res = ''
        survey_title, survey_sections, survey_section_descriptions = self.extract_title_sections_descriptions(outline=section_outline)
        res += f'# {survey_title}\n\n'
        for i in range(len(survey_sections)):
            section = survey_sections[i]
            res += f'## {i+1} {section}\nDescription: {survey_section_descriptions[i]}\n\n'
            subsections, subsection_descriptions = self.extract_subsections_subdescriptions(sub_outlines[i])
            for j in range(len(subsections)):
                subsection = subsections[j]
                res += f'### {i+1}.{j+1} {subsection}\nDescription: {subsection_descriptions[j]}\n\n'
        return res

