import os
import re
import threading
import numpy as np
from tqdm import trange,tqdm
from src.model import APIModel
import time
from src.utils import tokenCounter
import copy
import json
from src.prompt import SUBSECTION_WRITING_PROMPT, LCE_PROMPT, CHECK_CITATION_PROMPT
import logging
import threading
import time

# Logging configuration
# logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')

class subsectionWriter():
    """
    Subsection writer: generates and refines the subsection contents of a
    survey report.
    """
    def __init__(self, model: str, api_key: str, api_url: str, database) -> None:
        """
        Set up the writer with its backing model client and paper database.
        Args:
            model: name of the LLM to use
            api_key: API key for the model service
            api_url: endpoint URL of the model service
            database: paper database instance used for retrieval
        """
        self.model = model
        self.api_key = api_key
        self.api_url = api_url
        self.api_model = APIModel(self.model, self.api_key, self.api_url)

        self.db = database
        self.token_counter = tokenCounter()
        # Running token-usage counters (read by compute_price).
        self.input_token_usage = 0
        self.output_token_usage = 0

    def write(self, topic, outline, rag_num=30, subsection_len=300, refining=True, reflection=True):
        """
        Generate a complete survey for `topic` following `outline`.
        Args:
            topic: survey topic
            outline: markdown outline text (parsed by `parse_outline`)
            rag_num: number of papers retrieved per subsection description
            subsection_len: target word count per subsection
            refining: if True, additionally run the LCE refinement pass
            reflection: accepted for interface compatibility; not used here
        Returns:
            (raw_survey, raw_survey_with_references, raw_references) and, when
            `refining` is True, the refined counterparts appended as well.
        """
        parsed_outline = self.parse_outline(outline=outline)
        num_sections = len(parsed_outline['sections'])

        # BUGFIX: the original used `[[]] * n`, which makes every slot alias
        # the SAME inner list -- every `.append()` below then landed in all
        # sections at once. Independent inner lists are required.
        section_content = [[] for _ in range(num_sections)]
        section_paper_texts = [[] for _ in range(num_sections)]
        section_references_ids = [[] for _ in range(num_sections)]

        # Retrieve candidate paper ids for every subsection description.
        total_ids = []
        for i in range(num_sections):
            for d in parsed_outline['subsection_descriptions'][i]:
                references_ids = self.db.get_ids_from_query(d, num=rag_num, shuffle=False)
                total_ids += references_ids
                section_references_ids[i].append(references_ids)

        # Fetch paper info once for the de-duplicated id set.
        total_references_infos = self.db.get_paper_info_from_ids(list(set(total_ids)))
        temp_title_dic = {p['id']: p['title'] for p in total_references_infos}
        temp_abs_dic = {p['id']: p['abs'] for p in total_references_infos}

        # Build the "paper list" text block fed to each subsection's prompt.
        for i in range(num_sections):
            for references_ids in section_references_ids[i]:
                paper_texts = ''
                for rid in references_ids:
                    paper_texts += f'---\n\npaper_title: {temp_title_dic[rid]}\n\npaper_content:\n\n{temp_abs_dic[rid]}\n'
                paper_texts += '---\n'
                section_paper_texts[i].append(paper_texts)

        # One thread per section writes its subsections in parallel; each
        # thread assigns only section_content[i], so no lock is needed.
        thread_l = []
        for i in range(num_sections):
            thread = threading.Thread(
                target=self.write_subsection_with_reflection,
                args=(section_paper_texts[i], topic, outline, parsed_outline['sections'][i],
                      parsed_outline['subsections'][i], parsed_outline['subsection_descriptions'][i],
                      section_content, i, rag_num, str(subsection_len)))
            thread_l.append(thread)
            thread.start()
            time.sleep(0.1)  # stagger thread start to smooth API load
        for thread in thread_l:
            thread.join()

        raw_survey = self.generate_document(parsed_outline, section_content)
        raw_survey_with_references, raw_references = self.process_references(raw_survey)
        logging.info("Raw survey generated successfully.")

        if refining:
            final_section_content = self.refine_subsections(topic, outline, section_content)
            refined_survey = self.generate_document(parsed_outline, final_section_content)
            refined_survey_with_references, refined_references = self.process_references(refined_survey)
            logging.info("Refined survey generated successfully.")
            return (raw_survey + '\n', raw_survey_with_references + '\n', raw_references,
                    refined_survey + '\n', refined_survey_with_references + '\n', refined_references)
        else:
            return raw_survey + '\n', raw_survey_with_references + '\n', raw_references

    def compute_price(self):
        """
        Compute the monetary cost of the API usage recorded so far.
        Returns:
            Price derived from the accumulated input/output token counts.
        """
        return self.token_counter.compute_price(
            input_tokens=self.input_token_usage,
            output_tokens=self.output_token_usage,
            model=self.model,
        )

    def refine_subsections(self, topic, outline, section_content):
        """
        Run the two-pass local-coherence-enhancement (LCE) refinement.

        Subsections are refined in two interleaved passes (even indices first,
        then odd indices against the even-pass result) so that a subsection is
        never rewritten concurrently with an immediate neighbour.

        Args:
            topic: survey topic
            outline: full outline text (context for the LCE prompt)
            section_content: per-section lists of subsection texts
        Returns:
            A refined copy of `section_content`; the input is not modified.
        """
        def neighbourhood(subsections, j):
            # [previous, current, following]; '' marks a missing neighbour.
            # BUGFIX: the original built `[''] + subsections[:2]` for j == 0,
            # which yields only TWO elements when a section has a single
            # subsection -- `lce` then crashed reading contents[2].
            prev_text = subsections[j - 1] if j > 0 else ''
            next_text = subsections[j + 1] if j + 1 < len(subsections) else ''
            return [prev_text, subsections[j], next_text]

        def run_pass(source, target, parity):
            # Refine every subsection whose index has the given parity,
            # reading context from `source` and writing into `target`.
            threads = []
            for i in range(len(source)):
                for j in range(len(source[i])):
                    if j % 2 == parity:
                        thread = threading.Thread(
                            target=self.lce,
                            args=(topic, outline, neighbourhood(source[i], j), target[i], j))
                        threads.append(thread)
                        thread.start()
            for thread in threads:
                thread.join()

        # Pass 1: even-indexed subsections, context taken from the raw text.
        section_content_even = copy.deepcopy(section_content)
        run_pass(section_content, section_content_even, 0)

        # Pass 2: odd-indexed subsections, context from the pass-1 result.
        final_section_content = copy.deepcopy(section_content_even)
        run_pass(section_content_even, final_section_content, 1)

        return final_section_content

    def write_subsection_with_reflection(self, paper_texts_l, topic, outline, section, subsections, subdescriptions, res_l, idx, rag_num = 30, subsection_len = 1000, citation_num = 8, retry_cur_times = 0):
        """
        Write all subsections of one section, then run a citation-check
        ("reflection") pass over each generated text.

        Designed to run as a thread target: the final texts are stored into
        res_l[idx] (this thread writes only its own slot) and also returned.

        Args:
            paper_texts_l: one pre-built paper-list text per subsection
            topic: survey topic
            outline: overall outline text (context for the writing prompt)
            section: section name
            subsections: subsection names of this section
            subdescriptions: one description per subsection
            res_l: shared per-section result list; only res_l[idx] is written
            idx: index of this section in res_l
            rag_num: accepted for interface compatibility; not used here
            subsection_len: target word count per subsection
            citation_num: requested number of citations per subsection
            retry_cur_times: current retry depth; gives up once it exceeds 3
        Returns:
            List of generated subsection texts, or [] on validation failure
            or retry exhaustion.
        """
        # Give up once the retry budget is exhausted.
        if retry_cur_times > 3:
            logging.error(f"章节 '{section}' 的小节处理失败，已重试3次")
            return []

        logging.info(f"开始处理章节 '{section}' 的小节")
        logging.info(f"小节数量: {len(subsections)}, 描述数量: {len(subdescriptions)}")
        
        if len(subsections) != len(subdescriptions):
            logging.error(f"小节数量({len(subsections)})与描述数量({len(subdescriptions)})不匹配")
            return []
            
        if not subsections:
            logging.warning(f"章节 '{section}' 没有小节")
            return []
            
        # Build one writing prompt per subsection.
        prompts = []
        for j in range(len(subsections)):
            try:
                subsection = subsections[j]
                description = subdescriptions[j]
                
                # Skip subsections that have no matching paper-list entry.
                if not isinstance(paper_texts_l, list) or j >= len(paper_texts_l):
                    logging.error(f"paper_texts_l 格式错误或长度不足: {paper_texts_l}")
                    continue
                    
                prompt = self.__generate_prompt(SUBSECTION_WRITING_PROMPT, paras={
                    'OVERALL OUTLINE': outline,
                    'SUBSECTION NAME': subsection,
                    'DESCRIPTION': description,
                    'TOPIC': topic,
                    'PAPER LIST': paper_texts_l[j],
                    'SECTION NAME': section,
                    'WORD NUM': str(subsection_len),
                    'CITATION NUM': str(citation_num)
                })
                prompts.append(prompt)
                
            except Exception as e:
                logging.error(f"处理小节 {j} 时出错: {str(e)}")
                continue
                
        if not prompts:
            logging.error("没有生成任何提示")
            return []
            
        try:
            # self.input_token_usage += self.token_counter.num_tokens_from_list_string(prompts)
            # First pass: draft every subsection in one batched call.
            contents = self.api_model.batch_chat(prompts, temperature=1)

            # contents = [self.api_model.chat(prompt) for prompt in prompts]            

            # self.output_token_usage += self.token_counter.num_tokens_from_list_string(contents)
            # Strip the <format> wrapper tags the prompt asks the model to emit.
            contents = [c.replace('<format>','').replace('</format>','') for c in contents]
            
            # Second pass (reflection): ask the model to verify each draft's
            # citations against the paper list it was given.
            check_prompts = []
            for content, paper_texts in zip(contents, paper_texts_l[:len(contents)]):
                check_prompts.append(self.__generate_prompt(CHECK_CITATION_PROMPT, paras={
                    'SUBSECTION': content,
                    'TOPIC': topic,
                    'PAPER LIST': paper_texts
                }))
                
            # self.input_token_usage += self.token_counter.num_tokens_from_list_string(check_prompts)
            contents = self.api_model.batch_chat(check_prompts, temperature=1)
            # contents = [self.api_model.chat(prompt) for prompt in prompts]
            # self.output_token_usage += self.token_counter.num_tokens_from_list_string(contents)
            contents = [c.replace('<format>','').replace('</format>','') for c in contents]
            
            res_l[idx] = contents
            logging.info(f"章节 '{section}' 的小节处理完成")
            return contents
            
        except Exception as e:
            # Any API failure triggers a full retry of this section.
            logging.error(f"生成内容时出错-重试{retry_cur_times+1}次: {str(e)}")
            return self.write_subsection_with_reflection(paper_texts_l, topic, outline, section, subsections, subdescriptions, res_l, idx, rag_num, subsection_len, citation_num, retry_cur_times + 1)
        
    def __generate_prompt(self, template, paras):
        """
        生成提示模板
        Args:
            template: 模板
            paras: 参数字典
        Returns:
            生成的提示文本
        """
        prompt = template
        for k in paras.keys():
            prompt = prompt.replace(f'[{k}]', paras[k])
        return prompt
    
    def generate_prompt(self, template, paras):
        """
        Public variant of the template filler: substitute each `[KEY]`
        placeholder in `template` with its value from `paras`.
        Args:
            template: template text containing `[KEY]` markers
            paras: mapping from placeholder name to replacement text
        Returns:
            The template with all placeholders replaced.
        """
        result = template
        for placeholder, text in paras.items():
            result = result.replace('[' + placeholder + ']', text)
        return result
    
    def lce(self, topic, outline, contents, res_l, idx):
        """
        Local Coherence Enhancement: rewrite one subsection so it connects
        fluidly with its neighbours.

        Args:
            topic: survey topic
            outline: overall outline text
            contents: [previous, current, following] subsection texts; ''
                marks a missing neighbour at either boundary
            res_l: output list; the refined text is written to res_l[idx]
            idx: index of the subsection within its section
        Returns:
            The refined subsection text.
        """
        prompt = self.__generate_prompt(LCE_PROMPT, paras={
            'OVERALL OUTLINE': outline,
            'PREVIOUS': contents[0],
            'FOLLOWING': contents[2],
            'TOPIC': topic,
            'SUBSECTION': contents[1],
        })
        refined_content = self.api_model.chat(prompt, temperature=1).replace('<format>', '').replace('</format>', '')
        # Drop the boilerplate lead-in some models prepend. BUGFIX: the
        # original stripped it only from the RETURN value while storing the
        # unstripped text in res_l[idx] -- but callers (refine_subsections
        # threads) read res_l, so the prefix leaked into the survey. Strip
        # first, then store and return the same cleaned text.
        refined_content = refined_content.replace('Here is the refined subsection:\n', '')
        res_l[idx] = refined_content
        return refined_content

    def parse_outline(self, outline):
        """
        Parse a markdown outline into its structural parts, tolerating
        formatting glitches and duplicated headings.

        Recognized markers: '# ' title, '## ' section, '### ' subsection, and
        'Description:' lines immediately following a (sub)section heading.

        Args:
            outline: outline text in markdown form
        Returns:
            Dict with keys 'title', 'sections', 'section_descriptions',
            'subsections' (one list per section) and
            'subsection_descriptions' (one list per section).
        """
        result = {
            "title": "",
            "sections": [],
            "section_descriptions": [],
            "subsections": [],
            "subsection_descriptions": []
        }
        
        # Index of the section currently being filled (-1 = none yet).
        current_section_index = -1
        
        # Clean and pre-process the outline text.
        lines = []
        for line in outline.split('\n'):
            # Drop blank lines.
            if not line.strip():
                continue
            # Trim trailing punctuation / stray bracket characters.
            line = line.strip().rstrip('.,;]')
            # Skip lines containing nothing but a bracketed number.
            if re.match(r'^\s*\[\d+\]\s*$', line):
                continue
            lines.append(line)
        
        # Containers used for duplicate detection.
        seen_sections = set()
        seen_subsections = {}  # per-section set of subsection titles
        
        for i, line in enumerate(lines):
            try:
                if line.startswith('# '):
                    # Title line.
                    title = line[2:].strip()
                    if not result["title"]:  # keep only the first title
                        result["title"] = title
                    
                elif line.startswith('## '):
                    # Section heading.
                    section_title = line[3:].strip()
                    # Skip duplicated sections.
                    if section_title in seen_sections:
                        logging.warning(f"发现重复章节: {section_title}")
                        continue
                        
                    seen_sections.add(section_title)
                    result["sections"].append(section_title)
                    current_section_index += 1
                    
                    # Initialize this section's containers.
                    result["subsections"].append([])
                    result["subsection_descriptions"].append([])
                    seen_subsections[current_section_index] = set()
                    
                    # Grab the section description from the next line, if any.
                    if i + 1 < len(lines) and lines[i + 1].strip().startswith('Description:'):
                        desc = lines[i + 1].split('Description:', 1)[1].strip()
                        result["section_descriptions"].append(desc)
                    else:
                        result["section_descriptions"].append("")
                    
                elif line.startswith('### '):
                    # Subsection heading.
                    if current_section_index >= 0:
                        subsection_title = line[4:].strip()
                        
                        # Skip duplicated subsections within this section.
                        if subsection_title in seen_subsections[current_section_index]:
                            logging.warning(f"在章节 '{result['sections'][current_section_index]}' 中发现重复小节: {subsection_title}")
                            continue
                            
                        seen_subsections[current_section_index].add(subsection_title)
                        result["subsections"][current_section_index].append(subsection_title)
                        
                        # Grab the subsection description from the next line, if any.
                        if i + 1 < len(lines) and lines[i + 1].strip().startswith('Description:'):
                            desc = lines[i + 1].split('Description:', 1)[1].strip()
                            result["subsection_descriptions"][current_section_index].append(desc)
                        else:
                            result["subsection_descriptions"][current_section_index].append("")
                    else:
                        logging.error(f"发现未归属的小节: {line}")
                
            except Exception as e:
                logging.error(f"解析第 {i+1} 行时出错: {str(e)}\n行内容: {line}")
                continue
        
        # Validate and normalize the result.
        if not result["title"]:
            logging.warning("未找到有效的标题")
            result["title"] = "Survey"
            
        if not result["sections"]:
            logging.warning("未找到有效的章节")
            
        # Pad the parallel lists so all are as long as 'sections'.
        while len(result["section_descriptions"]) < len(result["sections"]):
            result["section_descriptions"].append("")
            
        while len(result["subsections"]) < len(result["sections"]):
            result["subsections"].append([])
            result["subsection_descriptions"].append([])
            
        # Parsing statistics.
        logging.info(f"解析完成: {len(result['sections'])} 个章节, " + 
                    f"{sum(len(subs) for subs in result['subsections'])} 个小节")
        
        # Duplicate statistics.
        # NOTE(review): duplicates are skipped BEFORE being appended above, so
        # seen_sections / seen_subsections always match the result lists in
        # size -- the two warnings below look unreachable as written.
        if len(seen_sections) < len(result["sections"]):
            logging.warning(f"删除了 {len(result['sections']) - len(seen_sections)} 个重复章节")
            
        total_duplicate_subsections = sum(
            len(result['subsections'][i]) - len(seen_subsections[i])
            for i in range(len(result['subsections']))
        )
        if total_duplicate_subsections > 0:
            logging.warning(f"删除了 {total_duplicate_subsections} 个重复小节")
        
        return result
    
    def parse_survey(self, survey, outline):
        """
        Extract subsection names and descriptions from a numbered outline.
        Args:
            survey: survey text (not read by the extraction itself)
            outline: text containing 'Subsection N: ' / 'Description N: ' lines
        Returns:
            (subsections, subdescriptions) lists, in numbering order (1..100).
        """
        subsections = []
        subdescriptions = []
        for num in range(1, 101):
            if f'Subsection {num}' not in outline:
                continue
            name_part = outline.split(f'Subsection {num}: ')[1]
            subsections.append(name_part.split('\n')[0])
            desc_part = outline.split(f'Description {num}: ')[1]
            subdescriptions.append(desc_part.split('\n')[0])
        return subsections, subdescriptions

    def process_references(self, survey):
        """
        Replace bracketed citations in `survey` with numeric markers and
        append a References section.
        Args:
            survey: survey markdown text
        Returns:
            (text with numbered citations plus references section,
             mapping of reference number to paper id)
        """
        extracted = self.extract_citations(survey)
        return self.replace_citations_with_numbers(extracted, survey)

    def generate_document(self, parsed_outline, subsection_contents):
        """
        Assemble the final markdown document from the parsed outline and the
        per-subsection body texts.
        Args:
            parsed_outline: dict with 'title', 'sections', 'subsections'
            subsection_contents: subsection_contents[i][j] is the body text of
                subsection j of section i; missing entries are skipped
        Returns:
            The assembled markdown document as one string.
        """
        parts = [f"# {parsed_outline['title']}\n"]
        for i, section in enumerate(parsed_outline['sections']):
            parts.append(f"## {section}\n")
            for j, subsection in enumerate(parsed_outline['subsections'][i]):
                parts.append(f"### {subsection}\n")
                # Body text is emitted only when both indices are in range.
                if i < len(subsection_contents) and j < len(subsection_contents[i]):
                    parts.append(subsection_contents[i][j] + "\n")
        return "\n".join(parts)

    def process_outlines(self, section_outline, sub_outlines):
        """
        Merge a section-level outline with per-section subsection outlines
        into one numbered markdown outline.

        NOTE(review): relies on `extract_title_sections_descriptions` and
        `extract_subsections_subdescriptions`, which are not visible in this
        file -- confirm they exist on this class.

        Args:
            section_outline: outline text with title/sections/descriptions
            sub_outlines: per-section outline texts for the subsections
        Returns:
            The combined, numbered outline text.
        """
        title, sections, section_descriptions = self.extract_title_sections_descriptions(outline=section_outline)
        pieces = [f'# {title}\n\n']
        for i, section in enumerate(sections):
            pieces.append(f'## {i+1} {section}\nDescription: {section_descriptions[i]}\n\n')
            subsections, sub_descriptions = self.extract_subsections_subdescriptions(sub_outlines[i])
            for j, subsection in enumerate(subsections):
                pieces.append(f'### {i+1}.{j+1} {subsection}\nDescription: {sub_descriptions[j]}\n\n')
        return ''.join(pieces)
    
    def generate_mindmap(self, subsection_citations, outline):
        """
        Produce a mind-map text: the outline's heading skeleton with each
        subsection heading followed by its citation list.
        Args:
            subsection_citations: citation list per subsection, outline order
            outline: markdown outline text
        Returns:
            The transformed outline text.
        """
        # Drop every line that carries no heading marker.
        for raw_line in outline.split('\n'):
            if '#' not in raw_line:
                outline = outline.replace(raw_line, '')
        # Insert each subsection's citation list right below its heading.
        headings = re.findall(pattern=r'### (.*?)\n', string=outline)
        for pos, heading in enumerate(headings):
            outline = outline.replace(heading, heading + '\n' + str(subsection_citations[pos]))
        # Remove stray text between a closing bracket and the next heading.
        for stray in re.findall(pattern=r'\](.*?)#', string=outline):
            outline = outline.replace(stray, '')
        return outline

    def extract_citations(self, markdown_text):
        """
        Collect the distinct citation strings from bracketed groups.

        Citations appear as `[a; b; c]`; each group is split on ';' and each
        entry is stripped. First-appearance order is preserved.
        Args:
            markdown_text: markdown text to scan
        Returns:
            De-duplicated list of citation strings.
        """
        seen = []
        for group in re.findall(r'\[(.*?)\]', markdown_text):
            for piece in group.split(';'):
                candidate = piece.strip()
                if candidate not in seen:
                    seen.append(candidate)
        return seen
    def replace_citations_with_numbers(self, citations, markdown_text):
        """
        Replace bracketed citation titles with `<sup>n</sup>` markers and
        append a formatted References section.

        Args:
            citations: distinct citation strings found in the text
            markdown_text: survey markdown using `[title; title]` citations
        Returns:
            (text with numeric superscript citations plus references section,
             mapping of reference number to paper id)
        """
        # Resolve citation strings to database ids.
        ids = self.db.get_titles_from_citations(citations)

        citation_to_ids = {citation: idx for citation, idx in zip(citations, ids)}

        paper_infos = self.db.get_paper_info_from_ids(ids)
        temp_dic = {p['id']: {'title': p['title'], 'date': p['date'], 'journal_abbreviation': p['journal_abbreviation'],'chunk_id':p["chunk_id"]} for p in paper_infos}

        # Per-id metadata, aligned with `ids` order (tqdm only shows progress).
        titles = [temp_dic[_]['title'] for _ in tqdm(ids)]
        dates = [temp_dic[_]['date'] for _ in tqdm(ids)]
        journal_abbreviations = [temp_dic[_]['journal_abbreviation'] for _ in tqdm(ids)]
        chunk_ids = [temp_dic[_]['chunk_id'] for _ in tqdm(ids)]

        ids_to_titles = {idx: title for idx, title in zip(ids, titles)}
        titles_to_ids = {title: idx for idx, title in ids_to_titles.items()}

        # Number titles 1..N; rebuilding from the dict keys de-duplicates
        # repeated titles while keeping first-seen order.
        title_to_number = {title: num+1 for num, title in enumerate(titles)}
        title_to_number = {title: num+1 for num, title in enumerate(title_to_number.keys())}

        number_to_title = {num: title for title, num in title_to_number.items()}
        number_to_title_sorted = {key: number_to_title[key] for key in sorted(number_to_title)}

        def replace_match(match):
            # Map each citation inside one bracket group to its number.
            citation_text = match.group(1)
            individual_citations = citation_text.split(';')
            numbered_citations = [str(title_to_number[ids_to_titles[citation_to_ids[citation.strip()]]]) for citation in individual_citations]
            return '<sup>' + '; '.join(numbered_citations) + '</sup>'

        updated_text = re.sub(r'\[(.*?)\]', replace_match, markdown_text)

        references_section = "\n\n## References\n\n"
        references = {num: titles_to_ids[title] for num, title in number_to_title_sorted.items()}
        for idx, title in number_to_title_sorted.items():
            t = title.replace('\n', '')
            # NOTE(review): `idx - 1` indexes back into the id-aligned lists;
            # this only lines up when titles are unique -- duplicate titles
            # would shift numbering. Confirm against db guarantees.
            date = dates[idx - 1]  # numbering starts at 1, hence -1
            journal_abbreviation = journal_abbreviations[idx - 1]
            chunk_id = chunk_ids[idx - 1]
            references_section += f"[{idx}] {t}, {journal_abbreviation}, {date}, chunk {chunk_id}\n\n"

        return updated_text + references_section, references
    def replace_citations_with_numbers1(self, citations, markdown_text):
        """
        Legacy variant of `replace_citations_with_numbers`: keeps `[n]`
        bracket markers (instead of `<sup>` tags) and lists only titles
        (no date/journal/chunk info) in the References section.

        Args:
            citations: distinct citation strings found in the text
            markdown_text: survey markdown using `[title; title]` citations
        Returns:
            (text with `[n]` citation markers plus references section,
             mapping of reference number to paper id)
        """
        # Resolve citation strings to database ids.
        ids = self.db.get_titles_from_citations(citations)

        citation_to_ids = {citation: idx for citation, idx in zip(citations, ids)}

        paper_infos = self.db.get_paper_info_from_ids(ids)
        temp_dic = {p['id']:p['title'] for p in paper_infos}

        # Titles aligned with `ids` order (tqdm only shows progress).
        titles = [temp_dic[_] for _ in tqdm(ids)]

        ids_to_titles = {idx: title for idx, title in zip(ids, titles)}
        titles_to_ids = {title: idx for idx, title in ids_to_titles.items()}

        title_to_number = {title: num+1 for  num, title in enumerate(titles)}


        # Rebuilding from the dict keys de-duplicates repeated titles while
        # keeping first-seen order.
        title_to_number = {title: num+1 for  num, title in enumerate(title_to_number.keys())}

        number_to_title = {num: title for  title, num in title_to_number.items()}
        number_to_title_sorted =  {key: number_to_title[key] for key in sorted(number_to_title)}

        def replace_match(match):
            # Map each citation inside one bracket group to its number.
            citation_text = match.group(1)

            individual_citations = citation_text.split(';')

            numbered_citations = [str(title_to_number[ids_to_titles[citation_to_ids[citation.strip()]]]) for citation in individual_citations]

            return '[' + '; '.join(numbered_citations) + ']'
        

        updated_text = re.sub(r'\[(.*?)\]', replace_match, markdown_text)

        references_section = "\n\n## References\n\n"

        references = {num: titles_to_ids[title] for num, title in number_to_title_sorted.items()}
        for idx, title in number_to_title_sorted.items():
            t = title.replace('\n','')
            references_section += f"[{idx}] {t}\n\n"

        return updated_text + references_section, references
