"""Survey generator module using LangChain."""
from typing import Dict, List, Optional, Tuple, NamedTuple
import re
from dataclasses import dataclass
from pathlib import Path

from langchain_core.output_parsers import StrOutputParser
from tqdm import tqdm

from llm import llm
from prompts import *
from retriever import retriever, PaperChunk
from config import OUTPUT_DIR
from utils import parallel_process, parallel_outline_generate, OutlineData

class SectionData(NamedTuple):
    """Data needed to generate subsections for a section."""
    # Title of the section being expanded.
    section_name: str
    # One-line description of the section's scope.
    section_description: str
    # Full merged outline text, passed to prompts for context.
    overall_outline: str
    # Survey topic.
    topic: str
    # Consolidated paper-text batches retrieved for this section
    # (presumably a list of strings from retriever.consolidate_chunks — confirm).
    paper_list: list

class SubsectionData(NamedTuple):
    """Data needed to generate content for a subsection."""
    # Title of the parent section.
    section_name: str
    # Title of the subsection to write.
    subsection_name: str
    # One-line description of the subsection's scope.
    subsection_description: str
    # Outline text (with Description lines removed) for prompt context.
    overall_outline: str
    # Survey topic.
    topic: str
    # A single consolidated paper-text batch (string, unlike SectionData.paper_list).
    paper_list: str
    # Minimum word count requested from the LLM.
    min_words: int

@dataclass
class Section:
    """Section data class."""
    # Section title.
    name: str
    # One-line description of the section's scope.
    description: str
    # (name, description) pairs for each subsection; None until populated.
    subsections: Optional[List[Tuple[str, str]]] = None

@dataclass
class Survey:
    """Survey data class."""
    # Survey title.
    title: str
    # Top-level sections in document order.
    sections: List[Section]
    # Maps subsection name -> generated content; None until content is written.
    content: Optional[Dict[str, str]] = None

class SurveyGenerator:
    """Academic survey generator."""
    
    def __init__(self):
        # Compose the generation pipeline: the LLM piped into the output parser.
        # NOTE(review): `output_parser` is not among this module's explicit
        # imports — presumably it comes from `from prompts import *`; confirm.
        self.chain = llm | output_parser
        
    def _parse_outline(self, outline: str) -> Tuple[str, List["Section"]]:
        """Parse a plain-text outline into a title and a list of Sections.

        Expects the prompt's numbered format::

            Title: <survey title>
            Section 1: <name>
            Description 1: <description>
            ...

        Args:
            outline: Raw outline text.

        Returns:
            Tuple of (title, sections).

        Raises:
            IndexError: If the outline contains no ``Title: `` line.
        """
        lines = outline.strip().split("\n")
        title = outline.split('Title: ')[1].split('\n')[0]
        sections = []

        current_section = None
        for line in lines[1:]:
            if not line.strip():
                continue

            if line.startswith("Section"):
                if current_section:
                    sections.append(current_section)
                # maxsplit=1 so a name that itself contains ": " is kept whole
                # (the old split(": ")[1] truncated it at the second colon).
                parts = line.split(": ", 1)
                if len(parts) < 2:
                    continue  # malformed line: skip instead of raising IndexError
                current_section = Section(name=parts[1].strip(), description="")
            elif line.startswith("Description"):
                parts = line.split(": ", 1)
                if current_section and len(parts) == 2:
                    current_section.description = parts[1].strip()

        if current_section:
            sections.append(current_section)

        return title, sections
        
    def _parse_subsections(self, text: str) -> List[Tuple[str, str]]:
        """Parse subsection text into list of (name, description) tuples."""
        lines = text.strip().split("\n")
        subsections = []
        
        current_name = None
        for line in lines:
            if not line.strip():
                continue
                
            if line.startswith("Subsection"):
                current_name = line.split(": ")[1].strip()
            elif line.startswith("Description"):
                if current_name:
                    description = line.split(": ")[1].strip()
                    subsections.append((current_name, description))
                    current_name = None
                    
        return subsections
    
    def _generate_prompt(self, template, paras):
        prompt = template
        for k in paras.keys():
            prompt = prompt.replace(f'[{k}]', paras[k])
        return prompt
    
    def parse_outline(self, outline):
        result = {
            "title": "",
            "sections": [],
            "section_descriptions": [],
            "subsections": [],
            "subsection_descriptions": []
        }

        # Split the outline into lines
        lines = outline.split('\n')

        for i, line in enumerate(lines):
            # Match title, sections, subsections and their descriptions
            if line.startswith('# '):
                result["title"] = line[2:].strip()
            elif line.startswith('## '):
                result["sections"].append(line[3:].strip())
                # Extract the description in the next line
                if i + 1 < len(lines) and lines[i + 1].startswith('Description:'):
                    result["section_descriptions"].append(lines[i + 1].split('Description:', 1)[1].strip())
                    result["subsections"].append([])
                    result["subsection_descriptions"].append([])
            elif line.startswith('### '):
                if result["subsections"]:
                    result["subsections"][-1].append(line[4:].strip())
                    # Extract the description in the next line
                    if i + 1 < len(lines) and lines[i + 1].startswith('Description:'):
                        result["subsection_descriptions"][-1].append(lines[i + 1].split('Description:', 1)[1].strip())

        return result
    def extract_subsections_subdescriptions(self, outline):
        subsections, subdescriptions = [], []
        for i in range(100):
            if f'Subsection {i + 1}' in outline:
                subsections.append(outline.split(f'Subsection {i + 1}: ')[1].split('\n')[0])
                subdescriptions.append(outline.split(f'Description {i + 1}: ')[1].split('\n')[0])
        return subsections, subdescriptions
        
    def process_outlines(self, section_outline, sub_outlines) -> str:
        res = ''
        survey_title, survey_sections, survey_section_descriptions = self.extract_title_sections_descriptions(
            outline=section_outline)
        res += f'# {survey_title}\n\n'
        for i in range(len(survey_sections)):
            section = survey_sections[i]
            res += f'## {i + 1} {section}\nDescription: {survey_section_descriptions[i]}\n\n'
            if i == 0:
                continue
            subsections, subsection_descriptions = self.extract_subsections_subdescriptions(sub_outlines[i-1])
            for j in range(len(subsections)):
                subsection = subsections[j]
                res += f'### {i + 1}.{j + 1} {subsection}\nDescription: {subsection_descriptions[j]}\n\n'
        return res
    
    def parse_markdown_outline(self, markdown_outline: str) -> Survey:
        result = {
        "title": "",
        "sections": [],
        "section_descriptions": [],
        "subsections": [],
        "subsection_descriptions": []
        }

        # Split the outline into lines
        lines = markdown_outline.split('\n')

        for i, line in enumerate(lines):
            # Match title, sections, subsections and their descriptions
            if line.startswith('# '):
                result["title"] = line[2:].strip()
            elif line.startswith('## '):
                result["sections"].append(line[3:].strip())
                # Extract the description in the next line
                if i + 1 < len(lines) and lines[i + 1].startswith('Description:'):
                    result["section_descriptions"].append(lines[i + 1].split('Description:', 1)[1].strip())
                    result["subsections"].append([])
                    result["subsection_descriptions"].append([])
            elif line.startswith('### '):
                if result["subsections"]:
                    result["subsections"][-1].append(line[4:].strip())
                    # Extract the description in the next line
                    if i + 1 < len(lines) and lines[i + 1].startswith('Description:'):
                        result["subsection_descriptions"][-1].append(lines[i + 1].split('Description:', 1)[1].strip())

        return result
    
    def remove_descriptions(self, text):
        """
        移除文本中所有以 "Description" 开头的行。

        Args:
            text (str): 包含多行文本的字符串。

        Returns:
            str: 移除以 "Description" 开头的行后的文本。
        """
        lines = text.split('\n')

        filtered_lines = [line for line in lines if not line.strip().startswith("Description")]

        result = '\n'.join(filtered_lines)

        return result
    
    def extract_title_sections_descriptions(self, outline):
        title = outline.split('Title: ')[1].split('\n')[0]
        sections, descriptions = [], []
        for i in range(100):
            if f'Section {i + 1}' in outline:
                sections.append(outline.split(f'Section {i + 1}: ')[1].split('\n')[0])
                descriptions.append(outline.split(f'Description {i + 1}: ')[1].split('\n')[0])
        return title, sections, descriptions
    
    def generate_document(self, parsed_outline, subsection_contents):
        """Generate final document from outline and contents."""
        document = []

        # Append title
        title = parsed_outline['title']
        document.append(f"# {title}\n")
        print(f"Added title with length {len(title)}")

        # Iterate over sections and their content
        for i, section in enumerate(parsed_outline['sections']):
            document.append(f"## {section}\n")
            print(f"Added section {i+1} with length {len(section)}")
            if i == 0:
                content = subsection_contents[i][0]
                document.append(content + "\n")
                continue
            # Append subsections and their contents
            for j, subsection in enumerate(parsed_outline['subsections'][i]):
                document.append(f"### {subsection}\n")
                print(f"Added subsection {i+1}.{j+1} with length {len(subsection)}")
                
                # Append detailed content for each subsection
                if i < len(subsection_contents) and j < len(subsection_contents[i]):
                    content = subsection_contents[i][j]
                    document.append(content + "\n")
                    print(f"Added content for {i+1}.{j+1} with length {len(content)}")
                else:
                    print(f"Warning: Missing content for subsection {i+1}.{j+1}")

        result = "\n".join(document)
        print(f"Final document length = {len(result)}")
        return result
    

    
    def extract_citations(self, markdown_text):
        def extract_year(text):
            match = re.search(r'(19|20)\d{2}', text)  # 只匹配1900-2099的4位数字
            return match.group(0) if match else None
        
        # 匹配 [] 内的内容
        pattern = re.compile(r'\[(.*?)\]')
        matches = pattern.findall(markdown_text)

        # 初始化一个列表来存储保留的 matches
        filtered_matches = []

        # 遍历 matches
        i = 0
        while i < len(matches):
            # 判断元素是否为英文开头的字符串
            if re.match(r'^[a-zA-Z]', matches[i]):
                # 判断下一个元素是否是 0-9 的数字
                if i + 1 < len(matches) and re.match(r'^\d+$', matches[i + 1]):
                    # 如果都符合条件，则保留
                    paper_infos = matches[i].strip().split(',')
                    if len(paper_infos) < 3:
                        year = extract_year(paper_infos[-1])
                        if year:
                            publish = paper_infos[-1].replace(year, '')
                            paper_info = f'{paper_infos[0].strip()}, {publish.strip()}, {year.strip()}'
                        else:
                            paper_info = matches[i].strip()
                        filtered_matches.append(paper_info)
                    else:
                        match = re.search(r'(19|20)\d{2}', paper_infos[1])
                        if match:
                            publish = paper_infos[1].replace(match.group(0), '')
                            paper_info = f'{paper_infos[0].strip()}, {publish.strip()}, {paper_infos[2].strip()}'
                            filtered_matches.append(paper_info)
                        else:
                            filtered_matches.append(matches[i].strip())
                    filtered_matches.append(matches[i + 1].strip())
                    i += 2  # 跳过下一个元素，因为它已经被处理
                else:
                    # 如果下一个元素不是数字，则跳过这两个元素
                    i += 2
            else:
                # 如果不是英文开头的字符串，则跳过
                i += 1

        # 删除 raw_survey 中不符合条件的内容
        for match in matches:
            if match not in filtered_matches:
                content_filtered = markdown_text.replace(f'[{match}]', '')

        return filtered_matches, content_filtered
    
    def replace_citations_with_numbers(self, citations, markdown_text):
        # 初始化计数器
        counter = 1
        # 用于记录每对 [英文][数字] 的第一次出现的序号
        pair_to_number = {}

        references_section = "\n\n## References\n\n"

        # 遍历 filtered_matches，每次处理一对 [英文][数字]
        for i in range(0, len(citations), 2):
            # 构造当前 [英文][数字] 对的键
            pair_key = (citations[i], citations[i+1])

            # 如果这对 [英文][数字] 已经记录过，则使用记录的序号
            if pair_key in pair_to_number:
                replacement_number = pair_to_number[pair_key]
            else:
                # 否则，分配一个新的序号并记录到字典中
                replacement_number = counter
                pair_to_number[pair_key] = replacement_number
                references_section += f"[{replacement_number}] {citations[i]}, chunk {citations[i + 1]}\n\n"
                counter += 1

            # 构造要替换的模式 [英文][数字]
            pattern = re.compile(r'\[{}\s*\]\[\s*{}\]'.format(
                re.escape(citations[i]),  # 转义英文部分
                re.escape(citations[i+1]) # 转义数字部分
            ))
            # 构造要替换的模式 <sup>英文</sup><sup>数字</sup>
            # pattern = re.compile(r'<sup>{}</sup><sup>{}</sup>'.format(
            #     re.escape(citations[i]),  # 转义英文部分
            #     re.escape(citations[i + 1])  # 转义数字部分
            # ))

            # 替换为对应的序号
            markdown_text = pattern.sub(f'<sup>{replacement_number}</sup>', markdown_text)
            # 定义正则表达式模式，用于匹配方括号及其内部的内容
        pattern = r'\[.*?\]'
        # 使用 re.sub() 函数将匹配到的内容替换为空字符串
        markdown_text = re.sub(pattern, '', markdown_text)

        return markdown_text + references_section
        
    def _generate_subsections(self, data: SectionData) -> str:
        """Generate the subsection outline for a single section.

        Produces one draft subsection outline per paper batch, merges the
        drafts with the LLM, and trims a trailing "future"/"conclusion"
        subsection when the section itself is not about those topics.

        Args:
            data: SectionData describing the section and its paper batches.

        Returns:
            The merged subsection outline (wrapped in <format> tags).
        """
        keywords = ['future', 'conclusion']

        # One draft outline per consolidated paper batch.
        draft_outlines = []
        for papers in data.paper_list:
            prompt = self._generate_prompt(SUBSECTION_OUTLINE_PROMPT, {
                'OVERALL OUTLINE': data.overall_outline,
                'SECTION NAME': data.section_name,
                'SECTION DESCRIPTION': data.section_description,
                'TOPIC': data.topic,
                'PAPER LIST': papers
            })
            draft_outlines.append(self.chain.invoke(prompt))

        # Merge the drafts into a single outline for this section.
        merge_prompt = self._generate_prompt(MERGING_SUBSECTION_PROMPT, paras={
            'TOPIC': data.topic,
            'SECTION_NAME': data.section_name,
            'OUTLINE LIST': '\n'.join(draft_outlines)
        })
        merged = self.chain.invoke(merge_prompt)

        # If the LLM appended a "future"/"conclusion" block to a section that
        # is not itself a closing section, drop that last block.
        stripped = merged.replace('<format>', '').replace('</format>', '')
        blocks = stripped.split("\n\n")
        section_is_closing = any(word in data.section_name for word in keywords)
        last_block_is_closing = any(word in blocks[-1] for word in keywords)
        if not section_is_closing and last_block_is_closing:
            merged = "<format>\n" + "\n\n".join(blocks[:-1]) + "\n</format>"
        return merged

    def _generate_subsection_content(self, data: SubsectionData) -> str:
        """Generate the prose for one subsection.

        Fills the subsection-writing prompt with the outline context, topic,
        paper list and word-count target, then runs it through the chain.
        (Placeholder order is kept identical to the historical behavior.)

        Args:
            data: SubsectionData describing the subsection to write.

        Returns:
            The LLM-generated subsection text.
        """
        fields = {
            'OVERALL OUTLINE': data.overall_outline,
            'SUBSECTION NAME': data.subsection_name,
            'DESCRIPTION': data.subsection_description,
            'TOPIC': data.topic,
            'PAPER LIST': data.paper_list,
            'SECTION NAME': data.section_name,
            'WORD NUM': str(data.min_words)
        }
        prompt = self._generate_prompt(SUBSECTION_WRITING_PROMPT, fields)
        return self.chain.invoke(prompt)

    def _generate_initial_outline(self, data: OutlineData) -> str:
        """Generate one rough outline from a batch of paper text.

        Args:
            data: OutlineData holding the paper text, topic and section count.

        Returns:
            The LLM-generated outline, or None when there is no paper text.
        """
        if not data.paper_text:
            return None  # nothing to outline

        fields = {
            'PAPER LIST': data.paper_text,
            'TOPIC': data.topic,
            'SECTION NUM': str(data.section_num)
        }
        prompt = self._generate_prompt(ROUGH_OUTLINE_PROMPT, fields)
        return self.chain.invoke(prompt)

    def generate_outline(self, topic: str, section_num: int = 5) -> str:
        """Generate a survey outline for ``topic`` via a multi-step pipeline.

        Steps:
            1. Retrieve relevant paper chunks and consolidate them into batches.
            2. Generate one rough outline per batch (in parallel).
            3. Merge the rough outlines into a section-level outline.
            4. Generate per-section subsection outlines (in parallel), merge
               everything, and let the LLM polish the final outline.

        Args:
            topic: Survey topic to search papers for.
            section_num: Number of top-level sections requested.

        Returns:
            The final outline in markdown (without <format> wrappers).

        Raises:
            ValueError: If no papers are found or no outlines are produced.
        """
        # Step 1: get relevant papers with their sections.
        chunks = retriever.search_papers(topic)
        if not chunks:
            raise ValueError(f"No relevant papers or chunks found for topic: {topic}")

        paper_texts = retriever.consolidate_chunks(chunks)
        outline_data_list = []
        for paper_text in paper_texts:
            if paper_text:  # Only add if we have content
                outline_data_list.append(OutlineData(
                    section_type="all",
                    paper_text=paper_text,
                    topic=topic,
                    section_num=section_num
                ))

        # Step 2: generate rough outlines in parallel.
        outlines = parallel_outline_generate(
                outline_data_list,
                self._generate_initial_outline,
                desc="Generating initial outlines"
            )
        if not outlines:
            raise ValueError(f"Failed to generate outline for topic: {topic}")

        # Step 3: merge the rough outlines into one section-level outline.
        outline_text = ""
        for i, outline in enumerate(outlines):
            outline_text += f"---\noutline_id: {i}\n\noutline_content:\n\n{outline}\n"
        outline_text += "---\n"

        prompt = self._generate_prompt(MERGING_OUTLINE_PROMPT, {
            'OUTLINE LIST': outline_text,
            'TOPIC': topic
        })
        merged_outline = self.chain.invoke(prompt)

        # Step 4: generate subsections in parallel.
        survey_title, survey_sections, survey_section_descriptions = \
            self.extract_title_sections_descriptions(merged_outline)

        # Prepare data for parallel processing; the first (intro) section
        # gets no subsections.
        section_data_list = []
        for index, (section_name, section_description) in enumerate(
                zip(survey_sections, survey_section_descriptions)):
            if index == 0:
                continue
            query = f"topic: {topic}\nsection name: {section_name}\nsection description: {section_description}."
            section_chunks = retriever.search_papers(query)
            paper_list = retriever.consolidate_chunks(section_chunks)

            section_data_list.append(SectionData(
                section_name=section_name,
                # Use the zipped description directly: the previous
                # list.index(section_name) lookup returned the wrong entry
                # whenever two sections shared a name.
                section_description=section_description,
                overall_outline=merged_outline,
                topic=topic,
                paper_list=paper_list
            ))

        # Generate subsections in parallel.
        sub_sections_list = parallel_process(
            section_data_list,
            self._generate_subsections,
            desc="Generating subsections"
        )

        # Assemble and polish the final outline.
        merged_outline = self.process_outlines(merged_outline, sub_sections_list)
        prompt = self._generate_prompt(EDIT_FINAL_OUTLINE_PROMPT2, {
            'OVERALL OUTLINE': merged_outline
        })
        final_outline = self.chain.invoke(prompt).replace('<format>\n', '').replace('</format>', '')
        return final_outline

    def generate_content(self, topic: str, outline: str, min_words: int = 500) -> str:
        """Generate content for each subsection in parallel.

        Retrieves paper chunks per subsection, writes all subsections with
        the LLM in parallel, assembles the full document, then rewrites
        citation markers as numbered references.

        Args:
            topic: Survey topic, used in retrieval queries.
            outline: Final markdown outline (with Description: lines).
            min_words: Minimum word count requested per subsection.

        Returns:
            The survey markdown with numbered citations and a References
            section. Also written via save_survey and to ./review.md.
        """
        parsed_outline = self.parse_outline(outline)
        # The writing prompt receives the outline without Description lines.
        outline_wo_description = self.remove_descriptions(outline)
        
        # Prepare data for parallel processing
        subsection_data_list = []
        for i, section in enumerate(parsed_outline['sections']):
            if i == 0:
                # The first section is written as one unit: the section itself
                # is used as its own "subsection".
                section_description = parsed_outline['section_descriptions'][i]
                query = f"topic: {topic}\nsection name: {section}\nsection description: {section_description}."
                chunks = retriever.search_papers(query)
                # NOTE(review): [0] assumes consolidate_chunks always returns
                # at least one batch — would raise IndexError otherwise; confirm.
                paper_list = retriever.consolidate_chunks(chunks)[0]
                subsection_data = SubsectionData(
                    section_name=section,
                    subsection_name=section,
                    subsection_description=section_description,
                    overall_outline=outline_wo_description,
                    topic=topic,
                    paper_list=paper_list,
                    min_words=min_words
                )
                subsection_data_list.append(subsection_data)
                continue
            for j in range(len(parsed_outline['subsections'][i])):
                subsection_name = parsed_outline['subsections'][i][j]
                query = f"topic: {topic}. subsection: {subsection_name}"
                chunks = retriever.search_papers(query)
                # NOTE(review): same non-empty assumption as above.
                paper_list = retriever.consolidate_chunks(chunks)[0]
                
                subsection_data = SubsectionData(
                    section_name=section,
                    subsection_name=subsection_name,
                    subsection_description=parsed_outline['subsection_descriptions'][i][j],
                    overall_outline=outline_wo_description,
                    topic=topic,
                    paper_list=paper_list,
                    min_words=min_words
                )
                subsection_data_list.append(subsection_data)
        
        # Generate content in parallel
        all_contents = parallel_process(
            subsection_data_list,
            self._generate_subsection_content,
            desc="Generating content"
        )
        
        # Organize contents back into sections. all_contents is flat, in the
        # same order subsection_data_list was built, so walk it with a cursor.
        section_contents = []
        current_index = 0
        for i in range(len(parsed_outline['sections'])):
            if i == 0:
                section_content = []
                content = all_contents[current_index]
                # Strip the <format> wrapper the prompt asks the LLM to emit.
                refined_content = content.replace('<format>\n', '').replace('</format>', '') if content.startswith("<format>") else content
                section_content.append(refined_content)
                current_index += 1
                section_contents.append(section_content)
                continue
            section_content = []
            for _ in range(len(parsed_outline['subsections'][i])):
                content = all_contents[current_index]
                refined_content = content.replace('<format>\n', '').replace('</format>', '') if content.startswith("<format>") else content
                section_content.append(refined_content)
                current_index += 1
            section_contents.append(section_content)
        
        # Generate final document
        raw_survey = self.generate_document(parsed_outline, section_contents)
        print(f"raw_survey length = {len(raw_survey)}")
        # self.save_survey(content=raw_survey, title=parsed_outline['title'], status='raw')
        
        # Rewrite [paper][chunk] markers into numbered citations plus a
        # trailing References section.
        citations, filtered_raw_survey = self.extract_citations(raw_survey)
        print(f"filtered_raw_survey length = {len(filtered_raw_survey)}")
        raw_survey_with_references = self.replace_citations_with_numbers(citations, filtered_raw_survey)
        print(f"raw_survey_with_references length = {len(raw_survey_with_references)}")
        self.save_survey(raw_survey_with_references, topic, status='raw_with_references')
        # NOTE(review): hard-coded path — every run overwrites ./review.md.
        with open(f'./review.md', 'w', encoding='utf-8') as f:
            f.write(raw_survey_with_references)
        return raw_survey_with_references
        
    # def enhance_coherence(self, survey: Survey) -> Survey:
    #     """Enhance coherence between subsections."""
    #     for section in tqdm(survey.sections, desc="Enhancing coherence"):
    #         subsection_names = [name for name, _ in section.subsections]
            
    #         for i, subsection_name in enumerate(subsection_names):
    #             # Get previous and following subsections
    #             prev_content = survey.content.get(subsection_names[i-1], "") if i > 0 else ""
    #             next_content = survey.content.get(subsection_names[i+1], "") if i < len(subsection_names)-1 else ""
                
    #             # Enhance coherence
    #             refined_content = self.chain.invoke({
    #                 "topic": survey.title,
    #                 "previous": prev_content,
    #                 "following": next_content,
    #                 "subsection": survey.content[subsection_name]
    #             })
                
    #             survey.content[subsection_name] = refined_content
                
    #     return survey
        
    def save_survey(self, content: str, title: str, status: str, output_dir: Path = OUTPUT_DIR):
        """Save survey text to ``<output_dir>/survey_<status>_<title>.md``.

        Args:
            content: Markdown text to write.
            title: Label used in the file name (often the topic).
            status: Pipeline stage label (e.g. 'raw', 'raw_with_references').
            output_dir: Target directory; created if missing.

        Returns:
            ``(None, None)`` when content is empty, otherwise ``None``.
            Errors are logged and swallowed rather than raised (best-effort).
        """
        try:
            # Ensure output directory exists
            output_dir.mkdir(parents=True, exist_ok=True)

            # Refuse to write empty files.
            if not content or len(content.strip()) == 0:
                print(f"Warning: Empty content being saved for {status}_{title}")
                return None, None

            # Build the path with pathlib instead of string concatenation.
            # NOTE(review): `title` is interpolated unsanitized — a topic
            # containing '/' would break the path; confirm callers.
            md_path = output_dir / f"survey_{status}_{title}.md"
            print(f"Saving to {output_dir}/survey_{status}_{title}")
            print(f"Content length = {len(content)}")
            print(f"First 100 chars: {content[:100]}")

            # Save as markdown with explicit encoding; line buffering plus an
            # explicit flush so the bytes reach disk before verification below.
            with open(md_path, 'w', encoding='utf-8', buffering=1) as f:
                chars_written = f.write(content)
                f.flush()
                print(f" Wrote {chars_written} chars to {md_path}")

            # Verify the file is non-empty on disk.
            if md_path.stat().st_size == 0:
                print(f"Error: {md_path} is empty after writing!")
            else:
                print(f"Success: {md_path} size is {md_path.stat().st_size} bytes")

        except Exception as e:
            # Best-effort save: report and continue rather than crash the run.
            print(f"Error saving survey: {str(e)}")
            import traceback
            traceback.print_exc()

# Module-level singleton shared by importers of this module; constructing it
# composes the llm | output_parser chain immediately at import time.
generator = SurveyGenerator()