import uuid
from docx import Document
import os
from typing import List, Dict, Any
import re
import logging
from typing import Optional

class ThesisProcessor:
    """Parse graduation-thesis .docx files into a structured result dict.

    The populated ``self.result`` contains:
      - ``"info"``:      cover-page metadata, title, abstracts and keywords
      - ``"paragraph"``: body text grouped under level-1 headings
      - ``"reference"``: parsed bibliography entries
      - ``"uuid"``:      a random identifier for the processed document
    """

    def __init__(self, log_file: Optional[str] = None):
        """Create a processor.

        :param log_file: optional path of a UTF-8 log file; when omitted,
            log records go to stderr via a ``StreamHandler``.
        """
        self.result: Dict[str, Any] = {
            "reference": [],
            "paragraph": [],
            "uuid": "",
            "info": {}
        }

        self._reset_temp_data()
        # Set up a dedicated logger. Existing handlers are removed first so
        # that repeated instantiation does not duplicate log output.
        self.logger = logging.getLogger("ThesisProcessor")
        self.logger.setLevel(logging.INFO)

        for handler in self.logger.handlers[:]:
            self.logger.removeHandler(handler)

        handler = logging.FileHandler(log_file, encoding='utf-8') if log_file else logging.StreamHandler()
        handler.setFormatter(logging.Formatter('%(asctime)s - %(levelname)s - %(message)s'))
        self.logger.addHandler(handler)

    def _reset_temp_data(self) -> None:
        """Reset per-document scratch state."""
        self.info1: Dict[str, Any] = {}   # cover-page / author info
        self.paragraphs: List[Dict] = []  # body sections
        self.references: List[Dict] = []  # parsed references

    @staticmethod
    def generate_uuid() -> str:
        """Return a fresh random UUID4 string."""
        return str(uuid.uuid4())

    @staticmethod
    def get_docx_files(path: str) -> List[str]:
        """Return the names of all .docx files directly under *path*."""
        return [f for f in os.listdir(path) if f.endswith(".docx")]

    @staticmethod
    def get_total_word_count(path: str) -> int:
        """Count "words" in the whole document (paragraphs plus tables).

        Each CJK character counts as one word; each run of ASCII
        letters/digits counts as one word.
        """
        doc = Document(path)
        word_pattern = re.compile(r'[\u4e00-\u9fa5]|[a-zA-Z0-9]+')

        # Body paragraphs.
        total_words = sum(len(word_pattern.findall(para.text)) for para in doc.paragraphs)

        # Table cells.
        for table in doc.tables:
            for row in table.rows:
                for cell in row.cells:
                    total_words += len(word_pattern.findall(cell.text))
        return total_words

    @staticmethod
    def convert_date(date_str: str) -> str:
        """Convert a Chinese date such as '2023年5月20日' to '2023-05-20'.

        The input is returned unchanged when it does not match the
        '<y>年<m>月<d>日' pattern; month and day are zero-padded.
        """
        match = re.match(r'(\d+)年(\d+)月(\d+)日', date_str)
        if match:
            year, month, day = match.groups()
            return f"{year}-{month.zfill(2)}-{day.zfill(2)}"
        return date_str

    @staticmethod
    def is_chinese_reference(text: str) -> bool:
        """Heuristic: a reference counts as Chinese when more than 30% of
        its characters fall in the CJK Unified Ideographs range."""
        chinese_char_count = sum(1 for ch in text if '\u4e00' <= ch <= '\u9fff')
        ratio = chinese_char_count / len(text) if len(text) > 0 else 0
        return ratio > 0.3  # tunable threshold

    def _extract_title_abstract_keywords(self, paragraphs: List[Any]) -> tuple:
        """Extract title, document type, both abstracts and both keyword lines.

        :param paragraphs: docx paragraph objects (anything with a ``.text``).
        :return: tuple ``(title, doc_type, abstract_ch, abstract_en,
                 keyword_ch, keyword_en)``; missing pieces are ``""``.
        """
        title = ""
        doc_type = ""  # renamed from `type` to avoid shadowing the builtin
        abstract_ch = ""
        abstract_en = ""
        keyword_ch = ""
        keyword_en = ""
        kc_idx = ke_idx = -1  # paragraph indices of the CH/EN keyword lines

        for i, para in enumerate(paragraphs):
            try:
                text = para.text.replace(" ", "")

                # Title: the first non-empty paragraph after the cover marker.
                if not title:
                    for marker in ("毕业设计说明书", "毕业论文"):
                        if text.startswith(marker):
                            j = i + 1
                            while j < len(paragraphs) and not paragraphs[j].text.strip():
                                j += 1
                            # Guard: the marker may be the last paragraph.
                            if j < len(paragraphs):
                                title = paragraphs[j].text
                                doc_type = marker
                            break

                if text.startswith("摘要"):
                    # Chinese abstract: everything up to the "关键词" line.
                    j = i + 1
                    while j < len(paragraphs):
                        if not paragraphs[j].text.strip():
                            j += 1
                            continue
                        if paragraphs[j].text.startswith("关键词"):
                            kc_idx = j
                            break
                        abstract_ch += paragraphs[j].text
                        j += 1

                if para.text.startswith("Abstract"):
                    # English abstract: everything up to the "Key words" line.
                    j = i + 1
                    while j < len(paragraphs):
                        if not paragraphs[j].text.strip():
                            j += 1
                            continue
                        if paragraphs[j].text.startswith("Key words"):
                            ke_idx = j
                            break
                        abstract_en += paragraphs[j].text
                        j += 1
            except Exception:
                self.logger.warning(
                    f"摘要拆分失败（段落 {i}）: {paragraphs[i].text[:50]}...",
                    exc_info=True
                )

        # Keyword lines look like "关键词：a；b" / "Key words: a; b";
        # keep everything after the first full- or half-width colon.
        try:
            if kc_idx >= 0:
                keyword_text = paragraphs[kc_idx].text.replace(" ", "")
                keyword_ch = re.split(r"[：:]", keyword_text, maxsplit=1)[-1].strip()
        except Exception:
            self.logger.warning(
                f"中文关键词拆分失败（段落 {kc_idx}）: {paragraphs[kc_idx].text[:50]}...",
                exc_info=True
            )

        try:
            if ke_idx >= 0:
                keyword_text = paragraphs[ke_idx].text.replace(" ", "")
                keyword_en = re.split(r"[：:]", keyword_text, maxsplit=1)[-1].strip()
        except Exception:
            self.logger.warning(
                f"英文关键词拆分失败（段落 {ke_idx}）: {paragraphs[ke_idx].text[:50]}...",
                exc_info=True
            )
        return title, doc_type, abstract_ch, abstract_en, keyword_ch, keyword_en

    def _parse_reference(self, ref_text: str, order: int) -> Dict[str, Any]:
        """Parse one cleaned reference line into its structured fields.

        Shared by the Chinese and Western branches; only the author/title
        separator and the "et al" marker differ between the two languages.
        """
        chinese = self.is_chinese_reference(ref_text)
        ref_info: Dict[str, Any] = {
            "authors": "",  # always a string (was inconsistently [] before)
            "title": "",
            "type": "",
            "periodical": "",
            "publishedTime": "",
            "order": order,
            "language": "1" if chinese else "0",
            "raw_text": ref_text  # keep the original text
        }
        # Chinese authors are separated from the title by the first ".";
        # Western names contain dots (initials), so require ". <Capital>".
        author_split = r'\.' if chinese else r'\.(?=[A-Z])'
        etal_pattern = r'[,，、]\s*等\.?$' if chinese else r'[,，、]\s*et al\.?$'

        # The type tag, e.g. [J], [N], [M].
        type_match = re.search(r'\[([A-Z])\]', ref_text)
        if not type_match:
            return ref_info
        ref_type = type_match.group(1)
        ref_info["type"] = ref_type
        parts = ref_text.split(f'[{ref_type}]')
        if len(parts) < 2:
            return ref_info

        before_type = parts[0].strip()
        author_title_split = re.split(author_split, before_type, maxsplit=1)
        if len(author_title_split) > 1:
            # Authors present.
            ref_info["authors"] = author_title_split[0].strip()
            ref_info["title"] = author_title_split[1].strip()
        else:
            # No author part.
            ref_info["title"] = before_type.strip()

        # Drop a trailing "等" / "et al" marker.
        if ref_info["authors"]:
            ref_info["authors"] = re.sub(etal_pattern, '', ref_info["authors"])

        # After the type tag: "<periodical><sep> <year>...".
        after_type = parts[1].strip()
        match = re.match(r"^(.*?)[,\.，]\s*(\d{4}|\d{4}-\d{4}|\d{4}–\d{4})", after_type)
        if match:
            ref_info["periodical"] = match.group(1).strip('.')
            year_match = re.search(r'\d{4}', match.group(2).strip())
            if year_match:
                ref_info["publishedTime"] = year_match.group()
        return ref_info

    def _extract_references(self, paragraphs: List[Any], ref_start: int, ref_end: int) -> List[Dict]:
        """Parse the bibliography paragraphs in ``(ref_start, ref_end)``.

        :return: list of ``{"info": {...}}`` entries. ``order`` counts every
            paragraph in the range, including blank ones that are skipped.
        """
        references = []
        order = 0
        for i in range(ref_start + 1, ref_end):
            try:
                order += 1
                # Normalise full-width punctuation and strip a leading "[n]".
                ref_text = paragraphs[i].text.strip()
                ref_text = ref_text.replace("，", ",").replace("．", ".")
                ref_text = re.sub(r'^\[\d+\]\s*', '', ref_text).strip()
                if not ref_text:
                    continue

                references.append({"info": self._parse_reference(ref_text, order)})
            except Exception:
                self.logger.warning(
                    f"参考文献拆分失败（段落 {i}）: {paragraphs[i].text[:100]}",
                    exc_info=True
                )
        return references

    def _extract_content_structure(self, paragraphs: List[Any], start_idx: int, end_idx: int) -> List[Dict]:
        """Group body paragraphs under level-1 headings.

        Level-2/3 headings and plain paragraphs are appended to the current
        level-1 section's ``content`` string; heading numbers are preserved.
        """
        content: List[Dict] = []
        current_section: Optional[Dict[str, Any]] = None

        # Level-1 heading: "1 绪论"
        level1_pattern = re.compile(r'^\s*(\d+)\s+(.+?)\s*$')
        # Level-2 heading: "1.1 小节"
        level2_pattern = re.compile(r'^\s*(\d+\.\d+)\s+(.+?)\s*$')
        # Level-3 heading: "1.1.1 小节"
        level3_pattern = re.compile(r'^\s*(\d+\.\d+\.\d+)\s+(.+?)\s*$')

        for i in range(start_idx, end_idx):
            text = ""  # pre-bind so the except handler can always log it
            try:
                text = paragraphs[i].text.strip()
                if not text:
                    continue

                # A new level-1 heading closes the previous section.
                m1 = level1_pattern.match(text)
                if m1:
                    if current_section:
                        content.append(current_section)

                    current_section = {
                        "level": 1,
                        "order": m1.group(1),          # chapter number, e.g. "1"
                        "title": m1.group(2).strip(),  # heading text, e.g. "绪论"
                        "content": ""                  # accumulated body text
                    }
                    continue

                # Level-2/3 headings are folded into the section body.
                m2 = level2_pattern.match(text)
                m3 = level3_pattern.match(text)
                if (m2 or m3) and current_section:
                    number = (m2 or m3).group(1)
                    sub_title = (m2 or m3).group(2).strip()
                    current_section["content"] += f"\n{number} {sub_title}"
                    continue

                # Plain body paragraph; skip obvious page furniture.
                if current_section:
                    if not text.startswith(('第', '页', '©', 'Copyright')):
                        current_section["content"] += "\n" + text

            except Exception as e:
                self.logger.warning(
                    f"正文拆分失败（段落 {i}）: {text[:100]}... 错误: {str(e)}",
                    exc_info=True
                )

        # Flush the last open section.
        if current_section:
            content.append(current_section)

        return content

    def process_document(self, file_path: str) -> Any:
        """Process a single thesis document.

        :param file_path: path of the .docx file.
        :return: the populated result dict on success, ``False`` on failure.
            (The old ``-> bool`` annotation was wrong: a dict is returned.)
        """
        try:
            self._reset_temp_data()
            doc = Document(file_path)
            paragraphs = list(doc.paragraphs)  # materialise for fast indexing

            # 1. Cover-page info from the first table
            #    (labels in column 0, values in column 1).
            if doc.tables and len(doc.tables) > 0:
                table = doc.tables[0]
                info = [table.cell(i, 1).text for i in range(len(table.rows))]
                teachers: List[str] = []
                if table.cell(2, 0).text:
                    teachers.extend([t for t in re.split(r"[、,，\s]+", info[2].strip()) if t])

                # An empty label on row 3 means that row continues the
                # teacher list, shifting the remaining fields down one row.
                shifted = not table.cell(3, 0).text.strip()
                if shifted:
                    # BUGFIX: the old code did `teachers.extend(info[3])`,
                    # which added one entry per *character*; split like row 2.
                    teachers.extend([t for t in re.split(r"[、,，\s]+", info[3].strip()) if t])

                teacher1 = teachers[0] if len(teachers) > 0 else ""
                teacher2 = teachers[1] if len(teachers) > 1 else ""
                # Fields shift down one row only when row 3 actually held a
                # second teacher (matches the original branch logic).
                offset = 1 if (shifted and teacher2 != "") else 0
                self.info1 = {
                    "author": info[0],
                    "number": info[1],
                    "teacher1": re.sub(r"（.*?）", "", teacher1),
                    "teacher2": re.sub(r"（.*?）", "", teacher2),
                    "academy": info[3 + offset],
                    "major": info[4 + offset],
                    "publishedTime": self.convert_date(info[5 + offset]),
                    "wordCount": self.get_total_word_count(file_path)
                }

            # 2. Title, abstracts and keywords.
            title, doc_type, abstract_ch, abstract_en, keyword_ch, keyword_en = \
                self._extract_title_abstract_keywords(paragraphs)
            self.info1.update({
                "title": title.replace(" ", ""),
                "type": doc_type,
                "abstractCh": abstract_ch,
                "abstractEn": abstract_en,
                "keywordsCh": keyword_ch,
                "keywordsEn": keyword_en
            })

            # 3. Locate body start ("1 <chinese>"), references and appendix.
            current = refer = appendix = 0
            for i, para in enumerate(paragraphs):
                if re.search(r"^1\s+[\u4e00-\u9fa5]+$", para.text):
                    current = i
                if "参考文献" == para.text.replace(" ", ""):
                    refer = i
                if "附录" == para.text.replace(" ", ""):
                    appendix = i
                    break

            # 4. References.
            if refer > 0:
                ref_end = appendix if appendix > refer else len(paragraphs)
                self.references = self._extract_references(paragraphs, refer, ref_end)

            # 5. Body structure.
            if current > 0:
                content_end = refer if refer > current else len(paragraphs)
                self.paragraphs = self._extract_content_structure(paragraphs, current, content_end)

            # 6. Assemble the result.
            self.result.update({
                "reference": self.references,
                "paragraph": self.paragraphs,
                "uuid": self.generate_uuid(),
                "info": self.info1
            })
            return self.result
        except Exception:
            self.logger.error(
                f"处理文件失败: {file_path}",
                exc_info=True  # include the full traceback
            )
            return False


