import base64
import json
import logging
import re
from urllib.parse import unquote

from bs4 import BeautifulSoup

from crawl_base import ArticleCrawler

class ScienceDirectCrawler(ArticleCrawler):
    """Crawler that extracts corresponding-author names and emails from
    ScienceDirect article pages.

    Two complementary parsing strategies are used:

    1. Decode the ``window.__PRELOADED_STATE__`` JSON blob embedded in a
       ``<script>`` tag (primary, most reliable).
    2. Walk the rendered HTML DOM looking for ``<author>`` elements
       (fallback when the JSON blob is absent or unparsable).

    Corresponding authors are identified by the "⁎" marker that
    ScienceDirect attaches to them; their email addresses are shipped
    Base64-encoded and are decoded here.
    """

    def __init__(self):
        super().__init__()

    def parse_article(self, html_content):
        """Parse article HTML and extract corresponding-author info.

        Tries the ``__PRELOADED_STATE__`` JSON first; if that yields no
        authors, falls back to DOM-based parsing.

        :param html_content: full HTML of the article page
        :return: list of dicts, ``[{'name': <author name>, 'email': <email>}]``
                 (``email`` is ``'N/A'`` when it could not be decoded);
                 empty list when no corresponding author is found
        """
        self.logger.info("开始从HTML内容解析通讯作者信息...")
        soup = BeautifulSoup(html_content, 'html.parser')

        # Strategy 1: parse the embedded __PRELOADED_STATE__ JSON.
        preloaded_state = self._extract_preloaded_state(soup)
        authors_list = self._parse_authors_from_state(preloaded_state)

        # Strategy 2: fall back to DOM parsing if strategy 1 found nothing.
        if not authors_list:
            self.logger.info("__PRELOADED_STATE__方法未找到通讯作者，尝试HTML DOM解析方法...")
            authors_list = self._parse_authors_from_html_dom(soup)

        if not authors_list:
            self.logger.warning("两种解析方法都未找到任何通讯作者。")
        else:
            self.logger.info(f"成功找到 {len(authors_list)} 位通讯作者")

        return authors_list

    def _extract_preloaded_state(self, soup):
        """Locate and JSON-decode ``window.__PRELOADED_STATE__``.

        :param soup: BeautifulSoup-parsed document
        :return: the decoded state dict, or ``None`` on any failure
        """
        self.logger.info("正在尝试提取 __PRELOADED_STATE__。")
        script_tag = soup.find('script', string=re.compile(r'window\.__PRELOADED_STATE__'))
        if not script_tag:
            self.logger.error("未能找到 __PRELOADED_STATE__ 脚本标签。")
            return None

        # .string is None when the tag has mixed children; guard so the
        # regex search below never receives None (re.search would raise
        # TypeError instead of the intended graceful failure).
        script_text = script_tag.string or ''
        # Greedy match up to the last ';' so semicolons inside the JSON
        # payload don't truncate it.
        json_str_match = re.search(r'window\.__PRELOADED_STATE__\s*=\s*(.*);', script_text, re.DOTALL)
        if not json_str_match:
            self.logger.error("无法通过正则表达式从 __PRELOADED_STATE__ 脚本标签中提取JSON。")
            return None

        json_str = json_str_match.group(1)
        try:
            preloaded_state = json.loads(json_str)
            self.logger.info("成功提取并解析 __PRELOADED_STATE__。")
            return preloaded_state
        except json.JSONDecodeError as e:
            self.logger.error(f"解析 __PRELOADED_STATE__ JSON失败: {e}")
            return None

    def _parse_authors_from_state(self, preloaded_state):
        """Extract corresponding-author details from the preloaded state.

        The state stores authors as nested '#name'/'$$' node trees
        (ScienceDirect's serialized XML). A corresponding author is a
        node carrying a cross-ref whose <sup> text is '⁎'.

        :param preloaded_state: dict returned by _extract_preloaded_state,
                                or None
        :return: list of ``{'name': ..., 'email': ...}`` dicts (may be empty)
        """
        authors_list = []
        if not preloaded_state:
            self.logger.warning("预加载状态为空，无法解析作者。")
            return authors_list

        try:
            author_content = preloaded_state.get('authors', {}).get('content', [])
            if not author_content:
                self.logger.warning("在预加载状态中找不到 'authors.content'。")
                return authors_list

            # The author nodes live under the first content entry's '$$' list.
            authors_data = author_content[0].get('$$', [])
            self.logger.info(f"找到 {len(authors_data)} 个作者数据")

            for author_data in authors_data:
                if author_data.get('#name') != 'author':
                    continue

                is_corresponding = False
                given_name, surname, email = None, None, None

                author_details = author_data.get('$$', [])
                self.logger.debug(f"处理作者，子元素数量: {len(author_details)}")

                for detail in author_details:
                    detail_name = detail.get('#name')

                    # Corresponding-author marker: a cross-ref containing
                    # a <sup> node whose text is '⁎'.
                    if detail_name == 'cross-ref':
                        cross_ref_content = detail.get('$$', [])
                        for ref_item in cross_ref_content:
                            if ref_item.get('#name') == 'sup' and ref_item.get('_') == '⁎':
                                is_corresponding = True
                                self.logger.info("找到通讯作者标记 ⁎")
                                break

                    # Name parts.
                    elif detail_name == 'given-name':
                        given_name = detail.get('_')
                        self.logger.debug(f"提取到given-name: {given_name}")

                    elif detail_name == 'surname':
                        surname = detail.get('_')
                        self.logger.debug(f"提取到surname: {surname}")

                    # Email: decode Base64 -> UTF-8 -> URL-unquote -> JSON.
                    elif detail_name == 'encoded-e-address' and '__encoded' in detail:
                        encoded_str = detail['__encoded']
                        self.logger.debug(f"找到编码邮箱: {encoded_str[:50]}...")
                        try:
                            decoded_bytes = base64.b64decode(encoded_str)
                            decoded_url = unquote(decoded_bytes.decode('utf-8'))
                            email_data = json.loads(decoded_url)
                            email = email_data.get('_')
                            self.logger.info(f"成功解码邮箱: {email}")
                        except Exception as e:
                            # Best-effort: a bad email must not abort the
                            # whole author scan.
                            self.logger.error(f"为一位作者解码邮箱失败: {e}")
                            self.logger.error(f"出错的编码字符串: {encoded_str}")
                            email = None

                # Keep only corresponding authors with a complete name.
                if is_corresponding and given_name and surname:
                    full_name = f"{given_name} {surname}".strip()
                    authors_list.append({'name': full_name, 'email': email or 'N/A'})
                    self.logger.info(f"找到并解码了通讯作者: {full_name}, {email}")
                elif given_name and surname:
                    self.logger.debug(f"跳过非通讯作者: {given_name} {surname}")

        except (IndexError, KeyError, TypeError) as e:
            self.logger.error(f"从预加载状态解析作者时发生错误: {e}")
            import traceback
            self.logger.error(f"错误详情: {traceback.format_exc()}")

        self.logger.info(f"完成解析状态。找到 {len(authors_list)} 位通讯作者。")
        return authors_list

    def _parse_authors_from_html_dom(self, soup):
        """Fallback parser: scan the DOM for corresponding authors.

        Looks for ``<author>`` elements that contain a ``<sup>⁎</sup>``
        marker, then pulls the name and Base64-encoded email from their
        children.

        :param soup: BeautifulSoup-parsed document
        :return: list of ``{'name': ..., 'email': ...}`` dicts (may be empty)
        """
        self.logger.info("开始使用HTML DOM方法解析通讯作者...")
        authors_list = []

        try:
            authors = soup.find_all("author")
            self.logger.info(f"找到 {len(authors)} 个author标签")

            for author in authors:
                # Only authors flagged with the '⁎' superscript are
                # corresponding authors.
                if author.find("sup", string="⁎"):
                    self.logger.info("找到包含⁎符号的通讯作者")

                    given_name_tag = author.find('given-name')
                    surname_tag = author.find('surname')

                    if given_name_tag and surname_tag:
                        given_name = given_name_tag.get_text(strip=True)
                        surname = surname_tag.get_text(strip=True)
                        full_name = f"{given_name} {surname}".strip()

                        email = None
                        email_tag = author.find("encoded-e-address")
                        if email_tag:
                            encoded = email_tag.get("__encoded")
                            if encoded:
                                try:
                                    decoded = base64.b64decode(encoded).decode("utf-8")
                                    # Prefer URL-unquoted JSON; some pages
                                    # ship the JSON without URL-encoding,
                                    # so fall back to parsing it directly.
                                    # ValueError covers JSONDecodeError;
                                    # the previous bare `except:` would
                                    # also have swallowed KeyboardInterrupt.
                                    try:
                                        decoded_url = unquote(decoded)
                                        email_data = json.loads(decoded_url)
                                    except ValueError:
                                        email_data = json.loads(decoded)

                                    email = email_data.get("_")
                                    self.logger.info(f"成功解码邮箱: {email}")
                                except Exception as e:
                                    self.logger.error(f"解码邮箱失败: {e}")
                                    self.logger.error(f"编码字符串: {encoded}")
                                    email = None

                        authors_list.append({
                            'name': full_name, 
                            'email': email or 'N/A'
                        })
                        self.logger.info(f"HTML DOM方法找到通讯作者: {full_name}, {email}")
                    else:
                        self.logger.warning("找到⁎符号但无法提取完整姓名")

            # No <author> tags at all: hook for alternative page layouts.
            if not authors:
                self.logger.info("未找到author标签，尝试查找其他作者信息结构...")

        except Exception as e:
            self.logger.error(f"HTML DOM解析过程中发生错误: {e}")
            import traceback
            self.logger.error(f"错误详情: {traceback.format_exc()}")

        self.logger.info(f"HTML DOM方法完成解析。找到 {len(authors_list)} 位通讯作者。")
        return authors_list

if __name__ == '__main__':
    # Demo entry point: fetch one ScienceDirect article with the enhanced
    # (Selenium-backed) fetcher and print its corresponding authors.
    target_url = "https://www.sciencedirect.com/science/article/pii/S0169433224020178"
    sd_crawler = ScienceDirectCrawler()
    browser = None
    try:
        browser = sd_crawler.fetch_article_html_enhanced(target_url)
        if browser:
            found = sd_crawler.parse_article(browser.page_source)
            print("找到的通讯作者:")
            if not found:
                print("  - 未找到通讯作者。")
            else:
                for entry in found:
                    print(f"  - 姓名: {entry['name']}, 邮箱: {entry['email']}")
    finally:
        # Always release the browser, even if fetching or parsing failed.
        if browser:
            browser.quit()