#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
File: custom.py
Author: StellaJL
Date: 2025-09-16 14:56:38
Version: 1.0.0
Description: 自定义数据解析器示例
"""

from logger import get_logger
from lxml import etree

# Site root used to absolutize site-relative hrefs found on list pages.
BASE_URL = "https://www.1905.com"


def _absolutize(href: str) -> str:
    """Return *href* as an absolute URL, prefixing BASE_URL only for site-relative paths."""
    # Fix: the original prefixed BASE_URL unconditionally, corrupting hrefs
    # that were already absolute (e.g. "https://...").
    return f"{BASE_URL}{href}" if href.startswith('/') else href


def _collect_page_urls(html) -> list[str]:
    """Extract pagination URLs from a parsed list page.

    Prefers links inside the ``div#new_page`` pagination container; falls back
    to heuristic next-page / page-number links when the container yields nothing.
    Returns absolute URLs, deduplicated while preserving discovery order.
    """
    new_urls: list[str] = []

    def add(href: str) -> None:
        # Accept only site-relative ("/...") or absolute ("http...") links;
        # dedupe consistently (the original fallback loop forgot to).
        if href and (href.startswith('/') or href.startswith('http')):
            full_url = _absolutize(href)
            if full_url not in new_urls:
                new_urls.append(full_url)

    # Preferred source: all <a href> inside the pagination container.
    page_container = html.xpath('//div[@id="new_page"]')
    if page_container:
        for href in page_container[0].xpath('.//a/@href'):
            add(href)

    # Fallback: heuristic "next page" links and page-number links.
    if not new_urls:
        for href in html.xpath('//a[contains(@class, "next") or contains(text(), "下一页") or contains(text(), ">")]/@href'):
            add(href)
        for href in html.xpath('//a[contains(@href, "o0d0p")]/@href'):
            add(href)

    return new_urls


def _parse_movie_item(li) -> dict | None:
    """Parse one movie ``<li>`` element into a record dict.

    Returns None when every extracted field is empty (caller skips the item).
    May raise on unexpected markup; the caller logs and continues.
    """
    detail_url = (li.xpath('./a/@href') or [""])[0]
    img = (li.xpath('./a/img/@src') or [""])[0]
    title = (li.xpath('./div/p[1]/a/text()') or [""])[0]

    # Release time is encoded as the prefix of p[2]'s id attribute
    # (text before the first underscore) — presumably a date; confirmed
    # only by the original extraction logic, not by the markup itself.
    id_attrs = li.xpath('./div/p[2]/@id')
    movie_time = id_attrs[0].split('_')[0] if id_attrs else ""

    # Score: prefer the bold text inside p[2]; fall back to p[2]'s own text.
    score_nodes = li.xpath('./div/p[2]/b/text()') or li.xpath('./div/p[2]/text()')
    score = score_nodes[0].strip() if score_nodes else ""

    # p[3] holds the cast only when it contains the "主演" (starring) label;
    # otherwise p[3] already holds the genre and there is no cast line.
    # Renamed locals: the original shadowed the builtins `type` and the
    # stdlib module name `time`.
    p3_text = ''.join(li.xpath('./div/p[3]/text()'))
    if '主演' in p3_text:
        actor = '/'.join(li.xpath('./div/p[3]/a/text()'))
        movie_type = '/'.join(li.xpath('./div/p[4]/a/text()'))
    else:
        actor = ""
        movie_type = '/'.join(li.xpath('./div/p[3]/a/text()'))

    # Keep the record only if at least one field carries data.
    if not any([img, title, movie_time, score, actor, movie_type]):
        return None

    # Fix: the original did detail_url.split('/')[3] unguarded, which raised
    # IndexError on empty/short hrefs and silently dropped valid records.
    parts = detail_url.split('/')
    movie_id = parts[3] if len(parts) > 3 else ""

    return {
        'detail_url': _absolutize(detail_url),
        'movie_id': movie_id,
        'img': img,
        'title': title,
        'score': score,
        'time': movie_time,
        'actor': actor,
        'type': movie_type,
    }


def parser_custom_data(content: str, source_url: str) -> tuple[list[dict], list[str]] | None:
    """Parse a 1905.com movie list page.

    Args:
        content: Raw HTML of the fetched page.
        source_url: URL the content came from (used for logging only).

    Returns:
        A ``(records, new_urls)`` tuple — ``records`` is a list of movie
        dicts, ``new_urls`` a list of pagination URLs to crawl next.
        Returns ``([], [])`` when the list container is missing, and
        ``None`` when the page cannot be parsed at all.
    """
    logger = get_logger('custom_parser')
    logger.debug(f"开始解析自定义数据: {source_url}")

    try:
        html = etree.HTML(content)
        # The movie list lives in <ul class="inqList pt18"> containers.
        ul_list = html.xpath('//ul[@class="inqList pt18"]')

        if not ul_list:
            logger.warning("未找到电影列表容器")
            return [], []

        # URL extraction is best-effort: a failure here must not abort parsing.
        new_urls: list[str] = []
        try:
            new_urls = _collect_page_urls(html)
            logger.info(f"提取到 {len(new_urls)} 个新URL")
        except Exception as e:
            logger.warning(f"提取URL时出错: {e}")

        result: list[dict] = []
        for ul in ul_list:
            for li in ul.xpath('./li'):
                try:
                    record = _parse_movie_item(li)
                except Exception as e:
                    # One bad item must not abort the whole page.
                    logger.error(f"解析单个电影数据时出错: {e}")
                    continue
                if record is None:
                    logger.warning("跳过空数据项")
                else:
                    result.append(record)
                    logger.debug(f"成功提取电影数据: {record['title']}")

        logger.info(f"成功解析 {len(result)} 条电影数据，提取 {len(new_urls)} 个新URL")
        return result, new_urls

    except Exception as e:
        logger.error(f"解析自定义数据时出错: {e}")
        return None



