from urllib.parse import urljoin
from bs4 import BeautifulSoup
import re


def anime_detail_data(html_text):
    """
    Extract series detail information from an HTML detail page.

    Args:
        html_text: HTML text of a detail page for a single show.

    Returns:
        {
            "data": [{
                "anime": {"title": "", "img": "", "url": "", "episode": ""},
                "desc": "",
                "score": "",
                "tags": [],
                "updateTime": "",
                "episode": [{"name": "", "url": ""}]
            }]
        }
        "data" is an empty list when the main container is not found.
    """
    soup = BeautifulSoup(html_text, 'html.parser')
    base_url = "https://d2883ce011d2398b.xdm01.cc"
    result = {"data": []}

    # 1. Locate the main container (class="container").
    anime_container = soup.find('div', class_='container')
    if not anime_container:
        return result  # No main container: return empty result.

    # 2. Extract basic show info (title / cover image / link / current episode).
    anime_data = {}
    thumb = anime_container.find('div', class_='myui-content__thumb')
    if thumb:
        link = thumb.find('a', class_='myui-vodlist__thumb')
        if link:
            # Hoist the span lookup so the tree is searched only once
            # (the original searched it twice for the same node).
            episode_span = link.find('span', class_='pic-text text-right')
            anime_data = {
                "title": link.get('title', '').strip(),
                "img": link.get('data-original', ''),
                "url": urljoin(base_url, link.get('href', '')),
                "episode": episode_span.get_text(strip=True) if episode_span else ""
            }

    # 3. Extract the description (class="col-pd text-collapse content").
    desc = ""
    desc_container = anime_container.find('div', class_='col-pd text-collapse content')
    if desc_container:
        desc_p = desc_container.find('p')
        if desc_p:
            desc = desc_p.get_text(strip=True)

    # 4. Extract score / tags / update time (class="myui-content__detail").
    detail_container = anime_container.find('div', class_='myui-content__detail')
    score = ""
    tags = []
    update_time = ""

    if detail_container:
        # Score lives in <span class="branch">.
        score_span = detail_container.find('span', class_='branch')
        score = score_span.get_text(strip=True) if score_span else ""

        # Tags are the <a> siblings immediately following each
        # <span class="text-muted"> label.
        for span in detail_container.find_all('span', class_='text-muted'):
            next_tag = span.find_next_sibling('a')
            if next_tag:
                tags.append(next_tag.get_text(strip=True))

        # Update time lives in <span class="text-red">. Text of the form
        # "更新至XX集" ("updated to episode XX") is normalized down to just
        # that phrase; any other text is kept verbatim.
        update_span = detail_container.find('span', class_='text-red')
        if update_span:
            update_text = update_span.get_text(strip=True)
            if '更新至' in update_text:
                match = re.search(r'更新至[\d集]+', update_text)
                if match:
                    update_time = match.group(0)
            else:
                update_time = update_text

    # 5. Extract the episode list (class="tab-content myui-panel_bd").
    episode_list = []
    episode_container = anime_container.find('div', class_='tab-content myui-panel_bd')
    if episode_container:
        for ep_link in episode_container.find_all('a', class_='btn btn-default'):
            episode_list.append({
                "name": ep_link.get_text(strip=True),
                "url": urljoin(base_url, ep_link.get('href', ''))
            })

    # 6. Assemble the final payload.
    result["data"].append(
        {
            "anime": anime_data,
            "desc": desc,
            "score": score,
            "tags": tags,
            "updateTime": update_time,
            "episode": episode_list
        }
    )

    return result




def anime_search_data(html_text):
    """
    Parse a search/listing page and extract the shows it contains.

    Args:
        html_text: HTML text containing the search-result listing.

    Returns:
        {"data": [{"title": show name, "url": full URL,
                   "img": cover image URL, "episode": latest-episode text}, ...]}
        "data" is an empty list when the listing container is missing.
    """
    base_url = "https://d2883ce011d2398b.xdm01.cc"
    soup = BeautifulSoup(html_text, 'html.parser')

    # All results live inside this specific column container; bail out
    # early when the page does not have it.
    container = soup.find('div', class_='col-md-wide-7 col-xs-1')
    if container is None:
        return {"data": []}

    entries = []
    for thumb_div in container.find_all('div', class_='thumb'):
        # Each result is anchored by this link element; skip malformed items.
        anchor = thumb_div.find('a', class_='myui-vodlist__thumb')
        if not anchor:
            continue

        # Latest-episode label, e.g. "更新至12集", rendered in the corner badge.
        badge = anchor.find('span', class_='pic-text text-right')

        entries.append({
            "title": anchor.get('title', '').strip(),
            "url": urljoin(base_url, anchor.get('href', '')),
            "img": anchor.get('data-original', ''),
            "episode": badge.get_text(strip=True) if badge else ""
        })

    return {"data": entries}



def anime_video_data(html_text):
    """
    Extract the playable video URL from a player page.

    Scans the raw HTML for the inline ``var player_aaaa = {...};``
    JavaScript configuration object and pulls out its ``"url"`` field.

    Args:
        html_text: HTML text containing the embedded video player config.

    Returns:
        str: the full playback URL (m3u8 proxy prefix + extracted url),
        or "" when no player config / url field is found.
        (Note: the original docstring claimed a dict was returned; the
        function has always returned a plain string.)
    """
    base_url = "https://danmu.yhdmjx.com/m3u8.php?url="
    video_url = ""

    # Find the player_aaaa object anywhere in the document; require it to
    # contain a non-empty "url" field so we do not match unrelated scripts.
    match = re.search(
        r'var player_aaaa\s*=\s*({.*?"url"\s*:\s*"[^"]+".*?});',
        html_text,
        re.DOTALL
    )

    if match:
        player_config = match.group(1)
        # Pull the "url" field out of the config blob.
        url_match = re.search(r'"url":\s*"([^"]+)"', player_config)
        if url_match:
            # Prefix with the m3u8 proxy endpoint to form the final URL.
            video_url = base_url + url_match.group(1)

    return video_url

