from bs4 import BeautifulSoup
import requests
import re
import os

import threading

def get_text_strip(x):
    """Return the node's ``.text`` with leading/trailing whitespace removed.

    Replaces a lambda-assigned-to-a-name (PEP 8 E731); behavior unchanged.
    """
    return x.text.strip()
def strip_html_br(x):
    """Collect the stripped text of a node's children, dropping empty ones.

    ``<br/>`` children render as an empty ``.text``, so filtering on
    truthiness removes them; the surviving texts are then stripped.
    (Filter happens before strip, as in the original pipeline, so a
    whitespace-only text survives the filter and becomes "".)

    Replaces a lambda-assigned-to-a-name (PEP 8 E731); behavior unchanged.
    """
    return [child.text.strip() for child in x.contents if child.text]
"""
将一维列表元素(按空格符)分割
"""
def split_list(list: list)-> list:
    return [name for pair in list for name in pair.split()]
"""
将二维列表子列表(按空格符)分割
"""
def split_sublist(list: list,split_list=split_list)-> list:
    for i in range(len(list)):
        list[i]=split_list(list[i])
    return list

# Target site
domain = "https://yuc.wiki"


def url(uri: str) -> str:
    """Join the site domain with *uri*."""
    return domain + uri


def get_filename(url: str) -> str:
    """Map a page URL to its local cache filename, e.g. ``wiki202504.html``."""
    stem = url.replace(domain, "wiki").replace("/", "")
    return stem + ".html"
"""
拉取html
"""
def fetch(url: str):
# 如果不存在本地文件,则发请求保存到本地
    filename=get_filename(url)
    if not os.path.exists(filename):
        res = requests.get(url)
        res.encoding = "utf-8"
        soup = BeautifulSoup(res.text, "lxml")
        open(filename, "w", encoding="utf-8").write(soup.prettify())

"""
解析html
"""
def load(url: str)->BeautifulSoup:
    # 解析本地文件
    filename=get_filename(url)
    return BeautifulSoup(open(filename, encoding="utf-8"), "lxml")
def get_seasons():
    """Return the season-page URIs linked from the site index page."""
    soup = load(domain)
    cells = soup.find_all('td', class_='index_season')
    return [cell.find('a')['href'] for cell in cells]
def fetch_seasons(domain: str):
    """Fetch every season page concurrently, one thread per page.

    Fixes two defects in the original:
    - bare ``except:`` swallowed everything (including KeyboardInterrupt);
      narrowed to ``RuntimeError``, which ``Thread.start()`` raises.
    - threads were never joined, so callers (``init``) could return before
      the cache files were written; we now wait for all downloads.
    """
    threads = []
    for uri in get_seasons():
        try:
            t = threading.Thread(target=fetch, args=(domain + uri,))
            t.start()
            threads.append(t)
        except RuntimeError as e:
            print(f'error:unable to start thread ({e})')
    for t in threads:
        t.join()

"""
初始化所有文件
"""
def init():
    fetch(domain)
    fetch_seasons(domain)

"""
解析季度动画片
"""
def _load_animes_unsafe(uri: str)-> list:
    soup=load(url(uri))
    # 中文标题
    main_titles=soup.find_all('td',class_=re.compile('title_main'));
    cn_titles=list(map(lambda x:x.find('p',class_=re.compile('title_cn')),main_titles))
    cn_titles=list(map(get_text_strip,cn_titles))
    # 日文标题
    jp_titles=list(map(lambda x:x.find('p',class_=re.compile('title_jp')),main_titles))
    jp_titles=list(map(get_text_strip,jp_titles))
    # 动画片类型,使用正则表达式匹配
    type_animes=list(map(strip_html_br,soup.find_all('td',class_=re.compile('type_'))))
    # 标签
    type_tags=list(map(strip_html_br,soup.find_all('td',class_=re.compile('type_tag_r*'))))
    type_tags=split_sublist(type_tags,split_list=lambda x:[name for pair in x for name in pair.split('/')])
    # 工作人员
    staffs=list(map(strip_html_br,soup.find_all('td',class_=re.compile('staff_r*'))))
    # 演员
    casts=list(map(strip_html_br,soup.find_all('td',class_=re.compile('cast_r*'))))
    casts=split_sublist(casts)
    # 相关链接
    links=list(map(lambda x:list(map(lambda e:{'name':e.text.strip(),'url':e['href']},x.find_all('a'))),soup.find_all('td',class_="link_a_r")))
    # 播出时间
    broadcasts=list(map(strip_html_br,soup.find_all('p',class_="broadcast_r")))
    # 集数
    broadcast_episodes=list(map(strip_html_br,soup.find_all('p',class_='broadcast_ex_r')))
    
    animes=[]
    for i in range(len(cn_titles)):
        anime={}
        anime['cn_title']=cn_titles[i]
        anime['jp_title']=jp_titles[i]
        anime['type']=type_animes[i]
        anime['tags']=type_tags[i]
        anime['staff']=staffs[i]
        anime['cast']=casts[i]
        anime['link']=links[i]
        anime['broadcast']=broadcasts[i]
        anime['episodes']=broadcast_episodes[i]
        animes.append(anime)

    return animes


from pprint import pprint

def load_animes(uri: str):
    """Parse one season page into a list of anime dicts, table by table.

    Unlike ``_load_animes_unsafe``, fields are read per anime table, so a
    missing cell yields an empty value instead of misaligning every row.
    """
    def get_list_strip_br(name: str, class_name: str, soup: BeautifulSoup) -> list:
        """First tag matching name/class regex, split on <br>; [] if absent."""
        # Single lookup (the original called soup.find twice).
        tag = soup.find(name, class_=re.compile(class_name))
        return strip_html_br(tag) if tag else []

    soup = load(url(uri))
    # Only tables that contain a title cell describe an anime.
    tables = [t for t in soup.find_all('table')
              if t.find('td', class_=re.compile('title_main'))]

    animes = []
    for table in tables:
        anime = {}
        title_main = table.find('td', class_=re.compile('title_main'))
        anime['cn_title'] = get_text_strip(title_main.find('p', class_=re.compile('title_cn')))
        anime['jp_title'] = get_text_strip(title_main.find('p', class_=re.compile('title_jp')))
        anime['type'] = get_list_strip_br('td', 'type', table)
        anime['tags'] = [x for pair in get_list_strip_br('td', 'type_tag', table)
                         for x in pair.split('/')]
        anime['staff'] = get_list_strip_br('td', 'staff', table)
        anime['cast'] = split_list(get_list_strip_br('td', 'cast', table))
        anime['link'] = [{'name': a.text.strip(), 'url': a['href']}
                         for a in table.find_all('a')]
        anime['broadcast'] = get_list_strip_br('p', 'broadcast', table)
        anime['episodes'] = get_list_strip_br('p', 'broadcast_ex', table)

        # Cover image sits in the previous sibling of the table's parent.
        # Guard every step: the original crashed (AttributeError on a missing
        # sibling, TypeError on a missing <img>) before its own 'not found'
        # fallback could ever run.
        sibling = table.parent.find_previous_sibling()
        img = sibling.find('img') if sibling else None
        cover = img.get('data-src') if img else None
        anime['cover'] = cover if cover else 'not found'

        animes.append(anime)

    return animes

# animes=load_animes('/202504')