# coding: utf-8
# 文件名称: dytt_spider.py
# 创建时间: 2021/6/2 21:14
from lxml import etree
import requests

# Base URL of the target site; relative detail-page hrefs are joined onto it.
YUMING = 'https://www.dytt8.net/'
# Request headers (NOTE: name is a typo for HEADERS, kept for compatibility);
# a browser User-Agent avoids trivial bot blocking.
HEADENS = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.212 Safari/537.36 Edg/90.0.818.66'
}

# 从列表页获取详情页url函数
def get_li_urls(url):
    """Fetch one list page and return the absolute URLs of its detail pages.

    :param url: URL of a movie list page (e.g. .../list_23_1.html)
    :return: list of absolute detail-page URL strings
    """
    # BUG FIX: the second positional argument of requests.get() is `params`
    # (query string), not `headers` — the User-Agent dict was never sent as
    # a header. It must be passed as the `headers=` keyword.
    response = requests.get(url, headers=HEADENS)
    # The site serves GBK-encoded HTML; drop any undecodable bytes.
    text = response.content.decode('gbk', 'ignore')

    html = etree.HTML(text)
    # Each movie entry is a <table class="tbspan"> whose <a> href is relative.
    relative_urls = html.xpath("//table[@class='tbspan']//a/@href")
    # Return a list (re-iterable) rather than a single-use map object.
    return [YUMING + href for href in relative_urls]

def parse_data_page(url):
    """Fetch one detail page and extract the movie's metadata.

    :param url: absolute URL of a movie detail page
    :return: dict with keys 片名/海报/年代/产地/类别/豆瓣评分/主演/简介/下载链接
             (fields absent from the page are simply missing or empty)
    """
    # BUG FIX: headers must be passed via the `headers=` keyword; positionally
    # the dict would be sent as the query string (`params`).
    response = requests.get(url, headers=HEADENS)
    text = response.content.decode('gbk', 'ignore')

    movie = {}
    html = etree.HTML(text)
    # Page title, e.g. "2021年剧情《...》BD中英双字".
    title = html.xpath("//h1/font[@color='#07519a']/text()")[0]
    movie['片名'] = title

    # The #Zoom div holds the poster image, the info text and download links.
    Zoom = html.xpath("//div[@id='Zoom']")[0]
    # Robustness: some pages have no poster image — default to '' instead of
    # crashing with IndexError on [0].
    srcs = Zoom.xpath(".//img/@src")
    movie['海报'] = srcs[0] if srcs else ''

    infos = Zoom.xpath(".//text()")

    def cl_str(info, st):
        # Strip the "◎xxx" field label and surrounding whitespace.
        return info.replace(st, '').strip()

    for index, info in enumerate(infos):
        if info.startswith('◎年　　代'):
            movie['年代'] = cl_str(info, '◎年　　代')
        elif info.startswith('◎产　　地'):
            movie['产地'] = cl_str(info, '◎产　　地')
        elif info.startswith('◎类　　别'):
            movie['类别'] = cl_str(info, '◎类　　别')
        elif info.startswith('◎豆瓣评分'):
            movie['豆瓣评分'] = cl_str(info, '◎豆瓣评分')
        elif info.startswith('◎主　　演'):
            # First actor is on the label line; the rest follow one per line
            # until the next "◎" field starts.
            zhuyans = [cl_str(info, '◎主　　演')]
            for x in range(index + 1, len(infos)):
                zhuyan = infos[x].strip()
                if zhuyan.startswith('◎'):
                    break
                zhuyans.append(zhuyan)
            movie['主演'] = zhuyans
        elif info.startswith('◎简　　介'):
            # Synopsis spans multiple text nodes up to the download section.
            cons = []
            for x in range(index + 1, len(infos)):
                con = infos[x]
                if con.startswith('【下载地址】'):
                    break
                cons.append(con)
            movie['简介'] = '。'.join(cons)

    # Robustness: guard against pages without a download anchor.
    download_urls = html.xpath("//div[@id='Zoom']//a/@href")
    movie['下载链接'] = download_urls[0] if download_urls else ''

    return movie


def spider():
    """Crawl list pages 1-7 of the 最新电影 category and scrape every movie.

    :return: list of movie-info dicts (also printed one per movie as scraped)
    """
    # The category uses predictable paginated URLs: list_23_1.html .. list_23_7.html.
    urls = [f"https://www.dytt8.net/html/gndy/dyzz/list_23_{i}.html" for i in range(1, 8)]
    movies = []
    for url in urls:
        # Extract every detail-page URL from this list page, then scrape each.
        for li_url in get_li_urls(url):
            movie = parse_data_page(li_url)
            movies.append(movie)
            print(movie)
    # IMPROVEMENT: the collected results were previously discarded; return
    # them so callers can use the data (existing callers ignored None anyway).
    return movies


# Run the crawler only when executed as a script, not when imported.
if __name__ == '__main__':
    spider()