#!/usr/bin/env python
# encoding: utf-8

"""
爬取电影天堂
"""

import requests
from lxml import etree
import time
import json

# 主页地址
main_url = "https://www.dytt8.net"

headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36"
}

def get_detail_urls(url):
    """
    Fetch one movie-list page and return the absolute URLs of every
    movie detail page it links to.

    :param url: URL of one paginated movie-list page
    :return: list of absolute detail-page URLs
    """
    response = requests.get(url, headers=headers)
    html_element = etree.HTML(response.text)

    # href of every <a> under tables with class "tbspan" (one table per movie).
    detail_urls = html_element.xpath('//table[@class="tbspan"]//a/@href')

    # Drop the stray navigation link that appears among the results, then
    # prepend the site root to turn relative paths into absolute URLs.
    # NOTE: the original removed items from the list while iterating the
    # very same list object, which silently skips adjacent matches;
    # filtering into a new list avoids that bug.
    return [main_url + path for path in detail_urls
            if path != '/html/gndy/jddy/index.html']

def parse_detail_page(detail_url):
    """
    Parse a single movie detail page and extract its metadata.

    :param detail_url: absolute URL of the detail page
    :return: JSON string (non-ASCII preserved) with keys: title, cover,
             screen_shot, year, country, type, rating, duration,
             director, actors, desc, download_url
    """
    # Field labels as they appear on the page. Hoisted out of the loop —
    # the original rebuilt all 15 strings on every iteration. The
    # full-width spaces inside the labels are significant; do not edit.
    LABEL_YEAR = '◎年　　代'
    LABEL_COUNTRY = '◎产　　地'
    LABEL_TYPE = '◎类　　别'
    LABEL_DOUBAN = '◎豆瓣评分'
    LABEL_DURATION = '◎片　　长'
    LABEL_DIRECTOR = '◎导　　演'
    LABEL_ACTORS = '◎主　　演'
    LABEL_TAGS = '◎标　　签'
    LABEL_DESC = '◎简　　介'
    LABEL_DOWNLOAD = '【下载地址】'

    response = requests.get(detail_url, headers=headers)
    # Feed the raw bytes to lxml and let it detect the page encoding
    # (the site serves gb2312, which .text may mis-decode).
    html_element = etree.HTML(response.content)

    # Movie title
    title = html_element.xpath('//div[@class="title_all"]//font[@color="#07519a"]/text()')[0]
    # The #Zoom div holds the cover image, screenshots and metadata text.
    zoom_element = html_element.xpath('//div[@id="Zoom"]')[0]

    # Cover and screenshot images
    imgs = zoom_element.xpath('.//img/@src')

    # Pre-initialise every field so malformed pages can't raise NameError.
    # `actors` starts as a list (the original used '' and later rebound it
    # to a list, giving the JSON field an inconsistent type).
    year = country = movie_type = rating = duration = director = ''
    cover = screen_shot = download_url = desc = ''
    actors = []

    # First image is the cover, second (if present) a screenshot.
    # Bug fix: the original never assigned `cover`, so it was always ''.
    if imgs:
        cover = imgs[0]
    if len(imgs) > 1:
        screen_shot = imgs[1]

    # All text nodes inside #Zoom, one per line of the info block.
    zoom_infos = zoom_element.xpath('.//text()')

    def parse_zoom_info(info, rule):
        # Strip the label prefix and surrounding whitespace.
        return info.replace(rule, '').strip()

    for key, info in enumerate(zoom_infos):
        if info.startswith(LABEL_YEAR):
            year = parse_zoom_info(info, LABEL_YEAR)
        elif info.startswith(LABEL_COUNTRY):
            country = parse_zoom_info(info, LABEL_COUNTRY)
        elif info.startswith(LABEL_TYPE):
            movie_type = parse_zoom_info(info, LABEL_TYPE)
        elif info.startswith(LABEL_DOUBAN):
            rating = parse_zoom_info(info, LABEL_DOUBAN)
        elif info.startswith(LABEL_DURATION):
            duration = parse_zoom_info(info, LABEL_DURATION)
        elif info.startswith(LABEL_DIRECTOR):
            director = parse_zoom_info(info, LABEL_DIRECTOR)
        elif info.startswith(LABEL_ACTORS):
            # First actor shares the label line; the rest follow, one
            # text node each, until the "tags" label.
            actors = [parse_zoom_info(info, LABEL_ACTORS)]
            for i in range(key + 1, len(zoom_infos)):
                item = zoom_infos[i].strip()
                if item.startswith(LABEL_TAGS):
                    break
                actors.append(item)
        elif info.startswith(LABEL_DESC):
            # Collect every synopsis line until the download section.
            # Bug fix: the original overwrote `desc` on each iteration,
            # keeping only the last text node of the synopsis.
            desc_lines = []
            for i in range(key + 1, len(zoom_infos)):
                item = zoom_infos[i].strip()
                if item.startswith(LABEL_DOWNLOAD):
                    break
                desc_lines.append(item)
            desc = '\n'.join(desc_lines)

    # Download link: prefer the anchor text, fall back to the cell's own
    # text. Evaluate each xpath once (the original ran it twice).
    link_texts = html_element.xpath('//td[@bgcolor="#fdfddf"]/a/text()')
    if not link_texts:
        link_texts = html_element.xpath('//td[@bgcolor="#fdfddf"]/text()')
    if link_texts:
        download_url = link_texts[0]

    film = {
        "title": title,
        "cover": cover,
        "screen_shot": screen_shot,
        "year": year,
        "country": country,
        "type": movie_type,
        "rating": rating,
        "duration": duration,
        "director": director,
        "actors": actors,
        "desc": desc,
        "download_url": download_url
    }

    return json.dumps(film, ensure_ascii=False)

def spider():
    """
    Entry point: crawl pages 1-10 of the "latest movies" listing and
    print all collected film records as one JSON array.

    :return: None
    """
    list_url = 'https://www.dytt8.net/html/gndy/dyzz/list_23_{}.html'

    films = []

    # Walk list pages 1 through 10.
    for page in range(1, 11):
        print("开始爬第{}页".format(page))

        # Build this page's list URL, then visit every movie it links to.
        page_url = list_url.format(page)
        for detail_url in get_detail_urls(page_url):
            film = parse_detail_page(detail_url)
            print('film = ', film)
            films.append(json.loads(film))

        # Be polite to the server: pause one second between list pages.
        time.sleep(1)

    print("films = ", json.dumps(films, ensure_ascii=False))

# Run the crawler only when executed as a script, not when imported.
if __name__ == '__main__':
    spider()
