'''
Scrape movie listings from the Movie Heaven site (dytt8.net)
using lxml XPath queries.
'''
import json
import os

import requests
from lxml import etree
from requests.exceptions import RequestException

BASE_DOMAIN = 'https://www.dytt8.net'
filename = 'movie.json'


# 访问电影天堂出现乱码
# 传入tag=1代表获取电影的链接，不手动解码
# 传入tag=2代表获取电影的详情，要自动解码
def get_response(url, tag):
    """Fetch ``url`` and return its body as text, or ``None`` on failure.

    The site serves GBK-encoded pages, so detail pages must be decoded
    manually instead of relying on requests' guessed encoding.

    :param url: page URL to fetch.
    :param tag: 1 -> list page, return ``response.text`` (no manual
        decoding); 2 -> detail page, decode ``response.content`` as GBK.
    :return: page body as ``str``, or ``None`` on any error.
    """
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.130 Safari/537.36',
    }
    try:
        # Bug fix: a timeout keeps the crawler from hanging forever on a
        # stalled connection. Timeout is a subclass of RequestException,
        # so the existing handler still catches it.
        response = requests.get(url, headers=headers, timeout=10)
        if response.status_code == 200:
            if tag == 1:
                return response.text
            elif tag == 2:
                # Bug fix: dytt pages occasionally contain bytes that are
                # invalid GBK; errors='ignore' keeps one bad byte from
                # raising an uncaught UnicodeDecodeError and killing the run.
                return response.content.decode('gbk', errors='ignore')
            else:
                print('传入的的tag值有误，不为（1，2）')
                return None
        print('status is not 200')
        return None
    except RequestException as e:
        print('RequestException')
        print(e)
        return None


def get_detail_url(content):
    """Extract the per-movie detail-page URLs from one listing page.

    :param content: HTML text of a listing page.
    :return: lazy iterable of absolute detail-page URLs.
    """
    tree = etree.HTML(content)
    relative_links = tree.xpath('//table[@class="tbspan"]//a/@href')
    return (BASE_DOMAIN + link for link in relative_links)


def parse_info(info, rule):
    """Remove the field label ``rule`` from ``info`` and trim whitespace.

    :param info: raw text node, e.g. '◎年　　代 2020'.
    :param rule: the field-label prefix to strip out.
    :return: the remaining value with surrounding whitespace removed.
    """
    without_label = info.replace(rule, '')
    return without_label.strip()


def _collect_until_next_field(infos, start):
    """Gather stripped text nodes from infos[start:] up to (but excluding)
    the next '◎'-prefixed field marker."""
    values = []
    for i in range(start, len(infos)):
        text = infos[i].strip()
        if text.startswith('◎'):
            break
        values.append(text)
    return values


def get_detail_info(content):
    """Parse one movie detail page into a dict.

    :param content: decoded HTML of a detail page.
    :return: dict with keys such as 'title', 'cover', 'shortcut', 'year',
        'country', 'type', 'language', 'pubtime', 'douban_rate', 'runtime',
        'director', 'editors', 'actors', 'profile'. Field keys are only
        present when the corresponding '◎' marker exists on the page.
    """
    movie = {}
    html = etree.HTML(content)
    title = html.xpath('//div[@class="title_all"]//text()')[0]
    movie['title'] = title
    zoomE = html.xpath('//div[@id="Zoom"]')[0]
    # Some pages have a poster plus a screenshot, some only a poster,
    # some no image at all.
    imgs = zoomE.xpath('.//img/@src')
    # Bug fix: the original assigned the raw (possibly empty) list to
    # 'cover' when len(imgs) was neither 1 nor 2, producing inconsistent
    # value types in the JSON output. Always store strings.
    movie['cover'] = imgs[0] if imgs else ''
    movie['shortcut'] = imgs[1] if len(imgs) > 1 else ''

    infos = zoomE.xpath('.//text()')

    for index, info in enumerate(infos):
        if info.startswith('◎年　　代'):
            movie['year'] = parse_info(info, '◎年　　代')
        elif info.startswith('◎产　　地'):
            movie['country'] = parse_info(info, '◎产　　地')
        elif info.startswith('◎类　　别'):
            movie['type'] = parse_info(info, '◎类　　别')
        elif info.startswith('◎语　　言'):
            movie['language'] = parse_info(info, '◎语　　言')
        elif info.startswith('◎上映日期'):
            movie['pubtime'] = parse_info(info, '◎上映日期')
        elif info.startswith('◎豆瓣评分'):
            movie['douban_rate'] = parse_info(info, '◎豆瓣评分')
        elif info.startswith('◎片　　长'):
            movie['runtime'] = parse_info(info, '◎片　　长')
        elif info.startswith('◎导　　演'):
            movie['director'] = parse_info(info, '◎导　　演')
        elif info.startswith('◎编　　剧'):
            # Multiple writers span several text nodes until the next field.
            first = parse_info(info, '◎编　　剧')
            movie['editors'] = ','.join(
                [first] + _collect_until_next_field(infos, index + 1))
        elif info.startswith('◎主　　演'):
            # Same multi-line layout as the writers field.
            first = parse_info(info, '◎主　　演')
            movie['actors'] = ','.join(
                [first] + _collect_until_next_field(infos, index + 1))
        elif info.startswith('◎简　　介 '):
            # NOTE(review): the trailing space in this marker is kept from
            # the original — confirm the site really emits it.
            movie['profile'] = ''.join(
                _collect_until_next_field(infos, index + 1))
    print(movie)
    return movie


def save_to_file(path, item):
    """Append ``item`` as one JSON object, terminated by ',\\n', to ``path``.

    Mode 'a' already creates the file when it does not exist, so the
    original ``os.path.exists`` check with duplicated 'w'/'a' branches was
    redundant (and race-prone between the check and the open).

    :param path: target file path.
    :param item: JSON-serializable object (one movie dict).
    """
    try:
        with open(path, "a", encoding='utf-8') as f:
            json.dump(item, f, ensure_ascii=False)
            f.write(',\n')
            print("^_^ write success")
    except Exception as e:
        # Best-effort persistence: report and keep crawling.
        print("write error==>", e)


def main():
    """Crawl pages 1-7 of the latest-movies listing.

    Each parsed movie is appended to ``filename`` as it is scraped.

    :return: list of all movie dicts parsed. (Fix: ``movie_list`` was
        accumulated by the original but never returned or used.)
    """
    base_url = 'https://www.dytt8.net/html/gndy/dyzz/list_23_{}.html'
    movie_list = []
    for page in range(1, 8):
        print('第{}页'.format(page))
        movies = []
        url = base_url.format(page)
        html = get_response(url, 1)
        if html is not None:
            # Collect the detail-page URL of every movie on this page.
            detail_urls = get_detail_url(html)
            for index, detail_url in enumerate(detail_urls):
                movie_html = get_response(detail_url, 2)
                if movie_html is not None:
                    # Parse and persist the movie immediately so a crash
                    # mid-run does not lose earlier results.
                    movie = get_detail_info(movie_html)
                    save_to_file(filename, movie)
                    movies.append(movie)
                else:
                    print('第{}页,第{}url请求出错'.format(page, index))
        else:
            print("第{}页请求出错".format(page))
        movie_list.extend(movies)
    return movie_list


# Script entry point: run the crawler only when executed directly,
# not when this module is imported.
if __name__ == '__main__':
    main()
