# -*- coding: utf-8 -*-

"""
DateTime   : 2021/04/18 9:42
Author     : ZhangYafei
Description:
pip install requests lxml -i https://pypi.tuna.tsinghua.edu.cn/simple/
知识点：
    1. requests的使用：如何发送一个请求，如何添加一些headers信息, 如何接收响应数据
    2. HTML数据的解析
        lxml模块，xpath的使用方法，如何正确书写xpath路径
    3. 数据的保存
        将数据保存为csv格式
"""
import requests
from lxml import etree
import csv


# Column order written for every movie row:
# title, stars, release date, score, detail-page URL.
CSV_HEADER = ['电影名', '主演', '上映时间', '评分', 'url']

# Shared output sink: every maoyan_spider() call appends rows through
# `writer`; the handle `f` is closed once in the __main__ guard.
# utf_8_sig writes a BOM so Excel auto-detects UTF-8; newline='' lets the
# csv module control line endings (avoids blank lines on Windows).
f = open('猫眼电影TOP100.csv', 'w', encoding='utf_8_sig', newline='')
writer = csv.writer(f)
writer.writerow(CSV_HEADER)


def maoyan_spider(page):
    """Scrape one page (10 movies) of the Maoyan Top-100 board.

    Each movie found on the page is printed and appended as one row to the
    module-level CSV ``writer`` in the order: title, stars, release date,
    score, detail-page URL.

    :param page: zero-based page index; mapped to the ``offset`` query
        parameter (10 movies per page).
    :raises requests.RequestException: on network failure or timeout.
    :raises IndexError: if the page layout changes and an expected element
        is missing (e.g. when an anti-bot verification page is returned).
    """
    offset = page * 10
    url = f'https://maoyan.com/board/4?offset={offset}'
    # Headers copied from a real browser session; Maoyan serves a
    # verification page to clients without a plausible UA/Cookie.
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.128 Safari/537.36 Edg/89.0.774.77',
        'Referer': 'https://maoyan.com/board',
        'Cookie': '__mta=210680106.1618198451074.1618710020074.1618713624849.9; uuid_n_v=v1; uuid=F1690A509B3F11EBAB1D1FE68B47685D4D7D0EA204454B2BB4FC7295924B1B03; _lxsdk_cuid=178c424d2d371-068a0f57472b6-71667960-15f900-178c424d2d4c8; _lxsdk=F1690A509B3F11EBAB1D1FE68B47685D4D7D0EA204454B2BB4FC7295924B1B03; _csrf=d4962e818e5a67f4dc9e0bb288c7342e9c60fa3193abd2eb24c7cc91513e4d19; Hm_lvt_703e94591e87be68cc8da0da7cbd0be2=1618198451,1618709684; _lx_utm=utm_source%3DBaidu%26utm_medium%3Dorganic; __mta=210680106.1618198451074.1618200057070.1618709691708.7; Hm_lpvt_703e94591e87be68cc8da0da7cbd0be2=1618713624; _lxsdk_s=178e2d8e380-120-660-c68%7C%7C8'
    }

    # 1-2. Send the request and receive the response. The timeout keeps the
    # crawl from hanging forever on a stalled connection (requests has no
    # default timeout).
    response = requests.get(url=url, headers=headers, timeout=10)
    response.encoding = 'utf-8'
    # 3. Parse the HTML into an element tree.
    html = etree.HTML(response.text)
    # 4. One <dd> per movie; extract the fields and persist each row.
    # (If an anti-bot page was returned, the xpath yields no <dd> elements
    # and the loop simply writes nothing.)
    for dd in html.xpath('//*[@id="app"]/div/div/div[1]/dl/dd'):
        title = dd.xpath('div[1]//p[@class="name"]/a/text()')[0]
        films_url = dd.xpath('div[1]//p[@class="name"]/a/@href')[0]
        # href is site-relative, e.g. "/films/1234"; make it absolute.
        films_url = f'https://maoyan.com{films_url}'
        star = dd.xpath('div[1]//p[@class="star"]/text()')[0].strip()
        release_time = dd.xpath('div[1]//p[@class="releasetime"]/text()')[0]
        # The score is split into integer and fraction <i> elements
        # (e.g. "9." and "5"); join them into one string.
        score = ''.join(dd.xpath('div[1]//p[@class="score"]/i/text()'))
        print(title, films_url, star, release_time, score)
        writer.writerow([title, star, release_time, score, films_url])


if __name__ == '__main__':
    # Pages 0..9 cover the full Top-100 board (10 movies per page).
    # try/finally guarantees the CSV handle is closed (flushing buffered
    # rows) even if a page request raises mid-crawl — the original only
    # closed it on a fully successful run.
    try:
        for page in range(10):
            maoyan_spider(page)
    finally:
        f.close()


