# -*- coding: utf-8 -*-

"""
DateTime   : 2021/04/12 11:26
Author     : ZhangYafei
Description: 猫眼Top100
https://maoyan.com/board/4
"""
import csv
import os
from typing import Iterable, Any

from lxml import etree
from requests import Session
from zyf.timer import timeit


class MaoyanSpider:
    """Scrape the Maoyan Top-100 movie board (10 pages x 10 movies) into a CSV file.

    Workflow: ``__init__`` prepares a ``requests.Session`` with browser-like
    headers (the Cookie is required to get past Maoyan's anti-bot check),
    creates the output directory/file, then ``run`` fetches every page and
    appends the parsed rows via ``save_to_csv``.
    """

    def __init__(self):
        self.url = 'https://maoyan.com/board/4'
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.182 Safari/537.36',
            'Referer': 'https://maoyan.com/board/2',
            'Cookie': '__mta=245610171.1614254360369.1614420203150.1614420512427.16; uuid_n_v=v1; uuid=E41F14A0776011EBBFF0B5C7CD64C724FAFDD8E0D1B84659A26E968D2510A0AF; _lxsdk_cuid=177d90ec6dec8-09811bc4a62d0d-73e356b-ff000-177d90ec6dec8; _lxsdk=E41F14A0776011EBBFF0B5C7CD64C724FAFDD8E0D1B84659A26E968D2510A0AF; _csrf=0c2482b6fdf753b72ea9b7e32b62df160f6aa2651c638f8dd9e0341edd47f769; Hm_lvt_703e94591e87be68cc8da0da7cbd0be2=1614254360,1614415657; _lx_utm=utm_source%3DBaidu%26utm_medium%3Dorganic; __mta=245610171.1614254360369.1614254443596.1614415659559.9; Hm_lpvt_703e94591e87be68cc8da0da7cbd0be2=1614420512; _lxsdk_s=177e2d11e96-f83-1ea-477%7C%7C9',
        }
        self.session = Session()
        self.session.headers = headers
        self.total_pages = 10
        self.dir_path = 'data'
        # makedirs(exist_ok=True) is atomic w.r.t. the existence check,
        # unlike the check-then-mkdir pattern which races with other processes.
        os.makedirs(self.dir_path, exist_ok=True)
        self.filepath = f'{self.dir_path}/猫眼Top100.csv'
        self.init_file()

    def init_file(self):
        """Open the output CSV (utf-8 with BOM so Excel detects the encoding) and write the header row."""
        self.columns = ['电影名', '主演', '上映时间', 'url', '图片地址', '评分']
        self.file = open(self.filepath, encoding='utf_8_sig', mode='w', newline='')
        self.csv_file = csv.writer(self.file)
        self.csv_file.writerow(self.columns)

    def spider(self, page):
        """Fetch one board page (0-based ``page``; offset = page*10), parse its 10 movies, and persist them.

        Raises:
            Exception: if the HTTP response status is not 200.
        """
        url = f'{self.url}?offset={page * 10}'
        response = self.session.get(url)
        if response.status_code != 200:
            raise Exception(f'request failed, status_code {response.status_code}')
        response = etree.HTML(response.text)
        dd_list = response.xpath('//dl[@class="board-wrapper"]/dd')
        rows = []
        for dd in dd_list:
            url = 'https://maoyan.com' + dd.xpath('a/@href')[0]
            # The real poster URL is lazy-loaded: the second <img> carries it in data-src.
            img_src = dd.xpath('a/img[2]/@data-src')[0]
            item = dd.xpath('div[@class="board-item-main"]/div[@class="board-item-content"]/div')
            name = item[0].xpath('p[@class="name"]/a/text()')[0]
            # Strip the Chinese field labels ("主演：" = starring, "上映时间：" = release date).
            star = item[0].xpath('p[@class="star"]/text()')[0].strip().replace('主演：', '')
            release_time = item[0].xpath('p[@class="releasetime"]/text()')[0].replace('上映时间：', '')
            # The score is rendered as two <i> elements (integer + fraction); join them.
            score = ''.join(item[1].xpath('p[@class="score"]/i/text()'))
            rows.append([name, star, release_time, url, img_src, score])

        self.save_to_csv(rows)

    def save_to_csv(self, rows: Iterable[Iterable[Any]]):
        """Append the given rows to the already-open CSV writer."""
        self.csv_file.writerows(rows)

    @timeit
    def run(self):
        """Crawl every board page in order; ``spider`` raises on any non-200 response."""
        for i in range(self.total_pages):
            print(f'开始爬取第 {i + 1} 页')
            self.spider(page=i)
        print('所有数据获取完成')

    def __del__(self):
        # Guard with getattr: if __init__ failed before init_file() assigned
        # self.file, an unguarded access would raise AttributeError here.
        file = getattr(self, 'file', None)
        if file is not None and not file.closed:
            file.close()


if __name__ == '__main__':
    # Script entry point: build the spider and crawl all pages in one go.
    MaoyanSpider().run()
