"""
@FileName：2.lxml豆瓣网 top250.py
@Author：lmz
@Time：2021/5/24 20:00
"""
from lxml import etree
import requests
import json
# Request headers: the target site may reject bot-like requests, so we
# impersonate a desktop Chrome browser via the User-Agent string.
# (A leftover, unused `url` constant pointing at qiushibaike was removed here;
# the actual page URLs are built in the __main__ block below.)
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 '
                  '(KHTML, like Gecko) Chrome/85.0.4183.83 Safari/537.36'
}
def get_one_page(url):
    """Fetch *url* and return the raw response body, or None on failure.

    Returns the body bytes only for an HTTP 200 response; any other status
    code, or any requests-level error (DNS failure, connection error,
    timeout), yields None so the caller can skip the page.
    """
    try:
        # A timeout is essential: without one, a stalled server would hang
        # the whole scrape forever. requests.Timeout is a subclass of
        # RequestException, so the existing handler still catches it.
        response = requests.get(url, headers=headers, timeout=10)
    except requests.RequestException:
        return None
    if response.status_code == 200:
        return response.content
    return None


def _first_text(node, path):
    """Return the first text hit of *path* under *node*, or '' when absent."""
    hits = node.xpath(path)
    return hits[0] if hits else ''


def parse_one_page(content):
    """Parse one Douban Top-250 listing page and yield one dict per film.

    content: raw HTML (bytes or str) as returned by get_one_page().
    Yields dicts with keys 'name', 'index', 'quote' and 'rating_num'.
    Values are plain strings ('' when the element is missing — e.g. some
    films have no quote line), instead of the raw one-element lists that
    xpath() returns, so the JSON written downstream stays flat.
    """
    html = etree.HTML(content)
    for film in html.xpath('//div[@class="article"]/ol/li'):
        yield {
            'name': _first_text(film, './div//span[@class="title"][1]/text()'),
            'index': _first_text(film, './div//div[@class="pic"]/em/text()'),
            'quote': _first_text(film, './div//span[@class="inq"]/text()'),
            'rating_num': _first_text(film, './div//span[@class="rating_num"]/text()'),
        }

def write(data):
    """Append *data* to douban_top250.json as one JSON line.

    The file is opened in append mode so successive calls accumulate one
    record per line (JSON Lines format); non-ASCII text is kept readable
    via ensure_ascii=False.
    """
    line = json.dumps(data, ensure_ascii=False)
    with open('douban_top250.json', 'a', encoding='utf-8') as out:
        out.write(line + '\n')


if __name__ == '__main__':
    # Douban Top 250 is paginated 25 films per page; start=0,25,...,225.
    base = 'https://movie.douban.com/top250?start='
    for start in range(0, 250, 25):
        content = get_one_page(base + str(start))
        if content is None:
            print('Request Error')
            continue
        for item in parse_one_page(content):
            write(item)

