'''
Scrape the Douban Top-250 movie listing (movie.douban.com/top250)
and save the extracted records to a CSV file.
'''
import requests
from bs4 import BeautifulSoup
import csv

def request_douban(url):
    """Fetch the HTML body of a Douban page.

    Args:
        url: Fully-qualified URL of the page to download.

    Returns:
        The response text on HTTP 200, otherwise None (including on
        connection errors and timeouts).
    """
    headers = {
        # Douban rejects requests without a browser-like User-Agent.
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) '
                      'Chrome/88.0.4324.146 Safari/537.36',
    }
    try:
        # Timeout prevents the scraper from hanging forever on a dead connection.
        r = requests.get(url, headers=headers, timeout=10)
    except requests.RequestException:
        # Network failure: report "no page" the same way a bad status does.
        return None
    return r.text if r.status_code == 200 else None

def parse_page(soup):
    """Extract movie records from one Top-250 list page.

    Args:
        soup: BeautifulSoup document of a movie.douban.com/top250 page.

    Returns:
        A list of [name, author, img, rating_num, quote] rows; an empty
        list when the expected list container is absent (e.g. an error
        page was returned).
    """
    grid = soup.find(class_='grid_view')
    if grid is None:
        # Layout changed or a non-list page came back; nothing to parse.
        return []
    result_data = []
    for item in grid.find_all('li'):
        name = item.find(class_='title').string
        author = item.find('p').text.strip()  # director / cast line
        img = item.find('a').find('img').get('src')
        rating_num = item.find(class_='rating_num').string
        quote_tag = item.find(class_='quote')
        # Some entries carry no one-line quote, and a .quote wrapper may
        # exist without an .inq child — guard both before taking .string.
        inq = quote_tag.find(class_='inq') if quote_tag else None
        quote = inq.string if inq else ''
        result_data.append([name, author, img, rating_num, quote])
    return result_data

def save_to_csv(data, filename):
    """Append movie rows to a CSV file, writing the header only once.

    The previous implementation opened the file in 'w' mode, so each
    scraped page overwrote the previous one and only the last page
    survived. Appending — with the header emitted only for a new or
    empty file — lets repeated calls accumulate all pages.

    Args:
        data: Iterable of rows, each [名称, 作者, 图片, 评分, 摘要].
        filename: Path of the destination CSV file.
    """
    import os  # local import: keeps this fix self-contained

    # The header is needed only when the file does not exist yet or is empty.
    need_header = not os.path.exists(filename) or os.path.getsize(filename) == 0
    with open(filename, 'a', newline='', encoding='utf-8') as f:
        writer = csv.writer(f)
        if need_header:
            writer.writerow(['名称', '作者', '图片', '评分', '摘要'])
        writer.writerows(data)

def main(page, filename='D:\\PycharmProjects\\p_c\\output.csv'):
    """Scrape one page of the Top-250 list and save it to a CSV file.

    Args:
        page: Zero-based page index; each page holds 25 movies.
        filename: Destination CSV path. Defaults to the original
            hard-coded path for backward compatibility; pass your own
            path to avoid the machine-specific default.
    """
    # 'start' is the absolute offset of the first movie on the page.
    url = 'https://movie.douban.com/top250?start=' + str(page * 25) + '&filter='
    html = request_douban(url)
    if html:
        soup = BeautifulSoup(html, 'lxml')
        data = parse_page(soup)
        save_to_csv(data, filename=filename)

if __name__ == '__main__':
    # Fetch the first 10 pages (25 movies each) of the Top-250 list.
    for page_index in range(10):
        main(page_index)
    print('Done!')
