import csv
import os
import re
import requests

'''
爬取豆瓣top250电影信息的思路
1、通过requests获取到top250第一页的html内容
2、分析html，通过正则表达式提取出html中需要的内容，利用.*?进行惰性提取
3、将内容通过csv写入文件中
4、分析第二页第三页等的url规律，然后循环拼接出url
5、再循环1-4将内容存到csv中
'''
def start():
    """Entry point: scrape all 10 pages of the Douban Top-250 list into a CSV file.

    Removes any previous output file, writes a header row, then fetches,
    parses and appends each page of 25 movies.

    :return: None
    """
    csv_path = r'D:\豆瓣.csv'  # raw string: avoid invalid-escape warning on '\豆'
    if os.path.isfile(csv_path):
        # Start from a clean file; all writers below open in append mode.
        os.remove(csv_path)

    write_title_to_file(['电影名称', '导演', '年份', '国家', '类型', '平均评分'])
    for page in range(10):
        # Each page lists 25 movies; '?start=N' is the result offset
        # (offset 0 is the same page as the bare /top250 URL).
        url = 'https://movie.douban.com/top250?start={}'.format(25 * page)
        write_to_file(oper_date(get_data(url)))

def write_to_file(data):
    """Append scraped movie rows to the CSV output file.

    :param data: iterable of row lists, one list of fields per movie
    :return: None
    """
    # newline='' keeps the csv module from emitting blank lines on Windows;
    # raw string avoids the invalid-escape warning on '\豆'.
    with open(r'D:\豆瓣.csv', 'a', encoding='utf-8', newline='') as f:
        csv.writer(f).writerows(data)

def write_title_to_file(title):
    """Append the CSV header row to the output file.

    :param title: list of column-name strings
    :return: None
    """
    # newline='' keeps the csv module from emitting blank lines on Windows;
    # raw string avoids the invalid-escape warning on '\豆'.
    with open(r'D:\豆瓣.csv', 'a', encoding='utf-8', newline='') as f:
        csv.writer(f).writerow(title)

def get_data(url):
    """Fetch one Top-250 page and return an iterator of regex matches.

    Each match carries the named groups 'title', 'daoyan' (director line),
    'year' (year/country/genre line) and 'score'.

    :param url: page URL to fetch
    :return: iterator of re.Match objects over the page HTML
    :raises Exception: if the HTTP response status is not 200
    """
    print(url)
    # Minimal anti-bot measure: present a normal browser User-Agent.
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/123.0.0.0 Safari/537.36'
    }
    # timeout keeps the scraper from hanging forever on a stalled connection.
    resp = requests.get(url, headers=headers, timeout=10)
    if resp.status_code != 200:
        raise Exception('request failed with status {} for {}'.format(resp.status_code, url))
    resp.encoding = 'utf-8'
    # re.S lets '.' match newlines, since the fields span multiple HTML lines.
    obj = re.compile(r'<div class="item">.*?<span class="title">(?P<title>.*?)</span>'
                     r'.*?<p class="">(?P<daoyan>.*?)<br>(?P<year>.*?)</p>'
                     r'.*?<span class="rating_num" property="v:average">(?P<score>.*?)</span>',
                     re.S)
    return obj.finditer(resp.text)


def oper_date(result):
    """Normalize raw regex matches into CSV-ready rows.

    :param result: iterable of match objects exposing groupdict() with keys
        'title', 'daoyan', 'year', 'score'
    :return: list of rows [title, director, year, nation, genre, score]
    """
    rows = []
    for item in result:
        data = item.groupdict()
        # Director field looks like '导演: X&nbsp;主演: Y' — keep only the director name.
        director = data['daoyan'].strip().split('&nbsp;')[0].replace('导演: ', '')
        # Year field looks like 'YYYY&nbsp;/&nbsp;country&nbsp;/&nbsp;genres'.
        parts = data['year'].strip().split('&nbsp;/&nbsp;')
        # Pad so malformed entries (fewer than 3 parts) don't raise IndexError.
        parts += [''] * (3 - len(parts))
        rows.append([data['title'], director, parts[0], parts[1], parts[2], data['score']])
    return rows

if __name__ == '__main__':
    # Run the scraper only when executed as a script, not on import.
    start()
