"""
拿到页面源代码 requests
通过 正则 来提取想要的内容 regex
"""
import csv
import re
import time

import requests
import regex

from utils import os_utils


def query_and_save(start):
    """Fetch one page (25 entries) of the Douban Top 250 movie list and
    append the parsed rows to '豆瓣TOP250.csv'.

    Args:
        start: Zero-based offset of the first entry on the requested page
            (0, 25, 50, ... for successive pages).

    Returns:
        A list of dicts with keys 'name', 'year', 'score' and
        'evaluate_count', one per movie parsed from the page; an empty
        list once `start` is past the end of the ranking.

    Raises:
        requests.HTTPError: If the server responds with an error status.
        requests.Timeout: If the request exceeds the 10-second timeout.
    """
    url = 'https://movie.douban.com/top250'
    headers = {
        # Douban rejects requests that do not carry a browser-like User-Agent.
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/136.0.0.0 Safari/537.36'
    }
    params = {
        "start": start,
    }
    # The pattern only needs named groups, non-greedy matching and DOTALL,
    # so the stdlib `re` module suffices (no third-party `regex` needed).
    # re.S makes '.' also match newlines, letting each field span lines.
    pattern = re.compile(
        r'<li>.*?<span class="title">(?P<name>.*?)</span>'
        r'.*?<p>.*?<br>(?P<year>.*?)&nbsp'
        r'.*?<span class="rating_num" property="v:average">(?P<score>.*?)</span>'
        r'.*?<span>(?P<evaluate_count>.*?)人评价</span>',
        re.S)

    # timeout prevents the script from hanging forever on a dead connection;
    # raise_for_status surfaces HTTP errors instead of silently parsing an
    # error page as "no results" (which would end the crawl early).
    with requests.get(url, params=params, headers=headers, timeout=10) as response:
        response.raise_for_status()
        html = response.text

    rows = []
    # newline='' is required by the csv module; without it every row is
    # followed by a blank line on Windows.
    with open('豆瓣TOP250.csv', 'a', encoding='utf-8', newline='') as f:
        writer = csv.writer(f)
        for match in pattern.finditer(html):
            row = match.groupdict()
            # The year is preceded by indentation whitespace in the HTML.
            row['year'] = row['year'].strip()
            writer.writerow(row.values())
            rows.append(row)
    return rows


def main():
    """Scrape every page of the Douban Top 250 and rebuild the CSV file."""
    # Start from a clean file so repeated runs don't accumulate duplicates.
    os_utils.remove('豆瓣TOP250.csv')
    offset = 0
    while True:
        page = query_and_save(offset)
        if not page:
            # An empty page means we have run past the last entry.
            break
        offset += 25
        time.sleep(3)  # throttle requests to avoid being rate-limited


# Guard so importing this module does not trigger the crawl.
if __name__ == '__main__':
    main()

# To write at a specific position inside the file, do it like this:
# with open('豆瓣TOP250.csv', 'r+', encoding='utf-8') as f:
#     f.seek(100)  # move the file pointer to byte offset 100
#     f.write('new content')  # writing starts from that position
