import csv
import re
import time

import requests

# https://movie.douban.com/top250 with no query string defaults to start=0,
# i.e. only the first page of 25 entries; the {} placeholder below is the
# page offset (0, 25, 50, ...).
base_url = "https://movie.douban.com/top250?start={}"

# Minimal browser-like User-Agent so Douban does not reject the request.
headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko)"
}

# Extraction pattern for one movie entry. Compiled ONCE, outside the page
# loop (the original recompiled it on every page). re.S lets '.' span the
# newlines inside each HTML entry; .*? is lazy so each group stops at the
# nearest delimiter.
movie_pattern = re.compile(
    r' <div class="item">.*?<span class="title">(?P<name>.*?)</span>.*?'
    r'<p class="">.*?导演:(?P<daoyan>.*?)&nbsp;.*?<br>'
    r'(?P<year>.*?)&nbsp;.*? '
    r'<span class="rating_num" property="v:average">(?P<score>.*?)</span>'
    r'.*?<span>(?P<num>.*?)人评价</span>', re.S)

# newline="" is the documented way to open a file for csv.writer so that
# row terminators are not translated twice on Windows.
with open("../top250.csv", mode="w", encoding="utf-8", newline="") as f:
    # csv.writer quotes fields for us — director names often contain commas,
    # which would corrupt a hand-joined "a,b,c" line.
    writer = csv.writer(f)

    # The Top 250 spans exactly 10 pages of 25: start = 0, 25, ..., 225.
    # (The original range(0, 251, 25) also requested start=250 — an empty
    # eleventh page past the end of the list.)
    for start in range(0, 250, 25):
        resp = requests.get(base_url.format(start), headers=headers)
        # Fail loudly on an HTTP error instead of silently parsing an
        # error page and emitting zero rows.
        resp.raise_for_status()
        resp.encoding = "utf-8"

        for item in movie_pattern.finditer(resp.text):
            row = [
                item.group("name"),
                item.group("daoyan"),
                item.group("year").strip(),
                item.group("score"),
                item.group("num"),
            ]
            print(*row)
            # BUG FIX: the write was commented out in the original, so the
            # CSV file was created but left empty while the final message
            # still claimed success.
            writer.writerow(row)

        # Be polite to the server between page requests.
        time.sleep(1)

print("豆瓣top250数据提取到top250.csv成功")
