# @Time    : 2025-04-11 15:51
# @Author: Fioman
# @Phone  : 13149920693
# @Tips      : Talk is cheap,show me the code ^_^

import csv
import re

import requests

from FirstLesson import crawler_config

# Base URL of the Douban Top 250 movie chart (results are paginated, 25 per page).
url = "https://movie.douban.com/top250"
# Pagination offset; advanced by 25 for each page fetched in the crawl loop below.
startIndex = 0
# Query-string parameters sent with every request; "start" is updated per page.
param = {
    "start": startIndex,
    "filter": ""
}
# Request headers. A browser-like User-Agent (from the shared crawler config)
# is required or Douban rejects the request.
# NOTE(review): the cookie below is a hard-coded personal session cookie — it
# will expire and leaks account credentials if committed; confirm it should
# not be loaded from config/environment instead.
headers = {
    "User-Agent": crawler_config.userAgent,
    "cookie": 'bid=9rxAeh_t6QQ; douban-fav-remind=1;'
              ' _pk_id.100001.4cf6=e425be9890bea4d6.1724485957.; '
              '__yadk_uid=cbuIYxhpa50b5v09Gr5OqagC6dqymqFh; viewed="2994858"; '
              '_vwo_uuid_v2=D4D953A4B39933EF0D7ECB922D273D1FE|3139431b4eb02b36cf41b6'
              '9411481fa2; ll="118282"; __utmc=30149280; __utmc=223695111; _ga=GA1.1.1962796087.17'
              '44336587; _sharedID=ab0c95e3-c964-4e6e-8fb2-6a1cd2e2ce1c; _sharedID_cst=2SzgLJUse'
              'Q%3D%3D; _ga_P83QWMDYS1=GS1.1.1744336586.1.1.1744337091.0.0.0; __utmz=3014'
              '9280.1744350119.12.8.utmcsr=cn.bing.com|utmccn=(referral)|utmcmd=referral|utmcct=/; '
              '__utmz=223695111.1744350121.9.5.utmcsr=douban.com|utmccn=(referral)|utmcmd=referra'
              'l|utmcct=/; ap_v=0,6.0; __utma=30149280.117404219.1720682159.1744357910.1744361571.1'
              '4; __utmb=30149280.0.10.1744361571; __utma=223695111.1782765838.1724485957.174435791'
              '0.1744361571.11; __utmb=223695111.0.10.1744361571; _pk_ref.100001.4cf6=%5B%22%22%'
              '2C%22%22%2C1744361572%2C%22https%3A%2F%2Fwww.douban.com%2F%22%5D; '
              '_pk_ses.100001.4cf6=1; dbcl2="288272617:jJq+hl/o/nI"; ck=3WqL; push_noty_num=0; '
              'push_doumail_num=0; frodotk_db="4d468eccbf292f54e7d3ac663d7b8dfc"'
}

# Precompiled verbose regex that extracts one movie entry from a Top 250 list
# page.  re.S lets "." cross newlines; re.X ignores layout whitespace and
# allows the per-line comments below.
comp = re.compile(
    r'''
    <li>.*?<span.*?class="title">(?P<chinese_title>.*?)</span>   # Chinese title
    .*?<br>\s*(?P<year>.*?)\s*&nbsp;/&nbsp;                      # release year
    .*?<span.*?property="v:average">(?P<rating>.*?)</span>       # average rating
    .*?<span>(?P<votes>.*?)人评价</span>.*?                      # number of ratings
    ''',
    re.S | re.X,
)
# Crawl all 10 pages (25 movies each) of the Douban Top 250 chart and write
# the extracted fields (title, year, rating, votes) to data.csv.
# newline="" is required when handing a text file to csv.writer, otherwise
# blank rows appear between records on Windows.
with open("data.csv", mode="w", encoding="utf-8", newline="") as f:
    # Create the writer once, instead of once per page as before.
    csvWriter = csv.writer(f)
    for page in range(10):
        param["start"] = page * 25  # pagination offset: 0, 25, ..., 225
        # timeout prevents the crawl from hanging forever on a dead connection.
        resp = requests.get(url, params=param, headers=headers, timeout=10)
        # Only HTTP 200 carries the list page.  (The original `> 200` check
        # wrongly accepted 1xx status codes as success.)
        if resp.status_code != 200:
            print("请求失败,错误码:", resp.status_code)
            break
        html = resp.text
        # Pull every movie entry out of the page with the precompiled regex.
        for it in comp.finditer(html):
            groupDict = it.groupdict()
            print(groupDict)
            # Values come out in pattern order: title, year, rating, votes.
            csvWriter.writerow(groupDict.values())
