# 1. Send a request to the server.
# 2. Parse the content of the response.
# 3. Save the parsed content to a local CSV document.
import requests
import re
import csv
url = "https://movie.douban.com/top250"
# The User-Agent header identifies the program as a normal browser user;
# without it the server rejects the scraper.
headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/124.0.0.0 Safari/537.36"
}
resp = requests.get(url, headers=headers)
# Fail loudly on an HTTP error instead of silently regex-parsing an error page
# into an empty CSV.
resp.raise_for_status()
result = resp.text
# Pre-compiled extraction pattern; re.S lets '.' match newlines so each
# group can span multiple lines of HTML.
obj = re.compile(
    r'<li>.*?<span class="title">(?P<moviename>.*?)</span>'
    r'.*?<p class="">.*?<br>(?P<year>.*?)&nbsp;'
    r'.*?<span class="rating_num" property="v:average">(?P<rating>.*?)</span>'
    r'.*?<span>(?P<numperson>.*?)人评价</span>',
    re.S,
)
movieMsg = obj.finditer(result)
# newline="" stops the csv module from emitting a blank line between rows on
# Windows; utf-8 avoids UnicodeEncodeError for Chinese titles under a gbk
# locale. The context manager guarantees the file is closed even if a row
# raises mid-loop.
with open("douban.csv", "w", newline="", encoding="utf-8") as f:
    csvWriter = csv.writer(f)
    # Filter each match down to the named groups and persist one row per movie.
    for i in movieMsg:
        # Named groups as a dict: moviename, year, rating, numperson.
        rowData = i.groupdict()
        # Remove the whitespace that precedes the year in the raw HTML.
        rowData["year"] = rowData["year"].strip()
        csvWriter.writerow(rowData.values())
resp.close()
