import pprint
import json
from tkinter.constants import CURRENT

import requests
from bs4 import BeautifulSoup
import selenium
import pandas as pd



def download_all_htmls(indexs=None):
    """Download the raw HTML of every page of the Douban doulist.

    Parameters
    ----------
    indexs : iterable of int, optional
        Pagination offsets used as the ``start=`` query parameter.
        Defaults to the module-level ``page_indexs`` range, preserving
        the original zero-argument call.

    Returns
    -------
    list of str
        The HTML text of each page, in request order.

    Raises
    ------
    Exception
        If any page responds with a non-200 HTTP status code.
    """
    if indexs is None:
        indexs = page_indexs
    # The headers (including the session cookie) never change between
    # requests, so build them once instead of once per iteration.
    # NOTE(review): the hard-coded cookie will expire eventually — refresh
    # it if requests start failing.
    headers = {'user-agent':
                   'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36'
               ,'cookie':
                   'bid=9Y6U-Dd9Ebk; dbcl2="285037456:TXniogibIc4"; __utmz=30149280.1732512981.1.1.utmcsr=accounts.douban.com|utmccn=(referral)|utmcmd=referral|utmcct=/; push_noty_num=0; push_doumail_num=0; _pk_id.100001.8cb4=c4d420133ccdf7a2.1732513098.; __utmv=30149280.28503; __yadk_uid=2sEsSO9eZBReemQRFd5jZGHF3nVu7qRk; ck=J_0a; _pk_ses.100001.8cb4=1; __utma=30149280.1156984789.1732512981.1732512981.1732530714.2; __utmc=30149280; __utmt=1; __utmb=30149280.4.10.1732530714'}
    htmls = []
    for idx in indexs:
        url = "https://www.douban.com/doulist/3936288/?start={0}&sort=time&playable=0&sub_type=".format(idx)
        print("李星烨craw html:", url)
        r = requests.get(url, headers=headers)
        if r.status_code != 200:
            # Same broad exception type as before so existing callers'
            # except-clauses keep working.
            raise Exception("error")
        htmls.append(r.text)
    return htmls


def parse_single_html(html):
    """Parse one doulist page into a list of movie records.

    Each record is a dict with keys ``title``, ``rating_star`` (the star
    class suffix, e.g. ``"45"`` for 4.5 stars), ``rating_num`` and
    ``comments`` (review count with the surrounding text stripped).
    """
    soup = BeautifulSoup(html, 'html.parser')
    article = soup.find("div", class_="article")
    records = []
    for item in article.find_all("div", class_="doulist-item"):
        name = item.find("div", class_="title").find("a").get_text()
        spans = item.find("div", class_="rating").find_all("span")
        # spans layout on the page: [star-class span, numeric score, vote count]
        star_class = spans[0]["class"][0]
        score = spans[1].get_text()
        votes = spans[2].get_text()
        records.append({
            'title': name,
            'rating_star': star_class.replace("allstar", ""),
            'rating_num': score,
            'comments': votes.replace("人评价", "").replace("(", "").replace(")", ""),
        })
    return records


# Ten pages of 25 entries each cover the full TOP 250 list.
page_indexs = range(0, 250, 25)
htmls = download_all_htmls()
print("lxy")

# Flatten the per-page record lists into one table.
all_datas = []
for page_html in htmls:
    all_datas.extend(parse_single_html(page_html))

# Persist the combined result as an Excel workbook.
pd.DataFrame(all_datas).to_excel("李星烨+20231205410+豆瓣电影 TOP250.xlsx")


# url = "https://www.douban.com/doulist/3936288/?start=25&sort=time&playable=0"
# headers = {'user-agent':
#                'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/109.0.0.0 Safari/537.36'}

# r = requests.get(url,headers = headers)
# r.status_code
# r.text
