import pprint
import requests
from bs4 import BeautifulSoup
import pandas as pd

# Douban paginates the Top250 in 25-item pages; these are the 'start'
# query offsets for all 10 pages (0, 25, ..., 225).
page_index = range(0, 250, 25)


def dowmload_all_htmls():
    # NOTE(review): name keeps the original typo ("dowmload") so existing
    # callers continue to work.
    """Download every page of the Douban Top250 list.

    Returns:
        list[str]: the raw HTML text of each page, in page order.

    Raises:
        Exception: if any page responds with a non-200 status code.
    """
    htmls = []
    # Douban rejects requests without a browser-like User-Agent.
    headers = {
        'User-Agent': """Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/135.0.0.0 Safari/537.36 Edg/135.0.0.0"""
    }
    for idx in page_index:
        url = f'https://movie.douban.com/top250?start={idx}&filter='
        print("craw html : ", url)
        # timeout so a stalled connection cannot hang the whole crawl.
        r = requests.get(url, headers=headers, timeout=10)
        if r.status_code != 200:
            # Include the status and URL so a failure is diagnosable.
            raise Exception(f'request failed: HTTP {r.status_code} for {url}')
        htmls.append(r.text)
        r.close()
    return htmls

def parse_signle_html(url):
    """Parse one Douban Top250 list page into a list of movie dicts.

    Args:
        url: the raw HTML text of one list page (despite the name, this is
            the page content, not a URL — kept for caller compatibility).

    Returns:
        list[dict]: one dict per movie with keys: number, movie_name,
        director, first_actor, issue_date, country, genre, rating_num,
        comments.
    """
    soup = BeautifulSoup(url, 'html.parser')
    article_items = (
        soup.find('div', class_='article')
            .find('ol', class_='grid_view')
            .find_all('div', class_='item')
    )
    datas = []
    for article_item in article_items:
        rank = article_item.find('div', class_='pic').find('em').get_text()
        title = article_item.find('div', class_='hd').find('span', class_='title').get_text()
        # get_text('\n', strip=True) keeps the newline inside the credits
        # paragraph, so line 0 is "导演: ... 主演: ..." and line 1 is
        # "year / country / genre".
        p_content = article_item.find('div', class_='bd').get_text('\n', strip=True)
        lines = p_content.split('\n')
        credit_line = lines[0].strip()

        # Split the credit line once on '主演:'. This fixes three bugs in
        # the old code: (1) it checked for '主演:' in the whole p_content
        # but split credit_line, which could raise IndexError; (2) it
        # split the director on the bare character '主', truncating any
        # director name containing it; (3) it dropped movies without a
        # 主演 field entirely instead of recording them.
        director_section, _, actor_section = credit_line.partition('主演:')
        director = director_section.split('导演:')[-1].strip()
        first_actor = actor_section.split('/')[0].strip() if actor_section else ''

        # "year / country / genre" — some entries carry extra year/region
        # segments, so take the first field as year, the LAST as genre,
        # and join whatever sits in between as the country/region.
        fields = [f.strip() for f in lines[1].split('/')]
        year = fields[0]
        genre = fields[-1]
        country = ' / '.join(fields[1:-1])

        # Within div.bd the spans are: star icon, rating number, a
        # property span, then the "N人评价" counter.
        stars = (
            article_item.find('div', class_='info')
                        .find('div', class_='bd')
                        .find_all('span')
        )
        rating_num = stars[1].get_text()
        comments = stars[3].get_text()

        datas.append({
            'number': rank,
            'movie_name': title,
            'director': director,
            'first_actor': first_actor,
            'issue_date': year,
            'country': country,
            'genre': genre,
            'rating_num': rating_num,
            'comments': comments.replace('人评价', ''),
        })
    return datas


if __name__ == '__main__':
    all_datas = []
    htmls = dowmload_all_htmls()
    for html in htmls:
        page_datas = parse_signle_html(html)
        all_datas.extend(page_datas)
        # Print only this page's rows. The old code looped over
        # all_datas here, re-printing every earlier page once per page
        # (quadratic, duplicated output).
        for movie in page_datas:
            pprint.pprint(movie)
    df = pd.DataFrame(all_datas)
    # index=False: the 'number' column already carries the rank, so the
    # implicit integer index would only duplicate it in the spreadsheet.
    # (The old `df.to_string(index=False)` call discarded its result and
    # had no effect, so it was removed.)
    # NOTE(review): the filename looks like a typo for 豆瓣 — kept as-is
    # to avoid changing the output path; confirm before renaming.
    df.to_excel('豆pa评分top250.xlsx', index=False)

