# coding:utf-8
'''
豆瓣电影

https://movie.douban.com/top250?start=0
'''
import urllib3
from urllib3 import PoolManager
from bs4 import BeautifulSoup
import pandas as pd

# Crawl at most this many movies from the Douban Top 250 listing.
movie_limit_count = 250

# Accumulates one [pic_url, movie_url, title, rating, quote] row per movie.
movie_items = []

# First page of the paginated listing; advanced via the "next" link below.
movie_list_url = "https://movie.douban.com/top250?start=0"

# Silence urllib3 warnings, then create one reusable connection pool.
urllib3.disable_warnings()
http = PoolManager()

# Walk the paginated listing, collecting movie rows until we have enough
# entries or there is no further page to fetch.
while True:
    result = http.request(method="GET", url=movie_list_url)
    soup = BeautifulSoup(result.data.decode(), "html.parser")
    for item in soup.find_all("div", attrs={"class": "item"}):
        pic_url = item.find("div", attrs={"class": "pic"}).find("img")["src"]
        movie_url = item.find("div", attrs={"class": "pic"}).find("a")["href"]
        title = item.find("div", attrs={"class": "info"}).find(
            "span", attrs={"class": "title"}).text
        rating_num = float(
            item.find("span", attrs={"class": "rating_num"}).text)
        # BUG FIX: reset the quote for every movie. Previously `remark` kept
        # its value from the prior movie when a movie had no quote (and the
        # very first movie without one raised NameError).
        remark = ""
        quote_tag = item.find("p", attrs={"class": "quote"})
        if quote_tag:
            remark = quote_tag.text
        movie_items.append([pic_url, movie_url, title, rating_num, remark])
    # BUG FIX: ">=" instead of ">" — stop as soon as the limit is reached
    # rather than fetching/parsing one page past it.
    if len(movie_items) >= movie_limit_count:
        break
    # Next-page marker in the listing HTML looks like:
    #   <span class="next">
    #       <link rel="next" href="?start=25&amp;filter=">
    #       <a href="?start=25&amp;filter=">后页&gt;</a>
    #   </span>
    # BUG FIX: the whole <span class="next"> may be absent on the last page,
    # not just the <a> inside it — guard both before dereferencing.
    next_span = soup.find("span", attrs={"class": "next"})
    if next_span and next_span.find("a"):
        movie_list_url = "https://movie.douban.com/top250" + \
            next_span.find("a")["href"]
    else:
        # No next page: we collected everything available.
        break

# Keep at most movie_limit_count rows, then persist them to an Excel sheet.
movie_items = movie_items[:movie_limit_count]
column_names = ["图片URL地址", "电影详情链接", "电影标题", "豆瓣评分", "一句话影评"]
df = pd.DataFrame(movie_items, columns=column_names)
df.to_excel("d:/豆瓣top250.xlsx")
