"""
爬取豆瓣电影Top250的详情信息并写入Excel文件

1. 抓取页面 -----> HTML源代码 -----> urllib / requests
    ~ response.text
    ~ response.content.decode('...')
2. 解析页面 -----> 正则表达式 / CSS选择器 / XPath -----> re / beautifulsoup4 / lxml
3. 保存数据 -----> 持久化处理 -----> CSV / Excel -----> csv / xlwt / openpyxl
"""
import bs4
import requests
import openpyxl

# Request headers sent with every Douban request.  The User-Agent makes the
# script present itself as a desktop Chrome browser; the Cookie carries a
# logged-in Douban session (dbcl2/ck plus analytics cookies).
# NOTE(review): this cookie embeds personal session credentials and will
# expire — replace it with your own session before running.
headers = {
    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_6) '
                  'AppleWebKit/537.36 (KHTML, like Gecko) '
                  'Chrome/88.0.4324.192 Safari/537.36',
    'Cookie': 'bid=B6fnN3z1OSQ; dbcl2="188221232:CZnhCk3HOCY"; '
              'ck=kVIa; __utmc=30149280; '
              '__utmz=30149280.1614734458.1.1.utmcsr=accounts.douban.com|utmccn=(referral)|utmcmd=referral|utmcct=/; '
              '__utmc=223695111; '
              '__utmz=223695111.1614734459.1.1.utmcsr=accounts.douban.com|utmccn=(referral)|utmcmd=referral|utmcct=/; '
              'push_noty_num=0; push_doumail_num=0; __yadk_uid=PFRSXOdTRtOPZqn50VougY7eZSw1IF6E; '
              'll="118318"; _vwo_uuid_v2=D9054DADFFDCC85A157EBFE597F5406F2|a968d18019d77548fe9a1d6a1ba80c66; '
              '__gads=ID=04818f8eb15c89ff-2242945e38c600d0:T=1614737843:RT=1614737843:S=ALNI_MagQFvuC13T12jpKa3dcG14uRne3w; '
              '__utma=30149280.1853502367.1614734458.1614736433.1614741223.3; __utmb=30149280.0.10.1614741223; '
              '__utma=223695111.455748548.1614734459.1614736433.1614741223.3; __utmb=223695111.0.10.1614741223; '
              '_pk_ref.100001.4cf6=%5B%22%22%2C%22%22%2C1614741223%2C%22https%3A%2F%2Faccounts.douban.com%2F%22%5D; '
              '_pk_ses.100001.4cf6=*; _pk_id.100001.4cf6=8202782d976a3924.1614734458.3.1614741239.1614737841.'
}
# Optional SOCKS5 proxy configuration — uncomment and pass proxies=proxies
# to requests.get() to route traffic through a local proxy.
# proxies = {
#     'https': 'socks5://127.0.0.1:1086'
# }


def fetch_movie_detail(url):
    """Fetch a movie's detail page and extract its genres and runtime.

    :param url: URL of a Douban movie detail page.
    :return: a two-element list ``[genres, runtime]`` — ``genres`` is a
        '/'-joined string of genre names (empty string when none are found)
        and ``runtime`` is the value of the runtime tag's ``content``
        attribute (empty string when the page carries no runtime tag).
    """
    resp = requests.get(url=url, headers=headers)
    soup = bs4.BeautifulSoup(resp.text, 'html.parser')
    genre_spans = soup.select('span[property="v:genre"]')
    runtime_span = soup.select_one('span[property="v:runtime"]')
    genre = '/'.join(span.text for span in genre_spans)
    # Some entries (e.g. TV series) have no v:runtime tag; select_one then
    # returns None and the original `.attrs` access raised AttributeError.
    runtime = runtime_span.attrs['content'] if runtime_span else ''
    return [genre, runtime]


def main(pages=1):
    """Scrape Douban Top250 list pages and save movie details to Excel.

    :param pages: number of 25-movie list pages to fetch (10 covers the
        whole Top250); defaults to 1 to preserve the original behavior.

    Writes a header row plus one row per movie (number, title, rating,
    quote, genres, runtime) and always saves '豆瓣电影.xlsx', even when a
    request fails part-way through.
    """
    wb = openpyxl.Workbook()
    # Rename the workbook's default sheet: create_sheet() would leave an
    # empty 'Sheet' alongside 'Top250' in the saved file.
    sheet = wb.active
    sheet.title = 'Top250'
    row_no = 0
    column_names = ('编号', '片名', '评分', '名句', '类型', '时长')
    for col, column_name in enumerate(column_names):
        # openpyxl rows/columns are 1-indexed.
        sheet.cell(row_no + 1, col + 1, column_name)
    try:
        for page in range(pages):
            resp = requests.get(
                url=f'https://movie.douban.com/top250?start={page * 25}',
                headers=headers
            )
            if resp.status_code == 200:
                soup = bs4.BeautifulSoup(resp.text, 'html.parser')
                # Parse each movie card as a unit.  The original zipped four
                # parallel select() lists, which misaligned and truncated
                # rows whenever a movie had no quote (<p class="quote">).
                for movie in soup.select('div.info'):
                    row_no += 1
                    anchor = movie.select_one('div.hd > a')
                    title_span = movie.select_one('div.hd > a > span:nth-child(1)')
                    rating_span = movie.select_one('span.rating_num')
                    quote_span = movie.select_one('p.quote > span')
                    detail_url = anchor.attrs['href']
                    movie_infos = [
                        row_no,
                        title_span.text,
                        rating_span.text,
                        quote_span.text if quote_span else '',
                    ]
                    movie_infos += fetch_movie_detail(detail_url)
                    print(movie_infos)
                    for col, info in enumerate(movie_infos):
                        sheet.cell(row_no + 1, col + 1, info)
            else:
                print(f'请求失败，响应状态码：{resp.status_code}')
    except Exception as err:
        # Best-effort boundary: report the error but still save the rows
        # collected so far in the finally block below.
        print(err)
    finally:
        wb.save('豆瓣电影.xlsx')


# Run the scraper only when executed as a script, not when imported.
if __name__ == '__main__':
    main()
