"""
通过设置代理访问豆瓣网 ---> proxies参数

在商业爬虫项目中，使用IP代理时，通常需要先建立一个代理池
    ~ 加载代理服务器数据并保存
    ~ 监控所有的代理服务器，及时移除失效的服务器
    ~ 提供一个接口能够随机选中某个代理服务器
"""
import random

import bs4
import requests
import openpyxl

headers = {
    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_6) '
                  'AppleWebKit/537.36 (KHTML, like Gecko) '
                  'Chrome/88.0.4324.192 Safari/537.36',
}
all_proxies = []


def load_proxies():
    """Fetch proxy servers from the Mogu proxy API and store them in ``all_proxies``.

    Retries until the API reports success (``code == '0'``); ``msg`` is then
    the list of proxy dicts (each with "ip" and "port").

    Bug fixed: the original loop never terminated — even after a successful
    fetch it kept re-requesting the API forever, so the function never
    returned and the caller hung.
    """
    global all_proxies
    while True:
        resp = requests.get(
            'http://piping.mogumiao.com/proxy/api/get_ip_bs?'
            'appKey=20c3b5de1fa14df7baeb608aa09971de&count=5&expiryDate=0&format=1&newLine=2',
            timeout=10  # avoid hanging forever on a dead API endpoint
        )
        data = resp.json()
        if data['code'] == '0':
            all_proxies = data['msg']
            break  # success — stop polling the API


def fetch_movie_detail(url):
    """Scrape a movie's detail page and return [genre, runtime].

    :param url: URL of the movie's detail page on douban.com
    :return: a two-element list: genres joined by '/', and the runtime in
             minutes as a string ('' when the page has no runtime element,
             e.g. for some TV entries).
    """
    # Pick a random proxy server from the pool for this request.
    proxy = random.choice(all_proxies)
    resp = requests.get(url=url, headers=headers, proxies={
        'http': f'http://{proxy["ip"]}:{proxy["port"]}',
        'https': f'http://{proxy["ip"]}:{proxy["port"]}'
    }, timeout=10)
    soup = bs4.BeautifulSoup(resp.text, 'html.parser')
    genre_spans = soup.select('span[property="v:genre"]')
    runtime_span = soup.select_one('span[property="v:runtime"]')
    genre = '/'.join(genre_span.text for genre_span in genre_spans)
    # Guard against pages without a runtime element (select_one returns None),
    # which previously raised AttributeError and aborted the whole crawl.
    runtime = runtime_span.attrs['content'] if runtime_span else ''
    return [genre, runtime]


def main():
    """Crawl Douban Top250 list pages through proxies and save results to xlsx.

    Writes one row per movie (number, title, rating, quote, genre, runtime)
    and always saves the workbook, even if the crawl aborts mid-way.
    """
    # Load proxy servers from the commercial IP-proxy service first.
    load_proxies()
    wb = openpyxl.Workbook()
    # Rename the default sheet instead of create_sheet(), which would leave
    # an empty extra "Sheet" in the saved workbook.
    sheet = wb.active
    sheet.title = 'Top250'
    row_no = 0
    column_names = ('编号', '片名', '评分', '名句', '类型', '时长')
    for col, column_name in enumerate(column_names):
        # openpyxl cells are 1-indexed.
        sheet.cell(row_no + 1, col + 1, column_name)
    try:
        for page in range(1):
            # Pick a random proxy server for each list-page request.
            proxy = random.choice(all_proxies)
            resp = requests.get(
                url=f'https://movie.douban.com/top250?start={page * 25}',
                headers=headers,
                proxies={
                    'http': f'http://{proxy["ip"]}:{proxy["port"]}',
                    'https': f'http://{proxy["ip"]}:{proxy["port"]}'
                },
                timeout=10
            )
            if resp.status_code == 200:
                soup = bs4.BeautifulSoup(resp.text, 'html.parser')
                anchors = soup.select('div.info > div.hd > a')
                title_spans = soup.select('div.info > div.hd > a > span:nth-child(1)')
                rating_spans = soup.select('span.rating_num')
                quote_spans = soup.select('p.quote > span')
                for anchor, title_span, rating_span, quote_span in zip(anchors, title_spans, rating_spans, quote_spans):
                    row_no += 1
                    detail_url = anchor.attrs['href']
                    movie_infos = [row_no, title_span.text, rating_span.text, quote_span.text]
                    movie_infos += fetch_movie_detail(detail_url)
                    print(movie_infos)
                    for col, info in enumerate(movie_infos):
                        sheet.cell(row_no + 1, col + 1, info)
            else:
                print(f'请求失败，响应状态码：{resp.status_code}')
    except Exception as err:
        # Best-effort crawl: report the error but still fall through to save
        # whatever rows were collected so far.
        print(err)
    finally:
        wb.save('豆瓣电影.xlsx')


# Run the crawler only when executed as a script (not when imported).
if __name__ == '__main__':
    main()
