"""
Fetch and parse page content.
"""
import json
import random
import time

import bs4
import requests

# Build a cookie pool (disabled)
# cookies_pool = []
# cookies = {}
# cookie_string = 'll="118318"; bid=er8-mqEG8go; __utmz=30149280.1592383702.1.1.utmcsr=(direct)|utmccn=(direct)|utmcmd=(none); __gads=ID=6b02098dd0cfdb77:T=1592383730:S=ALNI_MaKKxB47ZEI0vQn9raatWichLJZIA; __utmc=30149280; ap_v=0,6.0; _pk_ses.100001.8cb4=*; __utma=30149280.526786913.1592383702.1592446032.1592448781.3; __utmt=1; dbcl2="188221232:1qzWkX0sQ34"; ck=y3a2; __yadk_uid=r92XvSlqjY0wBZYISb9qlE9IB22RunvG; push_noty_num=0; push_doumail_num=0; __utmv=30149280.18822; _pk_id.100001.8cb4=6a1e1d28b557d54a.1592383698.2.1592449214.1592383698.; __utmb=30149280.8.10.1592448781'
# for content in map(str.strip, cookie_string.split(';')):
#     key, value = content.split('=', maxsplit=1)
#     cookies[key] = value
# cookies_pool.append(cookies)

# # Build a proxy pool (disabled)
# proxies_pool = []
# resp = requests.get('http://piping.mogumiao.com/proxy/api/get_ip_bs?appKey=8ddbb8f97f7441f28ac5505e30dbcb3d&count=5&expiryDate=0&format=1&newLine=2')
# result = json.loads(resp.content)
# for data in result['msg']:
#     ip = data['ip']
#     port = data['port']
#     proxy = {
#         'http': f'http://{ip}:{port}',
#         'https': f'https://{ip}:{port}'
#     }
#     proxies_pool.append(proxy)
# print(proxies_pool)

# Crawl all 10 pages of the Douban Top250 list; each page holds 25 movies,
# so `start` is the zero-based offset of the first movie on the page.
for page in range(1, 11):
    resp = requests.get(
        url=f'https://movie.douban.com/top250?start={(page - 1) * 25}',
        # Disguise the crawler as a regular browser via request headers.
        headers={
            'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_6) '
                          'AppleWebKit/537.36 (KHTML, like Gecko) '
                          'Chrome/83.0.4103.97 Safari/537.36',
            'Accept': 'text/html,application/xhtml+xml,application/xml;'
                      'q=0.9,image/webp,image/apng,*/*;'
                      'q=0.8,application/signed-exchange;v=b3;q=0.9',
            'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
        },
        # Fail fast instead of hanging forever if the server stalls
        # (requests has no default timeout).
        timeout=10,
        # proxies=random.choice(proxies_pool)
        # cookies=random.choice(cookies_pool)
    )
    # Surface HTTP errors (e.g. an anti-crawler 403) instead of silently
    # parsing an error page as if it were the movie list.
    resp.raise_for_status()
    # Create a BeautifulSoup object to parse the page.
    soup = bs4.BeautifulSoup(resp.text, features='lxml')
    # The <a> tags that wrap each movie's info block.
    elements = soup.select('.info>div>a')
    for element in elements:
        print(element.attrs['href'])
        # The <span class="title"> under the <a> holds the movie title;
        # guard against a missing span so one odd entry doesn't crash the run.
        span = element.select_one('.title')
        if span is not None:
            print(span.text)
    # Sleep a random interval so we don't hit the site too frequently.
    time.sleep(random.random() * 5)
