import random
import time

import bs4
import requests

# For a production crawler you should maintain a proxy pool built ahead of time
# (many groups of IP proxies; dead proxies get evicted and the pool is refreshed
# on a schedule).
# Paid commercial proxy vendors usually expose a web API (a URL); requesting that
# endpoint returns the proxy connection details.
# The Mogu proxy API used here returns JSON, so Response.json() turns the payload
# into a dict from which the proxy IP/port entries are extracted.
# NOTE(review): the appKey below is a credential embedded in source — rotate it
# and load it from configuration/environment before sharing this file.
resp = requests.get('http://piping.mogumiao.com/proxy/api/get_ip_bs?appKey=d36d8b6f1703481eb6c07cc78b3be0c1&count=5&expiryDate=0&format=1&newLine=2')
# Assumes the response body looks like {"msg": [{"ip": ..., "port": ...}, ...]}
# (see the sample below) — verify against the vendor's current API docs; a
# failed request or quota error would make this raise KeyError/ValueError here.
proxy_list = resp.json()['msg']
print(proxy_list)
# Sample of what proxy_list looks like, kept for reference when the API is unavailable:
# proxy_list = [{'port': '26718', 'ip': '113.75.139.11'}, {'port': '40467', 'ip': '121.232.55.96'}, {'port': '44123', 'ip': '183.44.113.12'}, {'port': '22014', 'ip': '117.87.57.43'}, {'port': '31783', 'ip': '183.159.88.15'}]

# Crawl the 10 pages of Douban Top250 (25 movies per page) through randomly
# chosen proxies, printing each movie title.
for page in range(10):
    # Pick a fresh random proxy for every page so one dead proxy does not
    # doom the whole crawl.
    proxy_dict = random.choice(proxy_list)
    ip, port = proxy_dict['ip'], proxy_dict['port']
    print(ip, port)
    try:
        resp = requests.get(
            url=f'https://movie.douban.com/top250?start={page * 25}',
            # A production crawler should also keep a cookie pool and attach a
            # randomly chosen Cookie header per request to avoid bans.
            headers={
                'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36',
            },
            proxies={
                # Proxy used for https:// targets; the scheme of the proxy
                # endpoint itself is plain http.
                'https': f'http://{ip}:{port}',
            },
            timeout=3
        )
    # Timeout is the parent of both ConnectTimeout and ReadTimeout. The
    # original caught only ConnectTimeout, so a proxy that accepted the
    # connection but never sent a response crashed the script.
    except requests.exceptions.Timeout:
        print('连接超时')
    except requests.exceptions.ProxyError:
        print('代理失效')
    # Dead proxies often surface as a plain ConnectionError (or another
    # RequestException subclass) rather than ProxyError; log and move on
    # instead of aborting the whole crawl.
    except requests.exceptions.RequestException as err:
        print(f'请求失败: {err}')
    else:
        if resp.status_code == 200:
            soup = bs4.BeautifulSoup(resp.text, 'html.parser')
            # select() locates elements with a CSS selector: the first <span>
            # inside each title link holds the (Chinese) movie title.
            spans = soup.select('div.hd > a > span:nth-child(1)')
            for span in spans:
                # .text yields the text content of the tag
                print(span.text)
            # Random 3-6 s pause between successful pages to stay polite
            # and reduce the chance of being rate-limited.
            time.sleep(random.random() * 3 + 3)
        else:
            print('无法获取页面...')
            break
