# -*- coding: utf-8 -*-
# Route the crawler's traffic through proxy IPs so the target site cannot
# block us by source address.
#
# By rotating through non-repeating proxy IPs, each request appears to come
# from a different address, evading per-IP rate limiting and detection.
import requests
import json
# ip_port = ''
# resp = requests.get(url=ip_port)
# data = json.loads(resp.text)['data']
#
# list_1 = []
# for i in data:
#     print(f'{i["ip"]}:{i["port"]}')
#     list_1.append(f'{i["ip"]}:{i["port"]}')
#
# # Build the proxy dicts from the collected ip:port strings
# for j in list_1:
#     proxy = {
#         'http': f'http://{j}',
#         'https': f'http://{j}'
#     }
# Fetch the Douban Top-250 page and report the HTTP status on success.
URL = 'https://movie.douban.com/top250'
headers = {
    # Browser-like User-Agent: Douban rejects the default requests UA.
    'User-Agent':'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.25 Safari/537.36 Core/1.70.3877.400 QQBrowser/10.8.4506.400'
}
try:
    # The request itself is the operation that can raise (DNS failure,
    # refused connection, timeout) — it must live inside the try block.
    # The original code wrapped only the status-code comparison, which
    # cannot raise, leaving the handler unreachable for real errors.
    # timeout= prevents a stalled connection from hanging the script.
    resp = requests.get(url=URL, headers=headers, timeout=10)
    if resp.status_code == 200:
        print(resp.status_code)
except requests.RequestException as err:
    # Catch only requests' own errors, and report instead of silently
    # swallowing them — `except Exception: pass` hides every failure.
    print(f'request failed: {err}')