import urllib.request
import http.cookiejar


def proxyDemo(url, proxyDict=None, headers=None):
    """Fetch *url* through a proxy with cookie support and print the raw body.

    Installs the opener globally via ``urllib.request.install_opener``, so
    later ``urlopen`` calls in the process also go through this proxy.

    Args:
        url: the URL to fetch.
        proxyDict: optional mapping of scheme -> "host:port" passed to
            ``ProxyHandler``. Defaults to the original hard-coded proxy.
        headers: optional dict of request headers. Defaults to a
            Chrome-like header set ("Accept-Encoding" deliberately omitted
            so the response is not compressed).

    Returns:
        The raw response body as ``bytes`` (also printed).
    """
    if proxyDict is None:
        # Original hard-coded demo proxy, kept as the default for
        # backward compatibility.
        proxyDict = {'https': "101.236.55.145:8866"}
    if headers is None:
        headers = {
            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
            "Accept-Language": "zh-CN,zh;q=0.9",
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.84 Safari/537.36",
            "Connection": "keep-alive",
            "referer": "http://www.163.com"
        }
    # Cookie jar so the opener keeps session cookies across redirects.
    cjar = http.cookiejar.CookieJar()
    proxy = urllib.request.ProxyHandler(proxyDict)
    opener = urllib.request.build_opener(proxy, urllib.request.HTTPHandler, urllib.request.HTTPCookieProcessor(cjar))
    # addheaders expects a list of (name, value) pairs.
    opener.addheaders = list(headers.items())
    # Install the opener as the process-wide default.
    urllib.request.install_opener(opener)
    data = urllib.request.urlopen(url).read()
    print(data)
    return data


def proxyDemo02(url, ipDictionary, isSwitchProxy=True, headers=None):
    """Fetch *url* either through the given proxy or directly, with cookies.

    Builds two ``ProxyHandler`` objects — one using *ipDictionary*, one
    empty (direct connection) — and selects between them with
    *isSwitchProxy*. HTTP debug logging is enabled (``debuglevel=1``).
    The opener is installed globally via ``install_opener``.

    Args:
        url: the URL to fetch.
        ipDictionary: mapping of scheme -> "host:port" for the proxy.
        isSwitchProxy: when True (default) route through the proxy,
            otherwise connect directly.
        headers: optional dict of request headers; when None, no extra
            headers are added.

    Returns:
        The raw response body as ``bytes`` (also printed).
    """
    # One handler with the proxy, one without, so we can toggle below.
    http_proxy = urllib.request.ProxyHandler(ipDictionary)
    none_proxy = urllib.request.ProxyHandler({})

    # HTTPHandler with debug logging enabled on the wire traffic.
    http_handle = urllib.request.HTTPHandler(debuglevel=1)

    # Cookie jar so the opener keeps session cookies.
    http_cjar = http.cookiejar.CookieJar()
    http_cookie = urllib.request.HTTPCookieProcessor(http_cjar)

    if isSwitchProxy:
        opener = urllib.request.build_opener(http_proxy, http_handle, http_cookie)
    else:
        opener = urllib.request.build_opener(none_proxy, http_handle, http_cookie)

    # Bug fix: the original called headers.items() unconditionally, which
    # raised AttributeError whenever the default headers=None was used.
    opener.addheaders = list(headers.items()) if headers else []

    # Install the opener as the process-wide default.
    urllib.request.install_opener(opener)

    req = urllib.request.Request(url)
    data = urllib.request.urlopen(req).read()
    print(data)
    return data


# Demo target: a 163.com news article.
url = "http://news.163.com/18/0306/09/DC71LRR20001875N.html"

# Chrome-like User-Agent string, pulled out for readability.
_USER_AGENT = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.84 Safari/537.36"

# Browser-like request headers. "Accept-Encoding" is intentionally not
# set, so the server replies with an uncompressed body.
headers = {
    "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
    "Accept-Language": "zh-CN,zh;q=0.9",
    "User-Agent": _USER_AGENT,
    "Connection": "keep-alive",
    "referer": "http://www.163.com",
}
# proxyDemo02("http://www.xicidaili.com/nn/", isSwitchProxy=True, headers=headers)
