import ssl
import urllib.request
import xml.etree.cElementTree as ET
from bs4 import BeautifulSoup
import random


def getip(proxie):
    """Pick one proxy at random from *proxie* and wrap it as a proxies dict.

    Args:
        proxie: list of proxy strings (possibly newline-terminated,
            e.g. read from a file), like "1.2.3.4:8080\n".

    Returns:
        dict mapping 'https' to the chosen "host:port" string, suitable
        for urllib's ProxyHandler.
    """
    chosen = random.choice(proxie)
    chosen = chosen.replace("\n", "")
    mapping = {'https': chosen}
    print(mapping)
    return mapping


def openurl(url, ip=None):
    """Fetch *url* (optionally through a proxy) and return its body as text.

    Args:
        url: the URL to fetch.
        ip: optional proxies dict for urllib's ProxyHandler, e.g.
            {'https': '1.2.3.4:8080'}. Defaults to no proxy.
            (Was a mutable default ``{}``; replaced with the None sentinel.)

    Returns:
        The response body decoded as gbk, with undecodable bytes ignored
        (the target site serves gbk-encoded pages).

    Raises:
        urllib.error.URLError / socket.timeout on connection failure —
        callers use this to weed out dead proxies.
    """
    if ip is None:
        ip = {}
    # NOTE: disables certificate verification process-wide; acceptable for
    # this throwaway scraper, but do not reuse in security-sensitive code.
    ssl._create_default_https_context = ssl._create_unverified_context
    proxy_support = urllib.request.ProxyHandler(ip)
    opener = urllib.request.build_opener(proxy_support)
    opener.addheaders = [('User-Agent', 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:23.0) Gecko/20100101 Firefox/23.0')]
    # Short timeout on purpose: slow proxies are treated as dead.
    # Close the response explicitly (the original leaked the connection).
    with opener.open(url, timeout=0.5) as r:
        return r.read().decode('gbk', 'ignore')


def ipchi(type):
    """Scrape proxy IPs of the given *type* from xicidaili.com and save them.

    Crawls listing pages 1-9, extracts each table row, keeps rows whose
    protocol column matches *type*, then validates each proxy by fetching
    the listing page through it. Working proxies are printed, collected,
    and appended to "<type>.txt" (one "host:port" per line).

    Args:
        type: 'HTTPS' or 'HTTP' (also used as the proxies-dict key, lowercased).
            NOTE(review): the name shadows the builtin ``type``; kept for
            backward compatibility with existing callers.
    """
    if type == 'HTTPS':
        ipurl = "http://www.xicidaili.com/wn/"  # https proxy listing
    else:
        ipurl = "http://www.xicidaili.com/wt/"  # http proxy listing
    httplist = []
    for nn in range(1, 10):  # pages 1..9
        html = openurl(ipurl + str(nn))
        rows = BeautifulSoup(html, "lxml").find_all('tr')
        # Skip the header row and the trailing row (pager), as the original did.
        for row in rows[1:-1]:
            # Re-parse the row as XML so cells are addressable by index.
            # Assumes cell layout: [1]=host, [2]=port, [5]=protocol — per
            # the site's table structure; verify if the markup changes.
            root = ET.fromstring(str(row))
            if root[5].text == type:
                httpip = root[1].text + ':' + root[2].text
                try:
                    # Validate the candidate by fetching through it.
                    openurl('http://www.xicidaili.com/wn/', {type.lower(): httpip})
                    print('ok:', {type.lower(): httpip})
                    httplist.append(httpip)
                    with open(type + '.txt', 'a+') as f:
                        f.write(httpip + '\n')
                except Exception:
                    # Narrowed from a bare except: so Ctrl-C still aborts the crawl.
                    print('can\'t connect!', {type.lower(): httpip})
    print(httplist)


def useip():
    """Read saved https proxies and try each against the target site.

    Loads "https.txt" (one "host:port" per line), then makes one attempt
    per saved proxy: each attempt picks a RANDOM proxy from the full list
    (matching the original behavior), so some proxies may be tried more
    than once and others not at all. Prints 'ok' or 'no' per attempt.
    """
    with open('https.txt', 'r') as f:
        proxies = [line.replace("\n", "") for line in f]
    for _ in proxies:  # one attempt per stored proxy
        ipp = getip(proxies)
        try:
            openurl('要爬的网站', ipp)
            print('ok')
        except Exception:
            # Narrowed from a bare except: so Ctrl-C still aborts the loop.
            print('no')


# Script entry point: crawl and validate HTTP proxies.
if __name__ == '__main__':
    ipchi('HTTP')
    # useip()
