import requests
from bs4 import BeautifulSoup
import re
import random
import time


class download():
    """Page downloader that scrapes free HTTPS proxies from xicidaili.com
    and retries failed requests, falling back to a random proxy.

    NOTE(review): class name kept lowercase for backward compatibility with
    existing callers; PEP 8 would name it ``Download``.
    """

    def __init__(self):
        # Listing page for high-anonymity ("nn") free proxies.
        self.url = 'http://www.xicidaili.com/nn/'
        user_agent_list = [
            "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/22.0.1207.1 Safari/537.1",
            "Mozilla/5.0 (X11; CrOS i686 2268.111.0) AppleWebKit/536.11 (KHTML, like Gecko) Chrome/20.0.1132.57 Safari/536.11",
            "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1092.0 Safari/536.6",
            "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1090.0 Safari/536.6",
            "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/19.77.34.5 Safari/537.1",
            "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.9 Safari/536.5",
            "Mozilla/5.0 (Windows NT 6.0) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.36 Safari/536.5",
            "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
            "Mozilla/5.0 (Windows NT 5.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
            "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_0) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
            "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
            "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
            "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
            "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
            "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
            "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.0 Safari/536.3",
            "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24",
            "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24"
        ]
        # Pick one User-Agent at random so repeated scrapes look less uniform.
        UA = random.choice(user_agent_list)
        self.headers = {'User-Agent': UA}

    def proxy_iplist(self):
        """Scrape the listing page and return the HTTPS proxies found.

        Returns:
            list[dict]: one ``{'ip': ..., 'port': ...}`` dict per proxy row
            whose protocol column reads ``HTTPS``. Empty list if the page
            layout is unexpected or no HTTPS proxies are listed.
        """
        # Bounded timeout so a hung listing server cannot block forever.
        html = requests.get(self.url, headers=self.headers, timeout=10).text

        table = BeautifulSoup(html, 'lxml').find(
            'table', attrs={"id": 'ip_list'})
        if table is None:
            # Page layout changed or the request was blocked — no proxies.
            return []
        proxy_iplist = []
        for item in table.find_all('tr')[1:]:  # skip the header row
            lists = item.find_all('td')
            # Column layout: [country, ip, port, location, anonymity, type, ...]
            if len(lists) > 5 and lists[5].get_text() == 'HTTPS':
                proxy_iplist.append({'ip': lists[1].get_text(),
                                     'port': lists[2].get_text()})
        return proxy_iplist

    def get(self, url, timeout, num_retries, proxy=None):
        """GET *url*, retrying direct requests before falling back to a proxy.

        Args:
            url: page to fetch.
            timeout: per-request timeout in seconds, passed to ``requests``.
            num_retries: remaining direct attempts before switching to a
                scraped proxy (each retry is preceded by a 10 s pause).
            proxy: optional ``requests``-style proxies dict; when set, a
                single proxied attempt is made.

        Returns:
            requests.Response on HTTP 200, otherwise None.
        """
        if proxy is None:
            try:
                res = requests.get(
                    url, headers=self.headers, timeout=timeout)
                if res.status_code == 200:  # success — return the page
                    return res
            except requests.RequestException:
                # Network-level failure; fall through to retry/proxy logic.
                pass
            if num_retries > 0:
                time.sleep(10)  # back off before retrying
                print('获取网页出错，10S后将获取倒数第:' + str(num_retries) + '次')
                return self.get(url, timeout, num_retries - 1)
            # Direct attempts exhausted — pick a random scraped proxy.
            print('开始使用代理')
            time.sleep(10)
            proxy_iplist = self.proxy_iplist()
            if not proxy_iplist:
                return None
            chosen = random.choice(proxy_iplist)
            proxy = {'https': 'https://{}:{}'.format(chosen['ip'],
                                                     chosen['port'])}
            return self.get(url, timeout, num_retries, proxy=proxy)

        # Proxied path: one attempt through the supplied proxy.
        try:
            res = requests.get(url, headers=self.headers,
                               timeout=timeout, proxies=proxy)
            if res.status_code == 200:
                return res
        except requests.RequestException:
            pass
        return None


# Module-level singleton: importing this module builds one ready-to-use
# downloader (picks a random User-Agent as a side effect of __init__).
# NOTE(review): the name `request` is easy to confuse with the `requests`
# library imported above — consider renaming if callers allow.
request = download()

