from bs4 import BeautifulSoup
import requests
import random


class RandomIP:
    """Scrape free proxy-list sites and find an HTTP proxy that actually works.

    Candidate proxies are pulled from two public listing sites; each candidate
    is verified by fetching a caller-supplied URL through it with a short
    timeout.
    """

    def __init__(self):
        # Proxy-list sources; a page index string is appended to each URL.
        self.url = 'https://www.kuaidaili.com/free/intr/'
        self.url2 = 'https://www.xicidaili.com/nt/'
        # Desktop UA used when scraping the proxy-list pages.
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.143 Safari/537.36'
        }
        # Mobile UA used when verifying a candidate proxy against the target URL.
        self.HEADER = {
            "User-Agent": "Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Mobile Safari/537.36"}
        # Last page index fetched, so the same page is not re-scraped.
        self.INDEX = 1

    def __get_ip_list(self, url, headers):
        """
        Scrape one kuaidaili listing page for proxy addresses.
        :param url: full page URL of the proxy list
        :param headers: request headers (user agent)
        :return: list of 'ip:port' strings; empty list on any request failure
        """
        try:
            web_data = requests.get(url, headers=headers, timeout=2)
        except requests.RequestException:
            # Best-effort source: a failed fetch just yields no candidates.
            return []
        soup = BeautifulSoup(web_data.text, 'lxml')
        ip_list = []
        print("www.kuaidaili.com/free/intr/")
        # Skip the header row; kuaidaili puts the IP in column 0, port in column 1.
        for row in soup.find_all('tr')[1:]:
            tds = row.find_all('td')
            # Guard against header/malformed rows that have no <td> cells
            # (the original indexed tds[0]/tds[1] unconditionally -> IndexError).
            if len(tds) >= 2:
                ip_list.append(tds[0].text + ':' + tds[1].text)
        return ip_list

    def __get_ip_list_2(self, url, headers):
        """
        Scrape one xicidaili listing page for proxy addresses.
        :param url: full page URL of the proxy list
        :param headers: request headers (user agent)
        :return: list of 'ip:port' strings; empty list on any request failure
        """
        try:
            web_data = requests.get(url, headers=headers, timeout=2)
        except requests.RequestException:
            return []
        soup = BeautifulSoup(web_data.text, 'lxml')
        ip_list = []
        print("www.xicidaili.com")
        # Skip the header row; xicidaili puts the IP in column 1, port in column 2.
        for row in soup.find_all('tr')[1:]:
            tds = row.find_all('td')
            if len(tds) >= 3:
                ip_list.append(tds[1].text + ':' + tds[2].text)
        return ip_list

    # More proxy-list sites could be parsed and added here.

    def __get_all_ip(self, index):
        """
        Collect proxy candidates for listing page *index*.
        :param index: page index as a string, appended to the source URLs
        :return: list of 'http://ip:port' proxy URLs (may be empty)
        """
        ip_list = []
        if self.INDEX != index:
            ip_list = self.__get_ip_list(self.url + index, self.headers)
            # Fall back to the second source when the first yields nothing.
            if not ip_list:
                ip_list = self.__get_ip_list_2(self.url2 + index, self.headers)
            # More proxy sources could be chained here.
            # BUG FIX: the original wrote `INDEX = index`, creating a local
            # variable, so the instance marker never updated and the
            # "already fetched" guard above was permanently ineffective.
            self.INDEX = index
        return ['http://' + ip for ip in ip_list]

    # Find a usable proxy.
    def proxy_ip(self, url, max_pages=50):
        """
        Find a proxy that can successfully fetch *url*.
        :param url: URL used to verify candidate proxies
        :param max_pages: maximum listing pages to scan before giving up
                          (the original looped forever when none worked)
        :return: a requests-style proxies dict {'http': 'http://ip:port'},
                 or None if no working proxy was found within max_pages
        """
        index = random.randint(0, 20)
        for _ in range(max_pages):
            proxy_list = self.__get_all_ip(str(index))
            index += 1
            print(index)
            # Try each candidate; first one that answers within the timeout wins.
            for proxy_ip in proxy_list:
                proxies = {'http': proxy_ip}
                print(proxies)
                try:
                    print(requests.get(url, headers=self.HEADER, proxies=proxies, timeout=2))
                    print("get proxy_ip-------------")
                    return proxies
                except requests.RequestException:
                    continue
        return None

    def proxy_spider(self, url, max_pages=50):
        """
        Fetch *url* through the first working proxy found.
        :param url: URL to fetch
        :param max_pages: maximum listing pages to scan before giving up
                          (the original looped forever when none worked)
        :return: the requests Response obtained through a working proxy,
                 or None if no working proxy was found within max_pages
        """
        index = random.randint(0, 20)
        for _ in range(max_pages):
            proxy_list = self.__get_all_ip(str(index))
            index += 1
            print(index)
            # Try each candidate; return the first successful response.
            for proxy_ip in proxy_list:
                proxies = {'http': proxy_ip}
                print(proxies)
                try:
                    response = requests.get(url, headers=self.HEADER, proxies=proxies, timeout=2)
                    return response
                except requests.RequestException:
                    continue
        return None

# Manual smoke test: fetch a page through a discovered proxy.
if __name__ == '__main__':
    spider = RandomIP()
    response = spider.proxy_spider('http://www.ireadweek.com')
    print(response)