#!/usr/local/bin/python3

__author__ = '曹植'

import requests

from log import logger
from bs4 import BeautifulSoup
from resource_manager.useragents import user_agents_manager
from resource_manager.proxy_ip import get_ip
import re
import time
import random
from setting import XICI_ENABLED, KUAI_ENABLED, IP3366_ENABLED, DAILI666_ENABLED, DATA5U_ENABLED, XIAOER_ENABLED, \
    QIYUN_ENABLED, DAILI89_ENABLED, GOUBANJIA_ENABLED, IPHAI_ENABLED, XSDAILI_ENABLED, LABPROXY_ENABLED, SHENJI_ENABLED, \
    NIMA_ENABLED, IHUAN_ENABLED


class ProxyMetaclass(type):
    """
    Metaclass that registers crawler methods on the classes it creates.

    Every attribute whose name starts with ``crawl_`` is recorded in the
    class attribute ``__CrawlFunc__`` (a list of method names), and the
    total count is stored in ``__CrawlFuncCount__``, so callers can
    enumerate the available crawlers dynamically.
    """

    def __new__(cls, name, bases, attrs):
        # FIX: use startswith() instead of the substring test
        # ``'crawl_' in k`` so a name that merely *contains* 'crawl_'
        # (e.g. a helper called 'pre_crawl_setup') is not registered.
        crawl_funcs = [k for k in attrs if k.startswith('crawl_')]
        attrs['__CrawlFunc__'] = crawl_funcs
        attrs['__CrawlFuncCount__'] = len(crawl_funcs)
        return super().__new__(cls, name, bases, attrs)


class Crawler(metaclass=ProxyMetaclass):
    """
    Free-proxy crawler.

    Every ``crawl_*`` method is a generator that scrapes one public
    proxy-list site and yields proxies as ``'ip:port'`` strings (a few
    sites publish the joined form directly, in which case the cell text
    is yielded as-is).  The metaclass gathers the method names into
    ``__CrawlFunc__``.  Each crawler is gated by its ``*_ENABLED`` flag
    imported from ``setting``.
    """

    def __init__(self):
        # Browser-like request headers; the User-Agent is randomized once
        # per instance via the project's user-agent pool.
        self.headers = {
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
            'Accept-Encoding': 'gzip, deflate, br',
            'Accept-Language': 'zh-CN,zh;q=0.9',
            'User-Agent': user_agents_manager()
        }
        # FIX: the scheme was 'socks5:{}' (missing '//'), which is not a
        # valid proxy URL for requests.
        self.proxy = {
            'https': 'socks5://{}'.format(get_ip())
        }

    def get_proxies(self, callback):
        """
        Invoke the crawler method named *callback* and collect its output.

        :param callback: name of a ``crawl_*`` generator method on this class
        :return: list of proxy strings
        """
        proxies = []
        # FIX: getattr() replaces eval('self.{}()'.format(...)) -- same
        # dynamic dispatch without evaluating an arbitrary string as code.
        for proxy in getattr(self, callback)():
            print('成功获取代理', proxy)
            proxies.append(str(proxy))
        return proxies

    def _get_soup(self, url):
        """GET *url* with this instance's headers and parse it as UTF-8 HTML."""
        res = requests.get(url=url, headers=self.headers)
        res.encoding = 'utf-8'
        return BeautifulSoup(res.text, 'lxml')

    def crawl_xici(self, page=4):
        """
        Xici proxy (https://www.xicidaili.com/nn/).

        :param page: number of listing pages to crawl
        """
        try:
            if XICI_ENABLED:
                url = 'https://www.xicidaili.com/nn/'
                print('https://www.xicidaili.com/nn/' + ' 爬取中')
                for i in range(1, page + 1):
                    # FIX: was ``url + str(page)``, which fetched the same
                    # page on every iteration instead of paging through.
                    soup = self._get_soup(url + str(i))
                    tr_list = soup.find('table', attrs={'id': 'ip_list'}).find_all('tr')[1:]
                    if tr_list:
                        for tr in tr_list:
                            ip = tr.find_all('td')[1].get_text()
                            port = tr.find_all('td')[2].get_text()
                            yield ':'.join([ip, port])
                    time.sleep(random.uniform(1, 3))  # polite crawl delay
        except AttributeError as e:
            logger.info('{}爬虫出错,{}'.format('https://www.xicidaili.com/nn/', e))

    def crawl_kuaidaili(self):
        """Kuai proxy (https://www.kuaidaili.com/free/inha/), pages 1-3."""
        try:
            if KUAI_ENABLED:
                print('http://www.kuaidaili.com/free/inha' + ' 爬取中')
                for page in range(1, 4):
                    soup = self._get_soup('https://www.kuaidaili.com/free/inha/{}/'.format(page))
                    tr_list = soup.find('table').find_all('tr')[1:]
                    if tr_list:
                        for tr in tr_list:
                            ip = tr.find_all('td')[0].get_text()
                            port = tr.find_all('td')[1].get_text()
                            yield ':'.join([ip, port])
                    time.sleep(random.uniform(1, 3))
        except AttributeError as e:
            logger.info('{}爬虫出错,{}'.format('http://www.kuaidaili.com/free/inha', e))

    def crawl_ip3366(self):
        """Yun proxy (http://www.ip3366.net/free/), pages 1-3."""
        try:
            if IP3366_ENABLED:
                print('http://www.ip3366.net/free/' + ' 爬取中')
                for page in range(1, 4):
                    soup = self._get_soup('http://www.ip3366.net/free/?stype=1&page={}'.format(page))
                    tr_list = soup.find('tbody').find_all('tr')
                    if tr_list:
                        for tr in tr_list:
                            ip = tr.find_all('td')[0].get_text()
                            port = tr.find_all('td')[1].get_text()
                            yield ':'.join([ip, port])
                    time.sleep(random.uniform(1, 3))
        except AttributeError as e:
            logger.info('{}爬虫出错,{}'.format('http://www.ip3366.net/free/', e))

    def crawl_daili666(self, page_count=4):
        """
        66ip proxy (http://www.66ip.cn).

        :param page_count: last page number to crawl.  Crawling starts at
            page 2 -- presumably page 1 has a different layout; TODO confirm.
        """
        try:
            if DAILI666_ENABLED:
                print('http://www.66ip.cn' + ' 爬取中')
                for page in range(2, page_count + 1):
                    soup = self._get_soup('http://www.66ip.cn/{}.html'.format(page))
                    tr_list = soup.find('table').find_all('tr')[1:]
                    if tr_list:
                        for tr in tr_list:
                            ip = tr.find_all('td')[0].get_text()
                            port = tr.find_all('td')[1].get_text()
                            yield ':'.join([ip, port])
                    time.sleep(random.uniform(1, 3))
        except AttributeError as e:
            logger.info('{}爬虫出错,{}'.format('http://www.66ip.cn/', e))

    def crawl_data5u(self):
        """Data5u proxy (http://www.data5u.com/)."""
        try:
            if DATA5U_ENABLED:
                print('http://www.data5u.com/' + ' 爬取中')
                # NOTE(review): the loop fetches the same front page twice
                # (the page counter is unused) -- kept as-is; the site may
                # rotate entries between requests.  TODO confirm intent.
                for _ in range(1, 3):
                    soup = self._get_soup('http://www.data5u.com/')
                    ul_list = soup.find_all('ul', attrs={'class': 'l2'})
                    if ul_list:
                        for ul in ul_list:
                            ip = ul.find_all('span')[0].find('li').get_text()
                            port = ul.find_all('span')[1].find('li').get_text()
                            yield ':'.join([ip, port])
                    time.sleep(random.uniform(1, 3))
        except AttributeError as e:
            logger.info('{}爬虫出错,{}'.format('http://www.data5u.com/', e))

    def crawl_xiaoer(self):
        """Xiaoer free proxy (http://www.xiaoerdaili.com/faq/)."""
        try:
            if XIAOER_ENABLED:
                print('http://www.xiaoerdaili.com/faq/' + ' 爬取中')
                # The FAQ index links to the latest proxy post; follow it.
                soup = self._get_soup('http://www.xiaoerdaili.com/faq/')
                start_url = soup.find('a', attrs={'class': 'titles-h1'})['href']
                soup = self._get_soup(start_url)
                p_list = soup.find('div', attrs={'id': 'content'}).find_all('p')
                if p_list:
                    for p in p_list:
                        s = p.get_text()
                        # Each paragraph looks like 'ip:port[...]'; keep the
                        # part before '[' and extract ip:port with a regex.
                        yield re.search(r'\d+\.\d+\.\d+\.\d+\:\d+', s[:s.index('[')], re.S).group()
        except AttributeError as e:
            logger.info('{}爬虫出错,{}'.format('http://www.xiaoerdaili.com/faq/', e))
        except ValueError:
            # str.index raises ValueError when a paragraph has no '[';
            # such paragraphs carry no proxy, so skip silently.
            pass

    def crawl_qiyun(self):
        """Qiyun proxy (http://www.qydaili.com/free/), pages 1-3."""
        try:
            if QIYUN_ENABLED:
                print('http://www.qydaili.com/free/' + ' 爬取中')
                for page in range(1, 4):
                    soup = self._get_soup('http://www.qydaili.com/free/?action=china&page={}'.format(page))
                    tr_list = soup.find('table').find_all('tr')[1:]
                    if tr_list:
                        for tr in tr_list:
                            ip = tr.find_all('td')[0].get_text().strip()
                            port = tr.find_all('td')[1].get_text().strip()
                            yield ':'.join([ip, port])
                    time.sleep(random.uniform(1, 3))
        except AttributeError as e:
            logger.info('{}爬虫出错,{}'.format('http://www.qydaili.com/free/', e))

    def crawl_89(self):
        """89ip proxy (http://www.89ip.cn/), pages 1-2."""
        try:
            if DAILI89_ENABLED:
                print('http://www.89ip.cn/index.html' + ' 爬取中')
                for page in range(1, 3):
                    soup = self._get_soup('http://www.89ip.cn/index_{}.html'.format(page))
                    tr_list = soup.find('table').find_all('tr')[1:]
                    if tr_list:
                        for tr in tr_list:
                            ip = tr.find_all('td')[0].get_text().strip()
                            port = tr.find_all('td')[1].get_text().strip()
                            yield ':'.join([ip, port])
                    time.sleep(random.uniform(1, 3))
        except AttributeError as e:
            logger.info('{}爬虫出错,{}'.format('http://www.89ip.cn/index.html', e))

    def crawl_goubanjia(self):
        """
        Goubanjia proxy (http://www.goubanjia.com/).

        Yields only rows marked '高匿' (high anonymity).  The first cell
        already contains 'ip:port'; a doubled dot is collapsed --
        presumably an artifact of the site's obfuscation markup.
        """
        try:
            if GOUBANJIA_ENABLED:
                print('http://www.goubanjia.com/' + ' 爬取中')
                soup = self._get_soup('http://www.goubanjia.com/')
                tr_list = soup.find('table').find_all('tr')[1:]
                if tr_list:
                    for tr in tr_list:
                        ip = tr.find_all('td')[0].get_text().strip()
                        if '高匿' in tr.find_all('td')[1].get_text().strip():
                            yield ip.replace('..', '.')
        except AttributeError as e:
            logger.info('{}爬虫出错,{}'.format('http://www.goubanjia.com/', e))

    def crawl_iphai(self):
        """IPhai proxy (http://www.iphai.com/free/ng), single page."""
        try:
            if IPHAI_ENABLED:
                print('http://www.iphai.com/free/ng' + ' 爬取中')
                soup = self._get_soup('http://www.iphai.com/free/ng')
                tr_list = soup.find('table').find_all('tr')[1:]
                if tr_list:
                    for tr in tr_list:
                        ip = tr.find_all('td')[0].get_text().strip()
                        port = tr.find_all('td')[1].get_text().strip()
                        yield ':'.join([ip, port])
        except AttributeError as e:
            logger.info('{}爬虫出错,{}'.format('http://www.iphai.com/free/ng', e))

    def crawl_xsdaili(self):
        """Xsdaili proxy (http://www.xsdaili.com/): follow the newest post."""
        try:
            if XSDAILI_ENABLED:
                print('http://www.xsdaili.com/' + ' 爬取中')
                # The front page's first title links (relatively) to the
                # latest proxy dump; fetch it and regex out every ip:port.
                soup = self._get_soup('http://www.xsdaili.com/')
                start_url = soup.find('div', attrs={'class': 'title'}).find('a')['href']
                soup = self._get_soup('http://www.xsdaili.com/' + start_url)
                br_text = soup.find('div', attrs={'class': 'cont'}).get_text().strip()
                for proxy in re.findall(r'\d+\.\d+\.\d+\.\d+\:\d+', br_text, re.S):
                    yield proxy
        except AttributeError as e:
            logger.info('{}爬虫出错,{}'.format('http://www.xsdaili.com/', e))

    def crawl_labproxy(self):
        """Crossin lab proxy list (https://lab.crossincode.com/proxy/)."""
        try:
            if LABPROXY_ENABLED:
                print('https://lab.crossincode.com/proxy/' + ' 爬取中')
                soup = self._get_soup('https://lab.crossincode.com/proxy/')
                tr_list = soup.find('table').find_all('tr')[1:]
                if tr_list:
                    for tr in tr_list:
                        ip = tr.find_all('td')[0].get_text().strip()
                        port = tr.find_all('td')[1].get_text().strip()
                        yield ':'.join([ip, port])
        except AttributeError as e:
            logger.info('{}爬虫出错,{}'.format('https://lab.crossincode.com/proxy/', e))

    def crawl_shenjidaili(self):
        """Shenji proxy (http://www.shenjidaili.com/open/); cell is 'ip:port'."""
        try:
            if SHENJI_ENABLED:
                print('http://www.shenjidaili.com/open/' + ' 爬取中')
                soup = self._get_soup('http://www.shenjidaili.com/open/')
                tr_list = soup.find('table').find_all('tr')[1:]
                if tr_list:
                    for tr in tr_list:
                        yield tr.find_all('td')[0].get_text().strip()
        except AttributeError as e:
            logger.info('{}爬虫出错,{}'.format('http://www.shenjidaili.com/open/', e))

    def crawl_nimadaili(self):
        """Nima proxy (http://www.nimadaili.com/), pages 1-5; cell is 'ip:port'."""
        try:
            if NIMA_ENABLED:
                print('http://www.nimadaili.com/' + ' 爬取中')
                for page in range(1, 6):
                    soup = self._get_soup('http://www.nimadaili.com/gaoni/{}/'.format(page))
                    tr_list = soup.find('table').find_all('tr')[1:]
                    if tr_list:
                        for tr in tr_list:
                            yield tr.find_all('td')[0].get_text().strip()
                    time.sleep(random.uniform(1, 3))
        except AttributeError as e:
            logger.info('{}爬虫出错,{}'.format('http://www.nimadaili.com/', e))

    def crawl_ihuan(self):
        """
        ihuan proxy (https://ip.ihuan.me/).

        The pager uses opaque slug tokens instead of page numbers.
        (Consistency: the response is now decoded as UTF-8 like every
        other crawler; the original skipped setting the encoding here.)
        """
        try:
            if IHUAN_ENABLED:
                print('https://ip.ihuan.me/' + ' 爬取中')
                for page in ['b97827cc', '4ce63706', '5crfe930', 'f3k1d581']:
                    soup = self._get_soup('https://ip.ihuan.me/anonymity/2.html?page={}'.format(page))
                    tr_list = soup.find('table').find_all('tr')[1:]
                    if tr_list:
                        for tr in tr_list:
                            ip = tr.find_all('td')[0].get_text().strip()
                            port = tr.find_all('td')[1].get_text().strip()
                            yield ':'.join([ip, port])
                    time.sleep(random.uniform(1, 3))
        except AttributeError as e:
            logger.info('{}爬虫出错,{}'.format('https://ip.ihuan.me/', e))


if __name__ == '__main__':
    # Manual smoke test: dump every proxy found by the nimadaili crawler.
    crawler = Crawler()
    for proxy in crawler.crawl_nimadaili():
        print(proxy)
