# coding:utf-8
import json
import requests
from pyquery import PyQuery as pq


class Crawler:
    def __init__(self):
        '''
            Collect proxy "IP:port" pairs from several free-proxy websites.

            Every method whose name starts with ``crawl_`` is one proxy
            source; add a new source simply by defining another such method.
        '''
        # Snapshot of this instance's attribute names; callers are expected
        # to pick out the ones starting with 'crawl_' and feed each name to
        # get_proxies().
        self.methods = dir(self)

    def get_proxies(self, method):
        '''
            Invoke one crawl method by name and collect everything it yields.

            :param method: name of the crawl method to call, e.g. 'crawl_89ip'
            :return: list of 'ip:port' strings produced by that method
        '''
        proxies = []
        # getattr() replaces the original eval('self.{}()'.format(method)):
        # identical behavior for valid method names, but no arbitrary-code
        # execution if `method` ever comes from untrusted input.
        for proxy in getattr(self, method)():
            print('成功获取到代理：', proxy)
            proxies.append(proxy)

        return proxies

    def __getPage(self, url):
        '''
            Fetch a web page.

            :param url: page URL to download
            :return: response body text on HTTP 200, otherwise None
        '''
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36',
            'Accept-Encoding': 'gzip, deflate, sdch',
            'Accept-Language': 'en-US,en;q=0.9,zh-CN;q=0.8,zh;q=0.7'
        }
        try:
            # timeout so an unresponsive proxy site cannot hang the crawler
            response = requests.get(url, headers=headers, timeout=10)
            print('抓取成功，', url, response.status_code)
            if response.status_code == 200:
                return response.text
        except requests.exceptions.RequestException:
            # Bug fix: the original caught the *builtin* ConnectionError,
            # which does NOT cover requests' exceptions (they derive from
            # RequestException), so timeouts/DNS failures crashed the crawler.
            print('抓取失败')
        # Non-200 responses and failures alike yield None explicitly.
        return None

    def crawl_66ip(self, page_count=4):
        '''
            Crawl proxies from 66ip (www.66ip.cn).

            :param page_count: number of listing pages to fetch
            :return: yields 'ip:port' strings
        '''
        base_url = 'http://www.66ip.cn/{}.html'

        for page in range(1, page_count+1):
            url = base_url.format(page)
            print('crawling', url)
            html = self.__getPage(url)
            if html:
                doc = pq(html)
                # skip the first <tr> (table header)
                trs = doc('#main table tr:gt(0)').items()
                for tr in trs:
                    ip = tr.find('td:nth-child(1)').text()
                    port = tr.find('td:nth-child(2)').text()
                    yield ':'.join([ip, port])

    def crawl_xicidaili(self, page_count=3):
        '''
            Crawl proxies from Xici (www.xicidaili.com).

            :param page_count: number of listing pages to fetch
            :return: yields 'ip:port' strings
        '''
        base_url = 'http://www.xicidaili.com/nn/{}'

        for page in range(1, page_count+1):
            url = base_url.format(page)
            print('crawling', url)
            html = self.__getPage(url)
            if html:
                doc = pq(html)
                # skip the first <tr> (table header); on this site the IP is
                # in the 2nd column and the port in the 3rd
                trs = doc('#ip_list tr:gt(0)').items()
                for tr in trs:
                    ip = tr.find('td:nth-child(2)').text()
                    port = tr.find('td:nth-child(3)').text()
                    yield ':'.join([ip, port])

    def crawl_89ip(self, page_count=4):
        '''
            Crawl proxies from 89ip (www.89ip.cn).

            :param page_count: number of listing pages to fetch
            :return: yields 'ip:port' strings
        '''
        base_url = 'http://www.89ip.cn/index_{}.html'

        for page in range(1, page_count+1):
            url = base_url.format(page)
            print('crawling', url)
            html = self.__getPage(url)
            if html:
                doc = pq(html)
                trs = doc('table tbody tr').items()
                for tr in trs:
                    ip = tr.find('td:nth-child(1)').text()
                    port = tr.find('td:nth-child(2)').text()
                    yield ':'.join([ip, port])

