from IPProxyPool.core.proxy_spider.base_spider import BaseSpider
from pyquery import PyQuery as pq
import re
from IPProxyPool.domain import Proxy
import base64
import json
import requests
from IPProxyPool.utils.http import get_request_headers


# By subclassing the generic spider, implement multiple spiders that each
# scrape proxy IPs from a different free-proxy website.

# Free proxies: http://ip.yqie.com/ipproxy.htm
class YpieSpider(BaseSpider):
    """Spider for ip.yqie.com high-anonymity proxy list pages."""

    # Paginated list pages to crawl.
    urls = ['http://ip.yqie.com/proxygaoni/index_{}.htm'.format(i) for i in range(1, 11)]

    # CSS selector for each table row holding one proxy (skips the header row).
    group_css = '#GridViewOrder  > tr:nth-child(n+2)'
    # CSS selectors for the individual fields inside a row.
    detail_css = {
        'ip': 'td:nth-child(2)',
        'port': 'td:nth-child(3)',
        'area': 'td:nth-child(4)'
    }

    # This site base64-encodes the IP inside an inline JS snippet, so the
    # generic extraction has to be overridden to decode it.
    def get_proxies_from_page(self, page):
        """Parse one HTML page and yield a Proxy object per table row.

        The IP cell contains either a plain address or a JS call of the form
        document.write(window.atob("...")); whose argument is the
        base64-encoded address.
        """
        html = pq(page)
        # All table rows containing proxy data.
        trs = html(self.group_css)
        for tr in trs.items():
            raw_ip = tr(self.detail_css['ip']).text()
            # Fix: escape the literal dots so `.` cannot match arbitrary
            # characters, and use a non-greedy group so the capture stays
            # inside the quotes.
            match = re.search(r'document\.write\(window\.atob\("(.*?)"\)\);', raw_ip)
            if match:
                ip = base64.b64decode(match.group(1)).decode()
            else:
                # Already a plain-text address.
                ip = raw_ip
            port = tr(self.detail_css['port']).text()
            area = tr(self.detail_css['area']).text()
            yield Proxy(ip, port, area=area)


# https://www.89ip.cn/index_1.html
class Ip89Spider(BaseSpider):
    """Spider for www.89ip.cn; only the selectors differ from BaseSpider."""

    # Paginated list pages to crawl.
    urls = [f'https://www.89ip.cn/index_{page}.html' for page in range(1, 11)]

    # CSS selector for each table row holding one proxy.
    group_css = 'div.layui-form > table > tbody > tr'
    # CSS selectors for the fields inside a row.
    detail_css = {'ip': 'td:nth-child(1)',
                  'port': 'td:nth-child(2)',
                  'area': 'td:nth-child(3)'}


# https://github.com/jiangxianli/ProxyIpLib#%E5%85%8D%E8%B4%B9%E4%BB%A3%E7%90%86ip%E5%BA%93
# This endpoint returns JSON, so the HTML-based extraction is overridden.
class JiangxianliSpider(BaseSpider):
    """Spider for the jiangxianli JSON proxy API."""

    # Paginated API endpoints to crawl.
    urls = [
        'https://ip.jiangxianli.com/api/proxy_ips?page={}&order_by=validated_at'.format(n)
        for n in range(1, 15)
    ]

    # No CSS selectors needed: the page body is JSON, not HTML.
    def get_proxies_from_page(self, page):
        """Decode the JSON payload and yield one Proxy per record."""
        records = json.loads(page)['data']['data']
        for record in records:
            yield Proxy(record['ip'], record['port'], area=record['ip_address'])


# https://www.kuaidaili.com/free/inha/1/
class KuaidailiSpider(BaseSpider):
    """Spider for www.kuaidaili.com; only the selectors differ from BaseSpider."""

    # Paginated list pages to crawl.
    urls = [f'https://www.kuaidaili.com/free/inha/{page}/' for page in range(1, 11)]

    # CSS selector for each table row holding one proxy.
    group_css = '#list > table > tbody > tr'
    # CSS selectors for the fields inside a row.
    detail_css = {'ip': 'td:nth-child(1)',
                  'port': 'td:nth-child(2)',
                  'area': 'td:nth-child(5)'}


# http://www.xiladaili.com/gaoni/ — the IP and port share one cell ("ip:port"),
# so extraction is overridden to split them apart.
class XilaSpider(BaseSpider):
    """Spider for www.xiladaili.com high-anonymity proxies."""

    # Paginated list pages to crawl.
    urls = ['http://www.xiladaili.com/gaoni/{}/'.format(i) for i in range(1, 11)]

    def get_proxies_from_page(self, page):
        """Parse one HTML page and yield a Proxy object per table row.

        The first cell holds "ip:port". str.partition is used instead of
        indexing split()'s result so a malformed cell without a colon
        yields an empty port rather than raising IndexError.
        """
        html = pq(page)
        trs = html('div.mt-0.mb-2.table-responsive > table > tbody > tr')
        for tr in trs.items():
            addr = tr('td:nth-child(1)').text()
            # Robustness fix: no IndexError when the colon is missing.
            ip, _, port = addr.partition(':')
            area = tr('td:nth-child(4)').text()
            yield Proxy(ip, port, area=area)


# https://ip.ihuan.me/
class XiaohuanSpider(BaseSpider):
    """Spider for ip.ihuan.me; pages are addressed by opaque tokens."""

    # Page tokens used by the site instead of sequential numbers
    # (the trailing empty string is the front page).
    pages = [
        'b97827cc', '4ce63706', '5crfe930', 'f3k1d581',
        'ce1d45977', '881aaf7b5', 'eas7a436', '981o917f5',
        '2d28bd81a', 'a42g5985d', '',
    ]
    urls = [f'https://ip.ihuan.me/?page={token}' for token in pages]

    # CSS selector for each table row holding one proxy.
    group_css = 'div.table-responsive > table > tbody > tr'
    # CSS selectors for the fields inside a row.
    detail_css = {'ip': 'td:nth-child(1) > a',
                  'port': 'td:nth-child(2)',
                  'area': 'td:nth-child(3)>a'}


# http://www.ip3366.net/free/?stype=1&page=1 — pages are GBK-encoded,
# so page fetching is overridden to decode with the right codec.
class YundailiSpider(BaseSpider):
    """Spider for www.ip3366.net free proxy lists."""

    # Two list types (stype 1 and 2), seven pages each.
    urls = ['http://www.ip3366.net/free/?stype={}&page={}'.format(i, j)
            for i in range(1, 3) for j in range(1, 8)]

    def get_page_from_url(self, url):
        """Fetch one list page and decode it from GBK (site is not UTF-8)."""
        # Fix: add a timeout so a stalled server cannot hang the crawl forever.
        response = requests.get(url, headers=get_request_headers(), timeout=10)
        return response.content.decode('GBK')

    # CSS selector for each table row holding one proxy.
    group_css = '#list > table > tbody > tr'
    # CSS selectors for the fields inside a row.
    detail_css = {
        'ip': 'td:nth-child(1)',
        'port': 'td:nth-child(2)',
        'area': 'td:nth-child(5)'
    }


if __name__ == '__main__':
    # Smoke-test a single spider by printing everything it scrapes.
    # Swap in any of the other classes above (YpieSpider, Ip89Spider,
    # JiangxianliSpider, KuaidailiSpider, XilaSpider, XiaohuanSpider)
    # to exercise a different source.
    spider = YundailiSpider()
    for proxy in spider.get_proxies():
        print(proxy)