import time
import random
import requests
import re
import binascii

from lxml import etree
from IPProxyPool.core.proxy_spider.base_spider import BaseSpider
from IPProxyPool.utils.http import get_request_headers
from IPProxyPool.domain import Proxy

"""
六、具体代理ip爬虫：
各种网址
1. 实现ip3366代理爬虫: http://www.ip3366.net/free/?stype=1&page=1
    定义一个类,继承通用爬虫类(BaseSpider)，提供urls, group_xpath 和 detail_xpath
"""


class Ip3366Spider(BaseSpider):
    """Crawl the free proxy lists on ip3366.net.

    Only declarative configuration lives here; all fetching and parsing
    is inherited from BaseSpider.
    """

    # Two list types (stype 1 and 3), three pages each.
    urls = [
        'http://www.ip3366.net/free/?stype={}&page={}'.format(stype, page)
        for stype in (1, 3)
        for page in (1, 2, 3)
    ]
    # XPath selecting one table row per proxy entry.
    group_xpath = '//*[@id="list"]/table/tbody/tr'
    # Row-relative XPaths for the individual proxy fields.
    detail_xpath = {
        'ip': './td[1]/text()',
        'port': './td[2]/text()',
        'area': './td[5]/text()',
    }


"""
2. 实现快代理爬虫: https://www.kuaidaili.com/free/inha/1/
    定义一个类,继承通用爬虫类(BaseSpider)
    提供urls, group_xpath 和 detail_xpath
"""


class KuaiSpider(BaseSpider):
    """Crawl the free proxy lists on kuaidaili.com.

    The site rejects requests that arrive too close together, so page
    fetching is throttled with a random pause (see get_page_from_url).
    """

    # Pages 1..3 of the domestic ("inha") free-proxy listing.
    urls = ['https://www.kuaidaili.com/free/inha/{}/'.format(page) for page in (1, 2, 3)]
    # XPath selecting one table row per proxy entry.
    group_xpath = '//*[@id="list"]/div[2]/table/tbody/tr'
    # Row-relative XPaths for the individual proxy fields.
    detail_xpath = {
        'ip': './td[1]/text()',
        'port': './td[2]/text()',
        'area': './td[5]/text()',
    }

    def get_page_from_url(self, url):
        """Fetch one page, sleeping a random 1-3 seconds first.

        The delay is an anti-anti-crawling measure: back-to-back requests
        trigger the site's rate limiting and the request fails.
        """
        delay = random.uniform(1, 3)
        time.sleep(delay)
        # Delegate the actual request/response handling to the base class.
        return super().get_page_from_url(url)


"""
3. 实现proxylistplus（国外）代理爬虫: https://list.proxylistplus.com/Fresh-HTTP-Proxy-List-1
    定义一个类,继承通用爬虫类(BaseSpider)
    提供urls, group_xpath 和 detail_xpath
"""


class ProxyListPlusSpider(BaseSpider):
    """Crawl the fresh HTTP proxy lists on proxylistplus.com (foreign proxies).

    Pure configuration subclass; BaseSpider performs the crawling.
    """

    # Pages 1..3 of the "Fresh HTTP Proxy" listing.
    urls = [
        'https://list.proxylistplus.com/Fresh-HTTP-Proxy-List-{}'.format(page)
        for page in (1, 2, 3)
    ]
    # Skip the first two rows (header rows) of the second table on the page.
    group_xpath = '//*[@id="page"]/table[2]/tr[position()>2]'
    # Row-relative XPaths; note the first cell is not the IP on this site.
    detail_xpath = {
        'ip': './td[2]/text()',
        'port': './td[3]/text()',
        'area': './td[5]/text()',
    }


"""
4. 实现89免费代理爬虫: https://www.89ip.cn/index_1.html
    定义一个类,继承通用爬虫类(BaseSpider)
    提供urls, group_xpath 和 detail_xpath
"""


class Free89Spider(BaseSpider):
    """Crawl the free proxy lists on 89ip.cn.

    89ip.cn pads its table cells with newlines/tabs/spaces, so this class
    overrides get_proxies_from_page to strip that whitespace before
    building Proxy objects.
    """

    # Pages 1..2 of the listing.
    urls = ['https://www.89ip.cn/index_{}.html'.format(i) for i in range(1, 3)]
    # XPath selecting one table row per proxy entry.
    group_xpath = '//*[@class="layui-row layui-col-space15"]/div/div/div/table/tbody/tr'
    # Row-relative XPaths for the individual proxy fields.
    detail_xpath = {
        'ip': './td[1]/text()',
        'port': './td[2]/text()',
        'area': './td[3]/text()'
    }

    def get_proxies_from_page(self, page):
        """Parse one result page; yield a Proxy per usable table row.

        Fields are cleaned of all embedded whitespace. `area` is kept as a
        str: the previous implementation called .encode('utf-8'), which
        produced a bytes object for Chinese area names (inconsistent with
        the other spiders, which all pass str).
        """
        element = etree.HTML(page)
        # Tag list: one <tr> per proxy entry.
        trs = element.xpath(self.group_xpath)
        for tr in trs:
            ip = self.get_first_from_list(tr.xpath(self.detail_xpath['ip']))
            port = self.get_first_from_list(tr.xpath(self.detail_xpath['port']))
            area = self.get_first_from_list(tr.xpath(self.detail_xpath['area']))
            # Skip malformed rows instead of crashing on a missing cell
            # (re.sub on None raised TypeError before).
            if not ip or not port:
                continue
            # ''.join(split()) drops every \n, \t and space in one pass.
            ip = ''.join(ip.split())
            port = ''.join(port.split())
            if area:
                area = ''.join(area.split())
            # Yield lazily so callers can stop early.
            yield Proxy(ip, port, area=area)


if __name__ == '__main__':
    # Quick manual check: swap in Ip3366Spider, KuaiSpider or
    # ProxyListPlusSpider here to exercise a different site.
    crawler = Free89Spider()
    for item in crawler.get_proxies():
        print(item)
