from core.proxy_spider.base_spider import BaseSpider
# import requests
# from utils.http import get_request_headers
from lxml import etree
from domain import Proxy


class KuaiSpider(BaseSpider):
    """Spider for the kuaidaili.com free proxy listing (pages 1-10)."""

    def __init__(self):
        # Pages 1..10 of the free-proxy listing.
        page_urls = ['https://www.kuaidaili.com/free/intr/%d' % page
                     for page in range(1, 11)]
        # XPath selecting one <tr> per proxy entry.
        row_xpath = "//*[name()='tbody']/tr"
        # Relative XPaths for the fields inside each row.
        field_xpath = {
            'ip': './td[1]/text()',
            'port': './td[2]/text()',
            'area': './td[5]/text()',
        }
        super().__init__(page_urls, row_xpath, field_xpath)


class Ip89Spider(BaseSpider):
    """Spider for the 89ip.cn free proxy listing (pages 1-10)."""

    def __init__(self):
        page_urls = ['https://www.89ip.cn/index_%d.html' % page
                     for page in range(1, 11)]
        row_xpath = "//*[name()='tbody']/tr"
        field_xpath = {
            'ip': './td[1]/text()',
            'port': './td[2]/text()',
            'area': './td[3]/text()',
        }
        super().__init__(page_urls, row_xpath, field_xpath)

    def get_proxies_from_page(self, page):
        """Yield one Proxy per table row parsed from a page.

        Overridden from the base class because this site's response
        decodes to mojibake (Greek-letter garbage in place of Chinese)
        unless the page is parsed with an explicit UTF-8 HTML parser.
        """
        utf8_parser = etree.HTMLParser(encoding='utf-8')
        document = etree.HTML(page, parser=utf8_parser)
        for row in document.xpath(self.group_xpath):
            ip = self.get_info(row.xpath(self.detail_xpath['ip'])).strip()
            port = int(self.get_info(row.xpath(self.detail_xpath['port'])).strip())
            area = self.get_info(row.xpath(self.detail_xpath['area'])).strip()
            yield Proxy(ip=ip, port=port, area=area)


class BeesproxySpider(BaseSpider):
    """Spider for the beesproxy.com free proxy listing (pages 1-10)."""

    def __init__(self):
        url_template = 'https://www.beesproxy.com/free/page/{}'
        page_urls = [url_template.format(page) for page in range(1, 11)]
        # Rows of the proxy table inside the article body.
        row_xpath = '//*[@id="article-copyright"]/figure/table/tbody/tr'
        field_xpath = {
            'ip': './td[1]/text()',
            'port': './td[2]/text()',
            'area': './td[3]/text()',
        }
        super().__init__(page_urls, row_xpath, field_xpath)


class Ip66Spider(BaseSpider):
    """Spider for the 66ip.cn free proxy listing (pages 1-10)."""

    def __init__(self):
        page_urls = ['http://www.66ip.cn/{}.html'.format(page)
                     for page in range(1, 11)]
        # position()>1 skips the table's header row.
        row_xpath = "//*[name()='table']/tr[position()>1]"
        field_xpath = {
            'ip': './td[1]/text()',
            'port': './td[2]/text()',
            'area': './td[3]/text()',
        }
        super().__init__(page_urls, row_xpath, field_xpath)


if __name__ == '__main__':
    # Smoke test: crawl 89ip.cn and print every proxy found.
    # The other spiders (KuaiSpider, BeesproxySpider, Ip66Spider) can be
    # exercised the same way by swapping the class below.
    ip89_spider = Ip89Spider()
    for proxy in ip89_spider.get_proxies():
        print(proxy)
