import json
import re

from lxml import etree

from .utils import get_page
from pyquery import PyQuery as pq


class ProxyMetaclass(type):
    """Metaclass that auto-registers every ``crawl_*`` method of a class.

    Adds two class attributes:
      * ``__CrawlFunc__``      -- list of the crawl method names
      * ``__CrawlFuncCount__`` -- how many crawl methods were found
    """

    def __new__(cls, name, bases, attrs):
        # startswith() instead of the original substring test `'crawl_' in k`,
        # which would also match unrelated names such as 'not_crawl_x'.
        crawl_funcs = [k for k in attrs if k.startswith('crawl_')]
        attrs['__CrawlFunc__'] = crawl_funcs
        # Derive the count from the list instead of a hand-maintained counter.
        attrs['__CrawlFuncCount__'] = len(crawl_funcs)
        return type.__new__(cls, name, bases, attrs)


class Crawler(object, metaclass=ProxyMetaclass):
    """Base crawler: gathers proxies from the ``crawl_*`` generator methods
    that :class:`ProxyMetaclass` discovered on this class.
    """

    def get_proxies(self, callback):
        """Run the crawl method named *callback* and collect its proxies.

        :param callback: name of a ``crawl_*`` generator method on this class
        :return: list of proxy strings yielded by that method
        """
        proxies = []
        # getattr() replaces the original eval("self.{}()".format(callback)):
        # identical lookup-and-call behavior without executing an arbitrary
        # string as code.
        for proxy in getattr(self, callback)():
            print('成功获取到代理', proxy)
            proxies.append(proxy)
        return proxies

    def crawl_xiguadaili(self):
        """Fetch proxies from the xiguadaili API and yield them one by one.

        Side effect: caches the raw API response in the file ``xigua_text``.

        :return: generator of ``host:port`` proxy strings
        """
        url = "http://api3.xiguadaili.com/ip/?tid=555761992818874&num=100&delay=3&category=2"

        html = get_page(url)

        if html:
            # Cache the raw response. The original re-opened the file for
            # reading INSIDE this block, before the buffered write was
            # flushed, and never closed the read handle — risking empty or
            # partial reads plus a file-descriptor leak.
            with open("xigua_text", 'w', encoding="utf-8") as f:
                f.write(html)

            with open("xigua_text", encoding="utf-8") as f:
                for line in f:
                    proxy = line.strip()
                    # Skip blank lines and drop trailing newlines, which the
                    # original left attached to every yielded proxy.
                    if proxy:
                        yield proxy

    # def crawl_daili66(self):
    #     """
    #     获取代理66
    #     :return: 代理
    #     """
    #     start_url = 'http://www.66ip.cn/areaindex_{}/1.html'
    #     headers = {
    #         "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.77 Safari/537.36",
    #         "Cookie": "yd_cookie=6ff7861f-5b66-496086e5975d60e7c3a747a2f692c23b744d; _ydclearance=2e64b45f72d39405fcef5ffc-c41e-46e4-ad65-9e101eb9ad7d-1545712913; Hm_lvt_1761fabf3c988e7f04bec51acd4073f4=1545705735; Hm_lpvt_1761fabf3c988e7f04bec51acd4073f4=1545705956",
    #     }
    #     urls = [start_url.format(page) for page in range(1, 35)]
    #     for url in urls:
    #         print('Crawling', url)
    #         html = get_page(url, options=headers)
    #         if html:
    #             doc = pq(html)
    #             trs = doc('.containerbox table tr:gt(0)').items()
    #             for tr in trs:
    #                 ip = tr.find('td:nth-child(1)').text()
    #                 port = tr.find('td:nth-child(2)').text()
    #                 proxy = ':'.join([ip, port])
    #                 yield proxy

    # def crawl_ip66(self):
    #     """
    #     获取代理66
    #     :return: 代理
    #     """
    #     url = 'http://www.66ip.cn/nmtq.php?getnum=100&isp=0&anonymoustype=3&start=&ports=&export=&ipaddress=&area=1&proxytype=2&api=66ip'
    #     headers = {
    #         "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.77 Safari/537.36",
    #         "Cookie": "yd_cookie=6ff7861f-5b66-496086e5975d60e7c3a747a2f692c23b744d; _ydclearance=2e64b45f72d39405fcef5ffc-c41e-46e4-ad65-9e101eb9ad7d-1545712913; Hm_lvt_1761fabf3c988e7f04bec51acd4073f4=1545705735; Hm_lpvt_1761fabf3c988e7f04bec51acd4073f4=1545705956",
    #     }
    #     print('Crawling', url)
    #     html = get_page(url, options=headers)
    #     if html:
    #         html = etree.HTML(html)
    #
    #         yield proxy
