import re
import time
import requests
from threading import Thread
from bs4 import BeautifulSoup
from ProxiesPool.ProxySave import Saver
from Spider.util.UserAgentPool import MyUserAgent


def download(url):
    """Fetch *url* and return its body as text, or ``None`` on any failure.

    Redirects are disabled (``allow_redirects=False``) — presumably to avoid
    anti-scraper 302 redirects on these free-proxy sites; TODO confirm.
    A non-200 status, a timeout, or any connection error all yield ``None``
    so callers only need a single truthiness check.
    """
    headers = {
        'User-Agent': MyUserAgent.random()
    }
    try:
        # Always pass a timeout: without one, requests can block forever on
        # a dead host and hang the crawler thread that called us.
        response = requests.get(url=url, headers=headers,
                                allow_redirects=False, timeout=10)
    except requests.RequestException:
        # Network-level failures (DNS, refused, timeout, ...) are expected
        # when hammering flaky free-proxy sites — treat them as "no page".
        return None
    if response.status_code == 200:
        response.encoding = 'utf-8'
        return response.text
    return None


class ProxiesMetaclass(type):
    """Metaclass that auto-registers crawler methods.

    Every attribute whose name starts with ``crawl_`` is collected into a
    class-level ``CrawlFunc`` list, so callers can iterate all crawlers
    without naming each one explicitly.
    """

    def __new__(cls, name, bases, attrs):
        # Prefix match (startswith) rather than the sloppier substring test
        # ('crawl_' in k): a private helper whose name merely *contains*
        # 'crawl_' must not be registered and run as a crawler.
        attrs['CrawlFunc'] = [
            value for attr_name, value in attrs.items()
            if attr_name.startswith('crawl_')
        ]
        return super().__new__(cls, name, bases, attrs)


class CrawlProxy(object, metaclass=ProxiesMetaclass):
    """Free-proxy site crawlers.

    Every ``crawl_*`` method is auto-registered into ``CrawlFunc`` by
    ProxiesMetaclass and executed (each in its own thread) by ``Getting``.

    NOTE: private helpers must NOT contain ``crawl_`` in their name,
    otherwise the metaclass would register them as crawlers.
    """

    def _harvest(self, url_template, pages, selector, ip_pattern,
                 port_pattern, skip_first_row=False):
        """Shared scraping loop used by all ``crawl_*`` methods.

        Walks pages 1..*pages* of *url_template*, selects table rows with
        CSS *selector*, extracts ip/port via the two regexes and stores
        each ``"ip:port"`` string through ``Saver.add_data``.

        Rows where either regex fails to match are skipped — the original
        code called ``.group(1)`` on a possibly-None match and crashed the
        whole crawl on the first unexpected row.
        """
        mysql = Saver()
        for page in range(1, pages + 1):
            html = download(url=url_template.format(page))
            if html:
                soup = BeautifulSoup(html, 'lxml')
                rows = soup.select(selector)
                if skip_first_row:
                    rows = rows[1:]  # first <tr> is the table header
                for row in rows:
                    markup = str(row)
                    ip_match = re.search(ip_pattern, markup)
                    port_match = re.search(port_pattern, markup)
                    if ip_match and port_match:
                        mysql.add_data(ip_match.group(1) + ':' + port_match.group(1))
            time.sleep(2)  # throttle between pages to be polite to the site

    def crawl_66ip(self):
        """Scrape www.66ip.cn, pages 1-10."""
        print('开始获取66ip免费代理')
        self._harvest('http://www.66ip.cn/{}.html', 10,
                      '.containerbox table tr',
                      r'<tr><td>(.*?)</td>', r'<td>(\d+)</td>',
                      skip_first_row=True)

    def crawl_kuaidaili(self):
        """Scrape www.kuaidaili.com free list, pages 1-10."""
        print('开始获取快代理免费代理')
        self._harvest('https://www.kuaidaili.com/free/inha/{}/', 10,
                      '#list tbody tr',
                      r'<td data-title="IP">(.*?)</td>',
                      r'<td data-title="PORT">(\d+)</td>')

    def crawl_xicidaili(self):
        """Scrape www.xicidaili.com, pages 1-5."""
        print('开始获取西刺代理免费代理')
        self._harvest('https://www.xicidaili.com/nn/{}', 5,
                      '#ip_list tr',
                      r'<td>(\d+\.\d+\.\d+\.\d+)</td>', r'<td>(\d+)</td>',
                      skip_first_row=True)

    def crawl_port(self):
        # Placeholder; still auto-registered in CrawlFunc and run as a no-op.
        pass


class Getting(object):
    """Runs every registered crawler concurrently to refill the proxy pool."""

    def __init__(self):
        self.crawl = CrawlProxy()
        self.save = Saver()
        self.__max = 200  # pool size cap: skip crawling when already full

    def run(self):
        """Purge stale entries, then crawl unless the pool is at capacity."""
        self.save.delete_data()
        # Guard clause: nothing to do when the pool already holds enough.
        if self.save.count() >= self.__max:
            print('代理池中代理数已超过最大值')
            return
        print('获取器开始运行')
        # One worker thread per auto-registered crawl_* function; each is an
        # unbound function, so the CrawlProxy instance is passed as `self`.
        workers = [Thread(target=fn, args=(self.crawl,))
                   for fn in self.crawl.CrawlFunc]
        for worker in workers:
            worker.start()
        for worker in workers:
            worker.join()
        print('获取器执行完毕')

