"""
抓取快代理网站代理ip，建立代理ip池
create database proxydb charset utf8;
use proxydb;
create table proxy_tab(
ip varchar(100),
port varchar(20)
)charset=utf8;
"""
import requests
from lxml import etree
from fake_useragent import UserAgent
import pymysql

class KuaiProxyPool:
    """Crawl free proxies from kuaidaili.com, verify each one against
    httpbin, and store working (ip, port) pairs in MySQL (proxydb.proxy_tab)."""

    def __init__(self):
        # Listing-page template; {} is replaced with the page number.
        self.url = 'https://www.kuaidaili.com/free/inha/{}/'
        # Endpoint used to verify that a proxy actually forwards requests.
        self.check_url = 'http://httpbin.org/get'
        # PyMySQL 1.0 removed positional connect() arguments, so the
        # connection parameters must be passed as keywords.
        self.db = pymysql.connect(host='localhost',
                                  user='root',
                                  password='123456',
                                  database='proxydb',
                                  charset='utf8')
        self.cur = self.db.cursor()
        # Explicit column list so the INSERT survives schema additions.
        self.ins = 'insert into proxy_tab (ip, port) values(%s,%s)'

    def get_headers(self):
        """Return request headers carrying a randomized User-Agent."""
        return {'User-Agent': UserAgent(path='fake_useragent.json').random}

    def get_proxy(self, url):
        """Fetch one listing page and test every (ip, port) pair found on it.

        :param url: fully-formatted listing-page URL.
        """
        headers = self.get_headers()
        # A timeout keeps the crawler from hanging forever on a dead page.
        html = requests.get(url=url, headers=headers, timeout=10).text
        # Parse the proxy table with lxml + XPath.
        eobj = etree.HTML(html)
        for tr in eobj.xpath('//table/tbody/tr'):
            ip_list = tr.xpath('./td[1]/text()')
            port_list = tr.xpath('./td[2]/text()')
            if ip_list and port_list:
                self.check_proxy(ip_list[0], port_list[0])
            else:
                print('ip port not crawled')

    def check_proxy(self, ip, port):
        """Test a single proxy; persist it to MySQL only when it works.

        :param ip: proxy host as a string.
        :param port: proxy port as a string.
        """
        proxies = {
            'http': 'http://{}:{}'.format(ip, port),
            'https': 'https://{}:{}'.format(ip, port),
        }
        try:
            headers = self.get_headers()
            resp = requests.get(url=self.check_url, proxies=proxies,
                                headers=headers, timeout=3)
            # A proxy that answers with an error status is not usable.
            resp.raise_for_status()
            # No exception raised: the proxy works, store it.
            self.cur.execute(self.ins, [ip, port])
            self.db.commit()
            print(ip, port, 'is ok')
        except Exception:
            # Dead proxies are the common case here; a failed INSERT would
            # leave the transaction dirty, so roll it back defensively.
            self.db.rollback()
            print(ip, port, 'is not ok')

    def crawl(self, start=1, end=1000):
        """Crawl listing pages start..end (inclusive), then close MySQL.

        Defaults preserve the original behavior (pages 1 through 1000).

        :param start: first page number to fetch.
        :param end: last page number to fetch.
        """
        try:
            for page in range(start, end + 1):
                self.get_proxy(self.url.format(page))
        finally:
            # Release DB resources even if crawling raises mid-way.
            self.cur.close()
            self.db.close()

if __name__ == '__main__':
    # Build the pool and start crawling immediately.
    KuaiProxyPool().crawl()










