#!/usr/bin/env python
# encoding: utf-8
'''
@author: aliax
@license: (C) Copyright 2018-2020.
@contact: 1048327635@qq.com
@file: get_proxy.py
@time: 2020/7/8 0008 19:54
@desc:
'''
import threading
import requests,time
import Config.config as con
import Config.db as db
from parsel import Selector

class Get_proxy():
    """
    Scrape free-proxy listing websites and store each proxy ("ip:port")
    into the redis hash "all" via Config.db.

    All sites that render an HTML table share one extraction routine,
    driven by the per-site xpath rules in ``_SITE_RULES``; this replaces
    the original six copy-pasted if/elif branches.
    """

    # Per-site extraction rules:
    #   name -> (row xpath, ip xpath, port xpath, leading rows to skip)
    # Sites with the same page layout share identical rule tuples.
    _SITE_RULES = {
        '快代理': ('//table[@class="table table-bordered table-striped"]/tbody/tr',
                './td[1]/text()', './td[2]/text()', 0),
        '齐云代理': ('//table[@class="table table-bordered table-striped"]/tbody/tr',
                 './td[1]/text()', './td[2]/text()', 0),
        '云代理': ('//table[@class="table table-bordered table-striped"]/tbody/tr',
                './td[1]/text()', './td[2]/text()', 0),
        '开心代理[普匿]': ('//table[@class="active"]/tbody/tr',
                     './td[1]/text()', './td[2]/text()', 0),
        '开心代理[高匿]': ('//table[@class="active"]/tbody/tr',
                     './td[1]/text()', './td[2]/text()', 0),
        '免费代理库': ('//table[@class="layui-table"]/tbody/tr',
                  './td[1]/text()', './td[2]/text()', 0),
        '89代理': ('//table[@class="layui-table"]/tbody/tr',
                 './td[1]/text()', './td[2]/text()', 0),
        # list items instead of table rows; first item is the header
        '蝶鸟代理': ('//div[@class="free-main col-lg-12 col-md-12 col-sm-12 col-xs-12"]/ul/li',
                 './span[1]/text()', './span[2]/text()', 1),
        # first two rows are headers; ip/port live in columns 2 and 3
        'ProxyList': ('//table[@class="bg"]/tr',
                      './td[2]/text()', './td[3]/text()', 2),
    }

    def __init__(self):
        pass

    def _save(self, proxy):
        """Refresh *proxy* in the redis hash "all" (delete, then re-set)."""
        db.delete("all", proxy)
        db.set("all", proxy, proxy)

    def _harvest(self, rows, ip_xpath, port_xpath, thread_name, name,
                 delay=1, log_msg='{}] - {} {} 爬取成功！'):
        """
        Extract ip/port from each selector in *rows*, save and log the proxy.

        :param rows: iterable of parsel selectors (one per table row / list item)
        :param ip_xpath: xpath (relative to a row) yielding the ip text
        :param port_xpath: xpath (relative to a row) yielding the port text
        :param thread_name: current thread name, used only for log output
        :param name: site name, used only for log output
        :param delay: seconds to sleep after each stored proxy
        :param log_msg: format string for the success log line
        """
        for row in rows:
            proxy_ip = row.xpath(ip_xpath).extract_first()
            proxy_port = row.xpath(port_xpath).extract_first()
            # extract_first() returns None on malformed rows; the original
            # string concatenation then raised TypeError and killed the
            # worker thread.  Skip such rows instead.
            if not proxy_ip or not proxy_port:
                continue
            # strip() unified here: 89代理 required it, it is harmless for
            # the other sites' already-clean text
            proxy = proxy_ip.strip() + ':' + proxy_port.strip()  # redis key "ip:port"
            self._save(proxy)
            con.log.info(log_msg.format(thread_name, name, proxy))
            time.sleep(delay)

    def get_proxy(self, name):
        """
        Endless scraping loop for one site.

        :param name: site key; also indexes con.pages (page count) and
                     con.website (url template)
        :return: never returns -- sleeps 10s between pages and 300s between
                 full passes over the site, then scrapes again
        """
        # thread name, used for log output
        thread_name = threading.current_thread().name
        while True:
            # number of pages to scrape comes from the config
            pages = con.pages.get(name)
            for page in range(1, pages + 1):
                url = con.website.get(name).format(str(page))
                try:
                    # timeout added: without it a stalled server hangs the
                    # worker thread forever inside this infinite loop
                    data = requests.get(url=url, headers=con.headers,
                                        timeout=30).text
                except requests.RequestException as err:
                    # network failures no longer kill the thread; log,
                    # wait, and move on to the next page
                    con.log.info('{}] - {} page {} request failed: {}'.format(
                        thread_name, name, page, err))
                    time.sleep(10)
                    continue

                if name == 'proxylists':
                    # plain-text site: one "ip:port" per CRLF line, with a
                    # trailing empty line ([:-1] drops it)
                    for proxy in data.split('\r\n')[:-1]:
                        self._save(proxy)
                        con.log.info('{}] - {} {} 爬取成功！'.format(thread_name, name, proxy))
                elif name in self._SITE_RULES:
                    row_xpath, ip_xpath, port_xpath, skip = self._SITE_RULES[name]
                    rows = Selector(data).xpath(row_xpath)[skip:]
                    self._harvest(rows, ip_xpath, port_xpath, thread_name, name)

                # current page done -- wait 10s before the next page
                time.sleep(10)

            # full pass over the site done -- wait 300s, then re-scrape
            time.sleep(300)

    def get_proxy_xiaohuan(self, name):
        """
        Scrape 小幻代理, whose pages are addressed by opaque tokens rather
        than sequential page numbers.

        :param name: site key indexing the url template in con.website
        :return: None -- single pass over the hard-coded page tokens
        """
        thread_name = threading.current_thread().name
        # hard-coded tokens of the site's first ten pages
        pages = ['b97827cc', '4ce63706', '5crfe930', 'f3k1d581', 'ce1d45977',
                 '881aaf7b5', 'eas7a436', '981o917f5', '2d28bd81a', 'a42g5985d']
        for page in pages:
            url = con.website.get(name).format(str(page))
            try:
                data = requests.get(url=url, headers=con.headers,
                                    timeout=30).text
            except requests.RequestException as err:
                con.log.info('{}] - {} page {} request failed: {}'.format(
                    thread_name, name, page, err))
                continue
            rows = Selector(data).xpath(
                '//table[@class="table table-hover table-bordered"]/tbody/tr')
            # note: this site's ip lives inside an <a> tag, and its original
            # success message has no trailing '！'
            self._harvest(rows, './td/a[1]/text()', './td[2]/text()',
                          thread_name, name, delay=5,
                          log_msg='{}] - {} {} 爬取成功')