# -*- coding: utf-8 -*-
# @Time : 2020/8/14 14:39
# @Author : yongf
# @File : ProxyGet.py
# @Software: PyCharm

from bs4 import BeautifulSoup
from model.ips import *
import json
import re
from threading import *


class ProxyGet:
    """Scrape free proxy lists from several public sites, validate each
    proxy with ``checkProxyValid`` and persist working ones via the ``ips``
    model.  Each public method handles one source site and is intended to
    run in its own thread (see ``__main__``)."""

    @staticmethod
    def _parse_port(text):
        """Return *text* stripped and converted to ``int``; an empty/blank
        string is returned as the stripped (falsy) string, mirroring the
        original inline ``if port: port = int(port)`` idiom."""
        port = text.strip()
        return int(port) if port else port

    def save(self, ip, port, server_address, server_type, source):
        """Validate one scraped proxy and store it if it is alive.

        :param ip: proxy host address
        :param port: proxy port (int)
        :param server_address: location string as shown on the source site
        :param server_type: protocol name, upper-cased before storage
        :param source: URL of the site the proxy was scraped from
        """
        msg = "%s\t代理：%s:%d" % (source, ip, port)
        # Only proxies that pass a liveness check are stored.
        if checkProxyValid(ip, port):
            # NOTE(review): ``ips.save`` is deliberately called with the class
            # itself as ``self`` — kept as-is to preserve the existing contract.
            if ips.save(ips, ip, port, server_address, server_type.upper(), source):
                logging.info(msg + "\t有效，入库成功")
            else:
                logging.info(msg + "\t有效，已在数据库中")
        else:
            logging.info(msg + "\t无效")

    def yqie1(self):
        """Scrape http://ip.yqie.com/proxyhttps/ (HTTPS proxy list).

        The page is parsed as a flat sequence of ``<td>`` cells, 6 per row;
        cells 1-4 of each row hold ip / port / location / protocol.
        """
        source = 'http://ip.yqie.com/'
        html = openUrl("http://ip.yqie.com/proxyhttps/")
        cells = BeautifulSoup(html, 'html.parser').find_all('td')
        for i in range(0, len(cells), 6):
            ip = cells[i + 1].text.strip()
            port = self._parse_port(cells[i + 2].text)
            server_address = cells[i + 3].text.strip()
            server_type = cells[i + 4].text.strip()
            self.save(ip, port, server_address, server_type, source)

    def yqie2(self):
        """Scrape http://ip.yqie.com/ipproxy.htm (mixed HTTP/HTTPS list).

        Same flat-``<td>`` layout as :meth:`yqie1`, but the row starts at
        the ip cell (offsets 0/1/2 plus protocol at offset 4).
        """
        source = 'http://ip.yqie.com/'
        html = openUrl("http://ip.yqie.com/ipproxy.htm")
        cells = BeautifulSoup(html, 'html.parser').find_all('td')
        for i in range(0, len(cells), 6):
            ip = cells[i].text.strip()
            port = self._parse_port(cells[i + 1].text)
            server_address = cells[i + 2].text.strip()
            server_type = cells[i + 4].text.strip()
            self.save(ip, port, server_address, server_type, source)

    def ip66(self):
        """Scrape http://www.66ip.cn/areaindex_<n>/1.html for city pages
        1-29.  Rows are 5 ``<td>`` cells; the first 5 cells are the table
        header and are skipped.  The site lists HTTP proxies only.
        """
        source = 'http://www.66ip.cn/'
        logging.info("开始抓取66免费代理网：%s" % source)
        template = 'http://www.66ip.cn/areaindex_:number/1.html'
        for n in range(1, 30):
            logging.info("%s\t爬取第%d个城市" % (source, n))
            html = openUrl(url=template.replace(':number', str(n)), charset='gbk')
            cells = BeautifulSoup(html, 'html.parser').find(id='footer').find_all('td')
            # Start at 5 to skip the header row.
            for i in range(5, len(cells), 5):
                ip = cells[i].text.strip()
                port = self._parse_port(cells[i + 1].text)
                server_address = cells[i + 2].text.strip()
                self.save(ip, port, server_address, 'HTTP', source)
            time.sleep(5)  # throttle between pages

    def ip89(self):
        """Scrape http://www.89ip.cn/index_<n>.html for pages 1-9.

        Same 5-cell row layout as :meth:`ip66`; header row skipped; HTTP only.
        """
        source = 'http://www.89ip.cn/'
        logging.info("开始抓取89免费代理：%s" % source)
        template = 'http://www.89ip.cn/index_%d.html'
        for n in range(1, 10):
            logging.info("%s\t爬取第%d页" % (source, n))
            cells = BeautifulSoup(openUrl(url=template % n), 'html.parser').find_all('td')
            # Start at 5 to skip the header row.
            for i in range(5, len(cells), 5):
                ip = cells[i].text.strip()
                port = self._parse_port(cells[i + 1].text)
                server_address = cells[i + 2].text.strip()
                self.save(ip, port, server_address, 'HTTP', source)
            time.sleep(5)  # throttle between pages

    def ip3366(self):
        """Scrape http://www.ip3366.net/?stype=1&page=<n> for pages 1-9.

        Rows are 8 ``<td>`` cells: ip at 0, port at 1, protocol at 3,
        location at 5.
        """
        source = 'http://www.ip3366.net/'
        logging.info("开始抓取云代理：%s" % source)
        template = 'http://www.ip3366.net/?stype=1&page=%s'
        for n in range(1, 10):
            logging.info("%s\t爬取第%d页" % (source, n))
            cells = BeautifulSoup(openUrl(url=template % n, charset='gbk'),
                                  'html.parser').find_all('td')
            # Guard kept: a partial page with 1-7 stray cells must be skipped.
            if len(cells) > 7:
                for i in range(0, len(cells), 8):
                    ip = cells[i].text.strip()
                    port = self._parse_port(cells[i + 1].text)
                    server_address = cells[i + 5].text.strip()
                    server_type = cells[i + 3].text.strip()
                    self.save(ip, port, server_address, server_type, source)
            time.sleep(5)  # throttle between pages

    def kuaidaili(self):
        """Scrape https://www.kuaidaili.com/free/inha/<n>/ for pages 1-3.

        Rows are 7 ``<td>`` cells: ip at 0, port at 1, protocol at 3,
        location at 4.
        """
        source = 'https://www.kuaidaili.com/free/'
        logging.info("开始抓取快代理：%s" % source)
        template = 'https://www.kuaidaili.com/free/inha/%d/'
        for n in range(1, 4):
            logging.info("%s\t爬取第%d页" % (source, n))
            cells = BeautifulSoup(openUrl(url=template % n), 'html.parser').find_all('td')
            # Guard kept: a partial page with 1-5 stray cells must be skipped.
            if len(cells) > 5:
                for i in range(0, len(cells), 7):
                    ip = cells[i].text.strip()
                    port = self._parse_port(cells[i + 1].text)
                    server_address = cells[i + 4].text.strip()
                    server_type = cells[i + 3].text.strip()
                    self.save(ip, port, server_address, server_type, source)
            time.sleep(5)  # throttle between pages

    def superfastip(self):
        """Scrape the JSON feed https://api.superfastip.com/ip/freeip?page=<n>
        for pages 1-9.

        Bug fix: ``source`` was never assigned in this method, so the very
        first ``logging.info`` call raised ``NameError``.
        """
        source = 'https://api.superfastip.com/'
        logging.info("开始抓取极速专享代理：%s" % source)
        for n in range(1, 10):
            data = json.loads(openUrl(url='https://api.superfastip.com/ip/freeip?page=%d' % n))
            logging.info(data)
            if len(data) > 0:
                for entry in data['freeips']:
                    ip = entry['ip'].strip()
                    port = self._parse_port(entry['port'])
                    server_address = entry['country'].strip()
                    server_type = entry['type'].strip()
                    self.save(ip, port, server_address, server_type, source)
            time.sleep(5)  # throttle between pages

    def xsdaili(self):
        """Scrape http://www.xsdaili.cn/ — follow the two newest article
        links and extract ``ip:port@TYPE`` entries from the article body.

        Bug fix: dots in the IP regex are now escaped; the old pattern
        matched *any* character in the separator positions.
        """
        logging.info('开始爬取 小舒代理 http://www.xsdaili.cn/')
        url = 'http://www.xsdaili.cn/'
        soup = BeautifulSoup(openUrl(url), 'html.parser')
        titles = soup.find_all('div', class_='title')
        pattern = re.compile(r'(\d+\.\d+\.\d+\.\d+):(\d+)@(\w+)')
        if len(titles) > 1:
            # Only the two most recent posts are visited.
            for title in titles[:2]:
                detail = openUrl(url + title.a['href'])
                body = BeautifulSoup(detail, 'html.parser').find('div', class_='cont').text
                for host, port_text, proto in pattern.findall(body):
                    self.save(host.strip(), self._parse_port(port_text),
                              '', proto.strip(), url)
                time.sleep(2)  # throttle between articles
        logging.info('爬取结束 小舒代理 http://www.xsdaili.cn/')

    def xiladaili(self):
        """Scrape http://www.xiladaili.com/ (gaoni/http/https lists, pages
        1-5 each).  Rows are 8 ``<td>`` cells; cell 0 is ``ip:port``,
        protocol at 1, location at 3.

        Bug fix: the page URL is now built into a separate variable — the
        old code rebound ``url`` to the formatted string, so formatting it
        again for page 2 raised ``TypeError`` ("not all arguments
        converted during string formatting").
        """
        logging.info('开始爬取 西拉免费代理IP 质量比较高 http://www.xiladaili.com/')
        templates = [
            "http://www.xiladaili.com/gaoni/%d/",
            "http://www.xiladaili.com/http/%d/",
            "http://www.xiladaili.com/https/%d/",
        ]
        for template in templates:
            for n in range(1, 6):
                page_url = template % n
                cells = BeautifulSoup(openUrl(page_url), 'html.parser').find_all('td')
                # Guard kept: a partial page with 1-5 stray cells must be skipped.
                if len(cells) > 5:
                    for i in range(0, len(cells), 8):
                        proxy = cells[i].text.split(':')
                        ip = proxy[0].strip()
                        port = self._parse_port(proxy[1])
                        server_type = cells[i + 1].text.strip().replace('代理', '')
                        server_address = cells[i + 3].text.strip()
                        self.save(ip, port, server_address, server_type, page_url)
                time.sleep(2)  # throttle between pages
        logging.info('爬取结束 西拉免费代理IP 质量比较高 http://www.xiladaili.com/')

    def ihuan(self):
        """Scrape https://ip.ihuan.me/?page=<n> for pages 1-300.

        The previous version was an unfinished draft: a debug ``print``,
        unconditional ``break`` statements after the first row/page, a
        possibly-unbound ``server_type``, and no call to ``save``.  It now
        walks every page, classifies each proxy by its HTTPS-support cell
        and stores it.  Rows are 10 ``<td>`` cells: ip at 0, port at 1,
        location at 2, HTTPS support at 4.
        """
        source = 'https://ip.ihuan.me/'
        template = 'https://ip.ihuan.me/?page=%s'
        for page in range(1, 301):
            cells = BeautifulSoup(openUrl(template % page), 'html.parser').find_all('td')
            # Guard kept: a partial page with 1-9 stray cells must be skipped.
            if len(cells) > 9:
                for i in range(0, len(cells), 10):
                    ip = cells[i].a.text.strip()
                    port = self._parse_port(cells[i + 1].text)
                    server_address = cells[i + 2].a.text.strip()
                    # '不支持' means HTTPS is not supported by this proxy.
                    https_flag = cells[i + 4].text.strip()
                    server_type = 'HTTP' if https_flag == '不支持' else 'HTTPS'
                    self.save(ip, port, server_address, server_type, source)
            time.sleep(2)  # throttle between pages


if __name__ == '__main__':
    p = ProxyGet()
    # One worker thread per source site (these are threads, not processes —
    # the old comment said "process list").
    threads = [
        Thread(target=p.yqie1),        # free proxies  http://ip.yqie.com/proxyhttps/
        Thread(target=p.yqie2),        # free proxies  http://ip.yqie.com/ipproxy.htm
        Thread(target=p.ip66),         # 66ip          http://www.66ip.cn/areaindex_1/1.html
        Thread(target=p.ip89),         # 89ip          http://www.89ip.cn/index_1.html
        Thread(target=p.ip3366),       # ip3366        http://www.ip3366.net/?stype=1&page=1
        Thread(target=p.kuaidaili),    # kuaidaili     https://www.kuaidaili.com/free/inha/1/
        Thread(target=p.superfastip),  # superfastip   https://api.superfastip.com/ip/freeip?page=1
        Thread(target=p.xsdaili),      # xsdaili       http://www.xsdaili.cn/
        Thread(target=p.xiladaili),    # xiladaili     http://www.xiladaili.com/
    ]
    # p.ihuan()  # ihuan  https://ip.ihuan.me/

    for t in threads:
        t.start()
    # Wait for every scraper to finish so the process exits deterministically.
    for t in threads:
        t.join()
