import requests
from bs4 import BeautifulSoup
import re
from crawl_helper.fake_useragent import fake_useragent
from crawl_helper.ippool.redis_ippool import IPPool
import time
from util.log import logger
from util.log import init_console_logger
import sys

"""
爬取代理网站获取ip

使用方法: 
    cd src
    python -m crawl_helper.ippool.get_proxy
"""


class GetProxy(object):
    """Crawl a free-proxy site (kuaidaili) and store validated proxy IPs in redis.

    Usage:
        cd src
        python -m crawl_helper.ippool.get_proxy

    NOTE(review): log calls throughout use '{}' placeholders with lazy args,
    which assumes util.log provides loguru-style formatting — confirm, since
    stdlib logging would emit the braces literally.
    """

    def __init__(self):
        # Page template of the free-proxy listing; the page number is 1-based.
        self.__URL_PREFIX = "https://www.kuaidaili.com/free/inha/%d"
        # Lightweight endpoint used to check whether a proxy actually works.
        self.__TEST_URL = "http://httpbin.org/get"

    @staticmethod
    def get_html(url, headers, proxies=False, retry_times=3):
        """Fetch *url* and return the decoded HTML, or None on failure.

        Args:
            url: page to download.
            headers: request headers (typically a randomized User-Agent).
            proxies: requests-style proxies dict, or False to connect directly.
            retry_times: number of attempts before giving up.

        Returns:
            The UTF-8-decoded body, or None if every attempt failed or the
            body could not be decoded.
        """
        response = None
        for _ in range(retry_times):
            # noinspection PyBroadException
            try:
                if not proxies:
                    response = requests.get(
                        url=url, headers=headers, timeout=5)
                else:
                    response = requests.get(
                        url=url, headers=headers, proxies=proxies, timeout=5)
                break
            except Exception as e:
                logger.exception(e)
                continue
        if response is None:
            logger.warning(
                "get response failed, please check the url: {}", url)
            # BUGFIX: the original fell through and dereferenced the None
            # response below, logging a spurious AttributeError; bail out here.
            return None

        # noinspection PyBroadException
        try:
            return response.content.decode("utf-8")
        except Exception as e:
            logger.exception(e)
            return None

    @staticmethod
    def parse_html2ip_list(html):
        """Parse the listing-page HTML into a list of proxy records.

        The elements appear in sequence on the website, like
        "163.204.240.21, 9999, 广东, 高匿, HTTP" — each record is the five
        <td> cells starting at an IP-looking cell:
        [proxy_ip, port, province, type, protocol].
        """
        # Hoisted out of the loop: one compile instead of one per cell.
        clean_pat = re.compile(r"\s+|\n+|\t+")
        ip_pat = re.compile(r"^\d+\.\d+\.\d+\.\d+$")

        ip_list = []
        soup = BeautifulSoup(html, "html.parser")
        tds = soup.find_all("td")

        for index, td in enumerate(tds):
            # An IP cell marks the start of one table row's data.
            # Bounds guard: a truncated table must not raise IndexError.
            if (ip_pat.match(clean_pat.sub("", td.text))
                    and index + 4 < len(tds)):
                item = [clean_pat.sub("", tds[index + offset].text)
                        for offset in range(5)]
                ip_list.append(item)
        return ip_list

    def ip_validation(self, raw_ip):
        """Check whether a crawled proxy actually works.

        Args:
            raw_ip: record from parse_html2ip_list; raw_ip[0] is the ip,
                raw_ip[1] the port.

        Returns:
            True iff an HTTPS request through the proxy returns HTTP 200.
        """
        ip_with_port = str(raw_ip[0]) + ":" + str(raw_ip[1])
        proxies = {"https": "https://" + ip_with_port}
        headers = {
            'User-Agent': fake_useragent.get_random_useragent(),
        }

        try:
            response = requests.get(
                url=self.__TEST_URL, headers=headers, proxies=proxies, timeout=5)
        except Exception as e:
            logger.exception(e)
            return False

        return response.status_code == 200

    def save_ip2redis(self, ip_list):
        """Validate each crawled proxy and store the working, highly
        anonymous ones in redis, logging the effective rate."""
        ip_cnt = len(ip_list)
        if ip_cnt == 0:
            # BUGFIX: avoid ZeroDivisionError in the rate computation below.
            return
        pool = IPPool()  # reuse one pool instead of constructing one per ip
        cnt = 0  # count of ips actually put into redis
        for ip in ip_list:
            # NOTE(review): per the parsing comment ip[2] is the province and
            # ip[3] the anonymity type, yet '高匿名' is compared against ip[2].
            # Kept as-is — confirm the index against the live page layout.
            if self.ip_validation(ip) and ip[2] == '高匿名':
                pool.insert_ip(ip)
                cnt += 1
        logger.info("get {} ip, the effective rate is {:.2f}%",
                    cnt, cnt / ip_cnt * 100)

    def gen_proxy_ip_pool(self, start_page=0, end_page=1):
        """Crawl listing pages [start_page+1 .. end_page] and fill the redis
        pool, sleeping 30s between pages to stay polite.

        Returns None early when a page yields no ips (likely blocked)."""
        urls = [self.__URL_PREFIX % (index + 1)
                for index in range(start_page, end_page)]
        url_cnt = len(urls)

        for index, url in enumerate(urls):
            logger.info("now parse the {} url，the overall progress is {}/{}, url:{}",
                        index + 1, index + 1, url_cnt, url)

            headers = {
                'User-Agent': fake_useragent.get_random_useragent(),
            }

            # Crawl through an already-pooled proxy to reduce the ban risk.
            ip = IPPool().get_random_key()
            proxies = {"http": "http://" + ip}

            response = self.get_html(url=url, headers=headers, proxies=proxies)
            # BUGFIX: get_html may return None; BeautifulSoup(None) would
            # raise, so treat a failed download as an empty page.
            ip_list = self.parse_html2ip_list(response) if response else []
            if len(ip_list) == 0:
                logger.warning(
                    "the ip_list is empty, please check the url:{}", url)
                # NOTE(review): this aborts the remaining pages; 'continue'
                # may be the intent — confirm before changing.
                return None
            self.save_ip2redis(ip_list)
            logger.info("we already crawl {} url, overall progress is {:.2f}%",
                        index + 1, (index + 1) / url_cnt * 100)
            logger.info("now sleep for 30s...")
            time.sleep(30)


if __name__ == "__main__":
    # Log to the console.
    init_console_logger()
    # Crawl proxy IPs and store them in redis.
    GetProxy().gen_proxy_ip_pool(start_page=0, end_page=10)
