"""
Author: MoQsien
主要用于爬取快代理网站的免费HTTP代理IP，并测试代理IP的可用性，
将可用的代理IP存入json文件中备用
"""

import requests
import json
import random
from lxml import etree
from user_agents import user_agent_list


class KuaiDaiLi(object):
    """Scrape free high-anonymity proxy IPs from kuaidaili.com, verify
    that they work, and persist the usable ones to a JSON-lines file.

    Args:
        pages: iterable of positive page numbers of the free-proxy
               listing to crawl (e.g. [2, 3]).
    """

    def __init__(self, pages):
        self.start_url = "https://www.kuaidaili.com/free/inha/"
        # Random desktop User-Agent plus a same-site Referer so the
        # requests look like they come from a browser.
        self.headers = {
            "User-Agent": random.choice(user_agent_list),
            "Referer": self.start_url
        }
        self.pages = pages

    # Build the request URLs
    def gen_urls(self, pages):
        """Return the listing-page URL for each page number.

        Always returns a list. (The previous version returned None for
        an empty input, which crashed the caller's `for url in urls` loop.)
        """
        return [self.start_url + str(page_num) + "/" for page_num in pages]

    # Send the request, return the response body
    def get_content(self, url):
        """Fetch *url* and return the decoded response body as text."""
        response = requests.get(url, headers=self.headers)
        return response.content.decode()

    # Extract proxy IP information
    def extract_ips(self, content):
        """Parse a listing page into a list of proxy dicts.

        Each dict has keys "ip", "port" and "type" (e.g. "HTTP").
        A fresh dict is built per table row: the previous version reused
        one dict, so every list entry aliased the last row parsed.
        """
        html = etree.HTML(content)
        items = html.xpath("//div[@id='list']//table//tbody//tr")
        ip_list = []
        for item in items:
            # Column layout on kuaidaili: td[1]=IP, td[2]=port, td[4]=protocol.
            ip_list.append({
                "ip": item.xpath("./td[1]/text()")[0],
                "port": item.xpath("./td[2]/text()")[0],
                "type": item.xpath("./td[4]/text()")[0],
            })
        return ip_list

    # Test which proxy IPs actually work
    def test_ips(self, ip_list):
        """Probe each HTTP proxy against baidu.com; return the working ones.

        Builds a new list instead of popping from *ip_list* while
        iterating it (the old approach skipped the element following
        every removal). Only proxies that answer with HTTP 200 are kept,
        and only request-level errors are swallowed.
        """
        working = []
        for ip_info in ip_list:
            if ip_info['type'] != "HTTP":
                continue
            print(ip_info['type'])
            proxies = {"http": ip_info['ip'] + ':' + ip_info['port']}
            try:
                respon = requests.get("http://www.baidu.com", proxies=proxies, timeout=5)
            except requests.RequestException:
                # Proxy is dead, refused, or too slow -- drop it.
                continue
            print(respon.status_code)
            if respon.status_code == 200:
                working.append(ip_info)
        return working

    # Save the usable proxy IPs
    def save_ips(self, ip_list):
        """Append proxies to ./ip.json, one JSON object per line.

        The previous version concatenated indented json.dump() outputs,
        producing a file no JSON parser could read back; JSON Lines keeps
        the append-mode behavior while staying machine-readable.
        """
        with open("./ip.json", "a", encoding="utf-8") as f:
            for ip_info in ip_list:
                f.write(json.dumps(ip_info, ensure_ascii=False) + "\n")

    # Run the spider
    def run_spider(self):
        """Crawl every configured page: fetch, parse, verify, persist."""
        for url in self.gen_urls(self.pages):
            content = self.get_content(url)
            ip_list = self.extract_ips(content)
            self.save_ips(self.test_ips(ip_list))


def main():
    """Entry point: crawl pages 2 and 3 of the free-proxy listing."""
    crawler = KuaiDaiLi([2, 3])
    crawler.run_spider()


# Run the crawler only when executed as a script, not when imported.
if __name__ == '__main__':
    main()
