#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 2017/11/8 8:48
# @Author  : Yunhao.Cao
# @File    : main.py
from __future__ import absolute_import, unicode_literals
from bs4 import BeautifulSoup
import requests
import time

__author__ = 'Yunhao.Cao'

# Explicit public API: nothing is exported via ``from main import *``.
# (The original spelled this ``__ALL__``, which Python ignores entirely.)
__all__ = []


def spider(url):
    """
    Fetch one proxy-list page and parse every data row.

    :param url: URL of a single listing page (e.g. ``.../nn/1``).
    :return: list of 8-tuples ``(ip, port, location, connect_type,
             connect_speed, connect_time, alive_time, verify_time)``;
             empty list when the expected table is absent.
    :raises requests.HTTPError: if the server answers with an error status.
    """
    # The site rejects requests without a browser-like User-Agent.
    headers = {
        'User-Agent': "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.89 Safari/537.36"
    }

    # timeout: never hang forever on a dead server; raise_for_status: fail
    # loudly on 4xx/5xx instead of parsing an error page.
    response = requests.get(url, headers=headers, timeout=10)
    response.raise_for_status()
    soup = BeautifulSoup(response.text, "html.parser")

    table = soup.find(id="ip_list")
    if table is None:
        # Layout changed or we were blocked -- return nothing rather than crash.
        return []

    page_result = []
    # The first <tr> is the header row; data rows follow.
    for tr in table.find_all("tr")[1:]:
        td_list = tr.find_all("td")
        if len(td_list) < 10:
            # Skip separator/advert rows that lack the full column set.
            continue

        speed_bar = td_list[6].find("div", class_="bar")
        time_bar = td_list[7].find("div", class_="bar")

        page_result.append((
            td_list[1].text,                              # ip
            td_list[2].text,                              # port
            td_list[3].text,                              # location
            td_list[5].text,                              # connect type (HTTP/HTTPS)
            speed_bar.get("title") if speed_bar else "",  # connect speed
            time_bar.get("title") if time_bar else "",    # connect time
            td_list[8].text,                              # alive time
            td_list[9].text,                              # verify time
        ))

    return page_result


def write_to_file(file_url, page_result):
    """
    Append scraped proxy rows to a file, one ``type,ip:port`` line per row.

    :param file_url: path of the output file (opened in append mode).
    :param page_result: iterable of row tuples as produced by ``spider``;
                        only fields 0 (ip), 1 (port) and 3 (type) are used.
    :return: None
    """
    lines = ["{},{}:{}\n".format(entry[3], entry[0], entry[1])
             for entry in page_result]
    with open(file_url, "a") as out:
        out.writelines(lines)


def _main():
    """
    Entry point: scrape pages 1..N of the proxy list and append the
    results to a local text file, pausing between pages to be polite.
    """
    output_path = "./proxy_list.txt"
    base_url = "http://www.xicidaili.com/nn"

    # Number of listing pages to fetch (pages 1..page_count).
    page_count = 3
    print("** 需要爬{}页...".format(page_count))

    # Build one URL per page up front.
    page_urls = ["{}/{}".format(base_url, page)
                 for page in range(1, page_count + 1)]

    # Truncate any previous output before the append-mode writes below.
    with open(output_path, "w"):
        pass

    for page_number, page_url in enumerate(page_urls, start=1):
        rows = spider(page_url)
        write_to_file(output_path, rows)
        print("** 第{}页已完成...({}/{})".format(page_number, page_number,
                                              page_count))
        # Throttle requests so we do not hammer the server.
        time.sleep(5)


# Run the scraper only when executed as a script, not when imported.
if __name__ == '__main__':
    _main()
