#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 2017/11/8 10:13
# @Author  : Yunhao.Cao
# @File    : main.py
from __future__ import absolute_import, unicode_literals
import random
from bs4 import BeautifulSoup
import requests
import time
import logging

__author__ = 'Yunhao.Cao'

# NOTE(review): the star-import export list convention is lowercase `__all__`;
# uppercase `__ALL__` has no effect on `from module import *` — presumably a
# typo, confirm intent before renaming (external code might reference it).
__ALL__ = []


def spider(url, proxy):
    """
    A simple spider: fetch *url* through *proxy* and return the page title.

    :param url: URL to fetch.
    :param proxy: pair like ("http", "http://127.0.0.1:9999") — protocol name
                  first, proxy address second.
    :return: text of the page's <title> tag, or "" if the page has no <title>.
    :raises requests.RequestException: on connection failure or timeout.
    """
    headers = {
        'User-Agent': "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.89 Safari/537.36"
    }

    # Map protocol name -> proxy address, e.g. {"http": "http://127.0.0.1:9999"}.
    proxies = {
        proxy[0].lower(): proxy[1],
    }

    print(proxies)

    # Bug fix: the original call had no timeout, so a dead/slow proxy would
    # block this request (and the whole single-process loop) forever.
    response = requests.get(url, headers=headers, proxies=proxies, timeout=30)
    soup = BeautifulSoup(response.text, "html.parser")

    # Bug fix: soup.title is None for pages without a <title> tag, which made
    # the original raise AttributeError; fall back to an empty string.
    title = soup.title.text if soup.title is not None else ""

    print(title)

    return title


def read_file():
    """
    Read the URL list and the proxy list from text files in the CWD.

    ``url_list.txt`` holds one URL per line; ``proxy_list.txt`` holds one
    "protocol,address" pair per line.

    :return: (url_list, proxy_list) — url_list is a list of str,
             proxy_list is a list of [protocol, address] lists.
    :raises IOError: if either file is missing.
    """
    url_list_file = "./url_list.txt"
    proxy_list_file = "./proxy_list.txt"

    # Bug fix: on Python 3, map() returns a lazy iterator, but callers slice
    # (url_list[:10]) and index/len() these values — materialize real lists.
    with open(url_list_file, "r") as f:
        url_list = [line.replace("\n", "") for line in f]

    with open(proxy_list_file, "r") as f:
        proxy_list = [line.replace("\n", "").split(",") for line in f]

    return url_list, proxy_list


def get_random_proxy(proxy_list):
    """
    Pick one proxy at random from *proxy_list*.

    :param proxy_list: non-empty sequence of (protocol, address) pairs,
                       e.g. [["http", "127.0.0.1:9999"], ...]
    :return: (protocol, address) tuple; the address always carries a
             "protocol://" scheme prefix.
    """
    # Idiom fix: random.choice replaces the hand-rolled
    # proxy_list[int(random.random() * len(proxy_list))] indexing.
    connect_type, ip = random.choice(proxy_list)
    # Prepend the scheme if the address does not already include one.
    if "://" not in ip and connect_type not in ip:
        ip = "{}://{}".format(connect_type, ip)

    return connect_type, ip


def _main():
    """Crawl the first 10 URLs, each through a random proxy, logging timings."""
    # Log file location and logging setup.
    logging_file = "./log.txt"
    logging.basicConfig(filename=logging_file, level=logging.INFO)

    # Read the url list and the proxy list from disk.
    url_list, proxy_list = read_file()

    # Robustness fix: force real lists so slicing and random indexing work
    # even if read_file returns lazy map iterators (Python 3 behaviour).
    url_list = list(url_list)[:10]
    proxy_list = list(proxy_list)

    # Bug fix: time.clock() was removed in Python 3.8; prefer perf_counter()
    # (3.3+) and fall back to clock() on Python 2.
    timer = getattr(time, "perf_counter", time.clock)

    # Single-process traversal of the url list.
    for url in url_list:
        proxy = get_random_proxy(proxy_list)

        # Time the fetch of this one URL.
        start_time = timer()
        spider(url, proxy)
        end_time = timer()
        interval = end_time - start_time

        # One log line per URL: proxy, wall-clock timestamp, url, elapsed time.
        log = "{}, {}, {}, {}s".format(proxy[1], time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time())), url,
                                       interval)
        print(log)
        logging.info(log)
        print("Sleep .....")
        time.sleep(5)


if __name__ == '__main__':
    # Run the crawler only when executed as a script, not on import.
    _main()
