from urllib.request import urlopen
from urllib.request import Request
from urllib import parse
from bs4 import BeautifulSoup as bs
import pymysql.cursors
import random
import threading
from com.chq.network.proxy_server import get_proxys, use_proxy

# Blog home URL to scrape article links from.
blog_home_url = 'https://blog.csdn.net/QIU176161650'

# Cached list of proxy server addresses; refilled on demand.
proxy_servers = []

def get_random_proxy_server():
    """
    Return a randomly chosen proxy server from the cached pool.

    When fewer than 5 proxies remain cached, the pool is refreshed via
    get_proxys(), retrying up to 3 times because the proxy source may
    transiently return an empty list.

    :return: a proxy object (exposes .ip and .port, per use_random_proxy),
             or None when no proxy could be obtained.
    """
    global proxy_servers
    # Refill the pool when it is running low.
    if len(proxy_servers) < 5:
        # BUG FIX: the original called get_proxys() twice per failed attempt
        # (once in the loop body and again in the else branch), discarding
        # the extra fetch. One call per attempt is enough.
        for _ in range(3):
            proxy_servers = get_proxys()
            if proxy_servers:
                break
    # random.choice raises IndexError on an empty sequence, so guard first.
    return random.choice(proxy_servers) if proxy_servers else None


def use_random_proxy(url):
    """
    Fetch *url* through a randomly chosen proxy server.

    :param url: the URL to fetch.
    :return: whatever use_proxy() returns for the request.
    :raises RuntimeError: when no proxy server is available.
    """
    proxy_server = get_random_proxy_server()
    # BUG FIX: get_random_proxy_server() may return None; the original then
    # crashed with an opaque AttributeError on .ip. Fail with a clear message.
    if proxy_server is None:
        raise RuntimeError("No proxy server available for %s" % url)
    proxy_addr = "%s:%s" % (proxy_server.ip, proxy_server.port)
    print(url)
    return use_proxy(proxy_addr, url)


if __name__ == "__main__":
    threads = []
    # Fetch the blog home page through a proxy and collect article links.
    blog_home_data = use_random_proxy(blog_home_url)
    soup = bs(blog_home_data, "html.parser")
    for child in soup.select("div[class='article-item-box csdn-tracking-statistics']"):
        article_url = child.select_one("a").get("href")
        # Queue 30 fetches per article, each in its own daemon thread.
        for _ in range(30):
            threads.append(
                threading.Thread(target=use_random_proxy, args=(article_url,), daemon=True)
            )

    for t in threads:
        # setDaemon() is deprecated; the daemon flag is set in the constructor above.
        t.start()
    # BUG FIX: the original exited right after starting daemon threads, which
    # kills them before they can finish. Wait for all workers to complete.
    for t in threads:
        t.join()
