
import requests
import random, time
from bs4 import BeautifulSoup
import requests,time,re
from lxml import etree
import urllib.parse

def get_ua():
    """Return a randomly chosen desktop-browser User-Agent string.

    Used to vary request fingerprints so successive requests do not all
    carry the same UA header.
    """
    candidates = (
        "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; AcooBrowser; .NET CLR 1.1.4322; .NET CLR 2.0.50727)",
        "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0; Acoo Browser; SLCC1; .NET CLR 2.0.50727; Media Center PC 5.0; .NET CLR 3.0.04506)",
        "Mozilla/4.0 (compatible; MSIE 7.0; AOL 9.5; AOLBuild 4337.35; Windows NT 5.1; .NET CLR 1.1.4322; .NET CLR 2.0.50727)",
        "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Win64; x64; Trident/5.0; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 2.0.50727; Media Center PC 6.0)",
        "Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 1.0.3705; .NET CLR 1.1.4322)",
        "Mozilla/4.0 (compatible; MSIE 7.0b; Windows NT 5.2; .NET CLR 1.1.4322; .NET CLR 2.0.50727; InfoPath.2; .NET CLR 3.0.04506.30)",
        "Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN) AppleWebKit/523.15 (KHTML, like Gecko, Safari/419.3) Arora/0.3 (Change: 287 c9dfb30)",
        "Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.2pre) Gecko/20070215 K-Ninja/2.1.1",
        "Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN; rv:1.9) Gecko/20080705 Firefox/3.0 Kapiko/3.0",
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11",
        "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_3) AppleWebKit/535.20 (KHTML, like Gecko) Chrome/19.0.1036.7 Safari/535.20",
        "Opera/9.80 (Macintosh; Intel Mac OS X 10.6.8; U; fr) Presto/2.9.168 Version/11.52",
    )
    return random.choice(candidates)


def create_proxy_pool(pages=10):
    """Scrape free HTTP proxies from kxdaili.com and return them as a pool.

    pages: number of listing pages to fetch (each page lists up to 10 proxies).
    Returns a list of dicts shaped like ``{'http': 'ip:port'}``, directly
    usable as the ``proxies=`` argument of ``requests.get``.

    Pages that fail to download are skipped rather than aborting the whole
    pool build, so the returned pool may be smaller than ``pages * 10``.
    """
    proxy_pool = []
    # The listing table renders ip and port in consecutive <td> cells, so a
    # single numeric-cell pattern yields [ip, port, ip, port, ...].
    # Compile once instead of re-scanning the pattern on every page.
    cell_pattern = re.compile(r'<td>([\d.]+)</td>')

    for page in range(1, pages + 1):
        url = f'http://www.kxdaili.com/dailiip/1/{page}.html'
        try:
            response = requests.get(url=url, timeout=10,
                                    headers={'User-Agent': get_ua()})
            response.raise_for_status()
        except requests.RequestException as exc:
            # Skip an unreachable page instead of crashing the pool build.
            print(f'第{page}页获取失败: {exc}')
            continue

        results = cell_pattern.findall(response.text)
        print(results)
        # Pair alternating cells as (ip, port); zip stops at the shorter
        # slice, so a trailing unpaired value is safely ignored — no need
        # for the old IndexError-driven loop termination.
        for ip, port in zip(results[0::2], results[1::2]):
            proxy_pool.append({'http': f'{ip}:{port}'})

        time.sleep(5)  # throttle so the free-proxy site doesn't block us
    return proxy_pool

# def create_proxy_pool(pages=10):
#     """
#     pages:int:需要获取的ip页数，一页有十个ip,默认获取十页，即100个ip
#     """
#     # 从快代理免费ip代理网站获取代理IP列表
#     url = "https://free.kuaidaili.com/free/inha/{}/"
#     proxy_pool = []
#     print(f"正在构建ip池，本次ip池构建数量:{pages*10}....")
#     for page in range(pages):
#         r = requests.get(url.format(page +1), headers={'User-Agent': get_ua()})
#         soup = BeautifulSoup(r.content, 'lxml')
#         ips = soup.findAll('tr')
#         # 0为标头，所以从1开始遍历
#         for i in range(1, len(ips)):
#             ip_row = ips[i]
#             tds = ip_row.findAll("td")
#             proxy = {'http': tds[0].get_text()}
#             proxy_pool.append(proxy)
#     return proxy_pool


def start_read(csdn_url, count=50):
    """Repeatedly visit a CSDN blog post through random proxies.

    csdn_url: the blog post URL to visit.
    count: number of visits to attempt (default 50).

    Each visit waits 60-120 s afterwards so CSDN counts it as a separate
    view. Returns None; progress is reported via print().
    """
    # NOTE(review): pool size is fixed at 20 pages (~200 proxies) regardless
    # of `count` — kept as-is to preserve the existing behavior.
    proxy_pool = create_proxy_pool(20)
    print('代理ip池：%s' % proxy_pool)
    if not proxy_pool:
        # random.choice([]) would raise IndexError on every iteration.
        print('代理ip池为空，无法访问')
        return
    for i in range(1, count + 1):
        try:
            proxy = random.choice(proxy_pool)
            print('=========\n当前代理IP: %s' % proxy)
            r = requests.get(url=csdn_url, timeout=10, proxies=proxy,
                             headers={'User-Agent': get_ua()})
            soup = BeautifulSoup(r.content, "lxml")
            # Scrape the article title and its current view count; either
            # may be absent (error page, blocked proxy), so guard against
            # find() returning None before calling get_text().
            title = soup.find('h1')
            read_count = soup.find('span', attrs={'class': 'read-count'})
            if title is not None:
                print('文章标题：%s' % title.get_text())
            if read_count is not None:
                print('当前文章访问量：%s\n' % read_count.get_text())
            print('正在进行第%s次访问...' % i)
            # Wait 60-120 s at random, otherwise CSDN treats consecutive
            # hits as the same visit.
            sleep_time = random.randint(60, 120)
            print('访问完成，开始等待，本次等待时间：%ds' % sleep_time)
            time.sleep(sleep_time)
        except requests.RequestException as exc:
            # Only network-level failures are expected here; the old bare
            # `except:` also swallowed KeyboardInterrupt and real bugs.
            print('访问失败: %s' % exc)

if __name__ == '__main__':
    # Only CSDN blog links are supported.
    blog_url = 'http://t.csdnimg.cn/nr8E9'
    start_read(blog_url, 1000)
