#爬取csdn 热门文章
import json
import random
import urllib.request
from urllib.parse import unquote

from lxml import etree
# 请求文章详情
# Fetch and print the detail page of a single article
def queryBlogDetail(url, headers, proxies):
    """Fetch one CSDN article page through a proxy and print its title and HTML body.

    Args:
        url: absolute URL of the article detail page.
        headers: dict of HTTP request headers (must include a browser user-agent).
        proxies: proxy mapping for urllib.request.ProxyHandler, e.g. {'http': 'host:port'}.

    Raises:
        ValueError: if the expected title/content elements are missing from the page.
        urllib.error.URLError: on network / proxy failure.
    """
    request = urllib.request.Request(url=url, headers=headers)
    handler = urllib.request.ProxyHandler(proxies=proxies)
    opener = urllib.request.build_opener(handler)
    response = opener.open(request)
    content = response.read().decode("utf8")
    html = etree.HTML(content)

    # Guard against layout changes: fail with a clear message instead of a bare IndexError.
    title_nodes = html.xpath('//*[@id="articleContentId"]')
    if not title_nodes:
        raise ValueError(f"article title element not found: {url}")
    title_texts = title_nodes[0].xpath('text()')
    title = title_texts[0] if title_texts else ""

    content_nodes = html.xpath("//div[@id='article_content']")
    if not content_nodes:
        raise ValueError(f"article content element not found: {url}")

    print("文章地址=>", url)
    print("文章标题=>", title)
    print(etree.tostring(content_nodes[0], encoding='unicode', method='html'))
# 获取推荐文章列表的所有A标签href
# Collect the hrefs of all recommended articles on the CSDN web channel page
def requestCSDN():
    """Fetch the CSDN 'web' channel listing through a random proxy and
    print the detail of the first recommended article.

    Raises:
        urllib.error.URLError: on network / proxy failure.
    """
    url = 'https://blog.csdn.net/nav/web'
    # Pretend to be a real browser so the request is not rejected.
    headers = {
        'user-agent': ' Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/100.0.4896.127 Safari/537.36'
    }
    # Pool of HTTP proxies; one is picked at random per run to spread load.
    proxies_poll = [
        {'http': '103.37.141.69:80'},
        {'http': '118.24.219.151:16817'},
    ]
    proxies = random.choice(proxies_poll)

    request = urllib.request.Request(url=url, headers=headers)
    handler = urllib.request.ProxyHandler(proxies=proxies)
    opener = urllib.request.build_opener(handler)
    response = opener.open(request)
    content = response.read().decode("utf8")
    html = etree.HTML(content)

    # Each recommended card's link lives under the .content div inside .blog-content.
    hrefs = html.xpath('//*[@class="blog-content"]/div/div/div/div/div[@class="content"]/a/@href')
    if not hrefs:
        # Layout change or blocked response: nothing to scrape.
        print("未获取到文章链接")
        return
    # Only the first article is fetched; loop over hrefs to fetch all of them.
    queryBlogDetail(hrefs[0], headers, proxies)

# Script entry point: run the scraper when executed directly.
if __name__ == '__main__':
    requestCSDN()