# -*- coding:UTF-8 -*-
# Crawl CSDN trending/recommended articles
import urllib.request
from lxml import etree
import json
import html2text as ht
import requests
import re
from html2markdown import convert
# Save an article to the local backend service
def saveBlog(blog):
    """POST an article payload to the local save-article endpoint.

    blog -- dict with the article fields assembled by queryBlogDetail
            ('title', 'contentMd', 'content', 'avatar', 'summary',
             'tagNameList', 'originalUrl').
    """
    headers = {'Content-type': 'application/json'}
    # timeout keeps the crawler from hanging forever if the local service is down
    requests.post(
        url='http://localhost:8800/shiyi/v1/article/saveArticle',
        data=json.dumps(blog),
        headers=headers,
        timeout=10,
    )
    print("保存成功=>", blog.get('title'))
# Request an article's detail page
def queryBlogDetail(url, headers, proxies, avatarurl, desc):
    """Download one CSDN article page, extract its content/title/tags, save it.

    url       -- article page URL
    headers   -- HTTP headers (user-agent) to send with the request
    proxies   -- proxy mapping for urllib's ProxyHandler
    avatarurl -- cover image URL (stored as 'avatar')
    desc      -- article summary text (stored as 'summary')
    """
    request = urllib.request.Request(url=url, headers=headers)
    handler = urllib.request.ProxyHandler(proxies=proxies)
    opener = urllib.request.build_opener(handler)
    # context manager closes the connection deterministically (was leaked before)
    with opener.open(request) as response:
        content = response.read().decode("utf8")
    html = etree.HTML(content)

    contentHtml = html.xpath('//*[@id="content_views"]')[0]
    titleEle = html.xpath('//*[@id="articleContentId"]/span[1]')[0]
    titleText = titleEle.text

    # The tag list lives in an inline JS variable rather than in the DOM,
    # so pull it out of the serialized page with a regex.
    tagSource = etree.tostring(html, encoding='unicode', method='html')
    tagList = []
    match = re.search(r'var toolbarSearchExt = \'(.+?)\';', tagSource)
    if match:
        result = match.group(1)
        print(result)
        toolbarSearchExt_json = json.loads(result)
        tagList = toolbarSearchExt_json["tag"]
        print(tagList)
    else:
        print("No match found.")

    htmlText = etree.tostring(contentHtml, encoding='unicode', method='html')

    # Convert the article HTML to Markdown.
    # (The unused html2text.HTML2Text() instance from the original was removed;
    # html2markdown.convert does the actual conversion.)
    contentMd = convert(htmlText)

    saveBlog({
        'contentMd': contentMd,
        'content': etree.tostring(contentHtml, encoding='utf-8').decode('utf-8'),
        'title': titleText,
        'avatar': avatarurl,
        'summary': desc,
        'tagNameList': tagList,
        'originalUrl': url
    })


# Fetch the recommended-article list and crawl every linked article
def requestCSDN(cate='web'):
    """Fetch CSDN's home-page recommendation feed and crawl every article in it.

    cate -- category segment of the feed API query (default 'web'); added as a
            backward-compatible parameter so other feed categories can be crawled.
    """
    import random

    url = ('https://cms-api.csdn.net/v1/web_home/select_content'
           '?componentIds=www-blog-recommend&offset_id=0&cate1=' + cate)
    # Impersonate a mobile browser so the API responds normally
    headers = {
        'user-agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 13_2_3 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/13.0.3 Mobile/15E148 Safari/604.1'
    }
    # Small static proxy pool; one entry is picked at random per run
    proxies_poll = [
        {'http': '103.37.141.69:80'},
        {'http': '118.24.219.151:16817'}
    ]
    proxies = random.choice(proxies_poll)

    request = urllib.request.Request(url=url, headers=headers)
    handler = urllib.request.ProxyHandler(proxies=proxies)
    opener = urllib.request.build_opener(handler)
    # context manager closes the connection deterministically (was leaked before)
    with opener.open(request) as response:
        content = response.read().decode("utf8")

    articleList = json.loads(content).get('data').get('www-blog-recommend').get('info')
    print(articleList)

    for item in articleList:
        # look up 'extend' once per item instead of three times
        extend = item.get('extend')
        queryBlogDetail(extend.get('url'), headers, proxies,
                        extend.get('pic'), extend.get('desc'))

# Script entry point: kick off the crawl when run directly.
if __name__ == '__main__':
    requestCSDN()
