'''
import re #正则表达式

str = "共3614条  123/181"
cost=re.findall(r'[1-9]+', str)[0]
print(cost) #['3614', '181']
'''
import requests

url = r"http://news.whpu.edu.cn/info/1002/17083.htm"
def askURL(url):
    """Fetch a web page and return its HTML text.

    Sends a GET request with a browser-like User-Agent header so the
    server does not reject the request as an obvious bot.

    Args:
        url: The page URL to fetch.

    Returns:
        The page body decoded as UTF-8, or "" if the request failed.
    """
    # Browser-like header so the server treats us as a normal client.
    header = {"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.130 Safari/537.36"}
    try:
        # timeout bounds the wait; the original could hang indefinitely.
        resp = requests.get(url=url, headers=header, timeout=10)
    except Exception as e:
        # Original intent: report the error and keep going. Returning ""
        # here also fixes a crash — the original fell through and called
        # `.encoding` on the placeholder string "".
        print(e)
        return ""
    # The site serves UTF-8; set it explicitly to avoid mojibake from
    # requests' charset auto-detection.
    resp.encoding = "utf-8"
    return resp.text

url1 = "http://news.whpu.edu.cn/info/1002/17073.htm"  # sample article used by the demo call at the bottom of the file

#获取浏览量
#网站的内容由前端的JS动态生成
from selenium import webdriver
from selenium.webdriver import Chrome, ChromeOptions
from time import sleep
from lxml import etree
def getView(url):
    """Return the page-view count of a news article.

    The counter is injected by the site's front-end JavaScript, so a
    plain HTTP GET never sees it; a headless Chrome instance loads the
    page, waits briefly for the scripts to run, and the value is then
    read out of the rendered DOM via XPath.

    Args:
        url: News-article URL whose view counter should be read.

    Returns:
        The view count as an int.
    """
    # Headless mode: no visible browser window (works on Windows/Linux).
    opt = ChromeOptions()
    opt.headless = True
    # Path to the local chromedriver binary matching the installed Chrome.
    path = r'F:\Google Chrome x64\chromedriver.exe'
    bro = webdriver.Chrome(path, options=opt)
    try:
        bro.get(url)
        # Give the page's JS time to inject the counter before scraping;
        # without this the XPath below finds nothing.
        sleep(0.5)
        # page_source holds the DOM after dynamic content has loaded.
        tree = etree.HTML(bro.page_source)
        view = tree.xpath("//*[@id=\"content\"]/div[2]/form/div/ul/li[4]/span[4]/span/text()")[0]
        view = int(view)
        print(view)
    finally:
        # quit() ends the session and closes every window. The original
        # called close() AFTER quit(), which raises on the already-dead
        # session and also leaked the browser on any earlier error; the
        # try/finally guarantees cleanup on every path.
        bro.quit()
    return view

# Demo: scrape the view counter for one article.
# Renamed from `str` — the original shadowed the builtin str type.
view_count = getView(url1)
print(type(view_count))  # <class 'int'>
#html = askURL(url)
#print(html)

