from bs4 import BeautifulSoup
import execjs
import re

"""处理爬下后的页面"""

head = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36 Edg/120.0.0.0'

def bs_x(path = r'./xiaomi.html'):
    """Extract Chinese product names from a saved results page.

    Reads the HTML file at *path*, regex-matches every
    ``class="high">...</a>`` fragment, then keeps the first run of CJK
    characters found in each fragment.

    Args:
        path: Path to the saved HTML file (UTF-8 encoded).

    Returns:
        list[str]: one Chinese-text fragment per matched anchor
        (the list is also printed, preserving the original behavior).

    Raises:
        OSError: if the file cannot be opened or read.
    """
    with open(path, 'r', encoding='utf-8') as f:
        article = f.read()

    # Raw strings avoid the invalid '\/' escape the original pattern used
    # (a SyntaxWarning on modern Python). '.' does not match newlines, so
    # the greedy '.*' stays confined to a single line per match.
    anchor_re = re.compile(r'class="high">.*</a>')
    cjk_re = re.compile(r'[\u4e00-\u9fa5]+')  # second pass: keep only CJK runs

    store = []
    for fragment in anchor_re.findall(article):
        cjk_runs = cjk_re.findall(fragment)
        if cjk_runs:  # findall always returns a list; empty means no CJK text
            store.append(cjk_runs[0])

    print(store)
    return store  # also returned so callers can actually use the data


if __name__ == '__main__':
    # Run the extractor against the default saved page. The original bound
    # the result to an unused local 'a'; the binding served no purpose.
    bs_x()

# # Create a JavaScript execution context
# ctx = execjs.compile("""
# function add(x, y) {
#     return x + y;
# }
# """)
#
# # Call the JavaScript function inside that context
# result = ctx.call("add", 3, 4)
# print(result)

