import requests
import lxml.html
import pymongo
# 1. Disguise the request with a browser User-Agent to reduce the chance of
#    being rejected by the site's anti-scraping checks.
headers = {'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/128.0.0.0 Safari/537.36 Edg/128.0.0.0'}
# Without a timeout requests.get can block forever on a stalled connection;
# fail fast after 10 seconds instead.
response = requests.get('https://www.wzj.com/', headers=headers, timeout=10)
print(response.status_code)


# 2. Assemble one level-1 category entry and store it in the result list.
def create_filter_data(i, names, hrefs):
    """Pair sub-category names with their links and append the assembled
    level-1 entry to the global ``filter_list``.

    Args:
        i: Index into the global ``tab_items`` giving the level-1 title.
        names: Sub-category display names.
        hrefs: Sub-category links, positionally aligned with ``names``.
    """
    # zip pairs each name with the href at the same position. The previous
    # names.index(name) lookup always returned the FIRST occurrence, so any
    # duplicated name was paired with the wrong href (and each lookup was a
    # full O(n) scan of the list).
    children = [
        {"name": name, "href": href}
        for name, href in zip(names, hrefs)
    ]
    lv1 = {
        "name": tab_items[i].strip().replace('\n', ''),  # level-1 title, whitespace removed
        "children": children
    }
    filter_list.append(lv1)


filter_list = [   # final payload: one {"name": <lv1 title>, "children": [...]} per category
]
if response.status_code == 200:
    my_html = response.text   # raw HTML of the target page
    selector = lxml.html.fromstring(my_html)
    # Level-1 category titles.
    tab_items = selector.xpath('//div[@class="filter-header"]/div/div[@class="tab-list tab-single-infos"]/div/text()')
    # print(tab_items)

    # One container element per level-1 category, holding its sub-categories.
    filter_main = selector.xpath('//div[@class="filter-main"]')
    print(filter_main)
    # enumerate replaces filter_main.index(filter_m), which rescanned the
    # whole list on every iteration (O(n^2) overall).
    for idx, filter_m in enumerate(filter_main):
        filter_href = filter_m.xpath('.//a/@href')  # sub-category links
        filter_name = filter_m.xpath('.//a/div/img/@alt')  # sub-category names
        if idx == 2:
            # The third container uses a different markup layout and needs
            # its own XPath for the names.
            filter_name = filter_m.xpath('.//div/div/img/@alt')
        print(filter_href)
        print(filter_name)
        create_filter_data(idx, filter_name, filter_href)

print(filter_list)
# Persist the scraped categories into MongoDB.

client = pymongo.MongoClient('localhost', 27017)
db = client['spider-db']
collection = db['wzj_category']
# insert_many raises pymongo InvalidOperation on an empty list (e.g. when the
# request did not return 200 and nothing was scraped), so guard the call.
if filter_list:
    collection.insert_many(filter_list)

