#!/usr/bin/env python
# -*- coding:utf-8 -*-
# @FileName  :cidainwang_shuowenjiezi.py
# @Time      :2023/11/9 
# @Author    :CL
# @email     :1037654919@qq.com
# Scrapes Shuowen Jiezi dictionary entries from https://www.cidianwang.com/shuowenjiezi/
import math
import re
from bs4 import BeautifulSoup
import requests
from utils import mongo_manager,get_kuai_proxy
cidainwang_shuowenjiezi = mongo_manager("cidainwang_shuowenjiezi",db='public_data')

headers = {
    "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
    "Accept-Language": "zh-CN,zh;q=0.9",
    "Cache-Control": "no-cache",
    "Connection": "keep-alive",
    "Pragma": "no-cache",
    "Sec-Fetch-Dest": "document",
    "Sec-Fetch-Mode": "navigate",
    "Sec-Fetch-Site": "none",
    "Sec-Fetch-User": "?1",
    "Upgrade-Insecure-Requests": "1",
    "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36",
    "sec-ch-ua": "\"Not.A/Brand\";v=\"8\", \"Chromium\";v=\"114\", \"Google Chrome\";v=\"114\"",
    "sec-ch-ua-mobile": "?0",
    "sec-ch-ua-platform": "\"Linux\""
}
cookies = {
    "Hm_lvt_e0eec4afa8ab3236c032068920f81d60": "1699509492",
    "Hm_lpvt_e0eec4afa8ab3236c032068920f81d60": "1699515443"
}
# 获取所有说文解字的字和网址
def get_shuowenjiezi_list(url = "https://www.cidianwang.com/shuowenjiezi/"):
    response = requests.get(url,timeout=10,proxies=get_kuai_proxy(), headers=headers, cookies=cookies)
    response.encoding = "utf-8"
    print(response)

    sooups = BeautifulSoup(response.text, 'lxml')
    datas = sooups.find_all('div', class_='fj')
    lists = []
    for fj in datas:
        lis = fj.find_all('li')

        for data in lis:
            name = data.find('a').get_text()
            href = 'https://www.cidianwang.com' + data.find('a').get('href')
            lists.append({'name': name, 'href': href})
    # print(lists)
    return lists
# 获取说文解字详细信息
def download_shuowenjiezi(wenzi):
    url = wenzi['href']
    wenzi['_id']  = wenzi['href'].split('/')[-1].split('.')[0]
    print( wenzi['name'],url)
    response = requests.get(url, headers=headers, proxies=get_kuai_proxy(),timeout=10,cookies=cookies)
    response.encoding = "utf-8"
    soups = BeautifulSoup(response.text, 'lxml')

    infos = soups.find('div', class_='left')
    z_content = infos.find('div', class_='z_content').get_text() #拼音：ān cháng shǒu gù，简 拼：acsg
    wenzi['z_content'] = z_content.strip()
    children = infos.find('div', class_='zd_g2').children
    strs = ''
    for child in children:
        strs +=child.get_text()
    wenzi['data'] =strs.split('关注词典网微信公众号')[0].strip()
    wenzi['status'] = 'success'
    return wenzi


if __name__ == "__main__":
    print()
    # main()
    # print(get_shuowenjiezi_list())
    for wenzi in get_shuowenjiezi_list():
        wenzi['_id'] = wenzi['href'].split('/')[-1].split('.')[0]
        # try:
        #     cidainwang_shuowenjiezi.insertOne(wenzi)
        # except Exception as e:
        #     print(e)
        try:
            wenzi = download_shuowenjiezi(wenzi=wenzi)
            cidainwang_shuowenjiezi.updateOne({"_id":wenzi["_id"]},wenzi)
        except Exception as e:
            print(e)


