#!/usr/bin/env python
# -*- coding:utf-8 -*-
# @FileName  :cidianwang_cd.py
# @Time      :2023/11/9 
# @Author    :CL
# @email     :1037654919@qq.com
# Scrapes https://www.cidianwang.com/cd/ (cidianwang.com Chinese dictionary index)
import math
import re
from bs4 import BeautifulSoup
import requests
from tqdm import tqdm

from utils import mongo_manager,get_kuai_proxy
# MongoDB collection handle used by download_cdinfo() for storing scraped entries.
# NOTE(review): mongo_manager comes from the project-local `utils` module;
# presumably it wraps a pymongo collection — confirm against utils.py.
cidainwang_cd = mongo_manager("cidainwang_cd",db='public_data')

# Browser-like request headers (mimicking Chrome 114 on Linux) so the site
# serves normal HTML instead of blocking the scraper.
headers = {
    "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
    "Accept-Language": "zh-CN,zh;q=0.9",
    "Cache-Control": "no-cache",
    "Connection": "keep-alive",
    "Pragma": "no-cache",
    "Sec-Fetch-Dest": "document",
    "Sec-Fetch-Mode": "navigate",
    "Sec-Fetch-Site": "none",
    "Sec-Fetch-User": "?1",
    "Upgrade-Insecure-Requests": "1",
    "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36",
    "sec-ch-ua": "\"Not.A/Brand\";v=\"8\", \"Chromium\";v=\"114\", \"Google Chrome\";v=\"114\"",
    "sec-ch-ua-mobile": "?0",
    "sec-ch-ua-platform": "\"Linux\""
}
# Static cookies captured from a browser session; the Hm_* names look like
# Baidu-analytics cookies — likely not required for access, TODO confirm.
cookies = {
    "Hm_lvt_e0eec4afa8ab3236c032068920f81d60": "1699509492",
    "Hm_lpvt_e0eec4afa8ab3236c032068920f81d60": "1699509492"
}

# Fetch the dictionary index page and list, per initial letter, the entry
# count shown on the page (counts are known to be inaccurate) and the link.
def get_cd_num(url = "https://www.cidianwang.com/cd/"):
    """Return [{'name': ..., 'href': ..., 'num': ...}, ...] from the index page.

    'num' is the first digit run found in each <li>'s text, as a string.
    """
    resp = requests.get(url, headers=headers, cookies=cookies)
    resp.encoding = "utf-8"
    print(resp)
    soup = BeautifulSoup(resp.text, 'lxml')
    # The last div.d2 on the page holds the per-letter <li> entries.
    items = soup.find_all('div', class_='d2')[-1].find_all('li')
    results = []
    for item in items:
        anchor = item.find('a')
        count = re.findall(r"\d+", item.get_text())[0]
        results.append({
            'name': anchor.get_text(),
            'href': anchor.get('href'),
            'num': count,
        })
    return results
# Fetch one letter's paginated listing page and return its dictionary entries.
def get_cd_list(url="https://www.cidianwang.com/cd/a1.htm"):
    """Return [{'name': ..., 'href': ...}, ...] parsed from one listing page.

    Returns whatever entries were parsed before the markup ran out — an
    empty list when div.left is missing entirely (e.g. past the last page).
    """
    response = requests.get(url, headers=headers, cookies=cookies)
    response.encoding = "utf-8"
    print(response)
    sooups = BeautifulSoup(response.text, 'lxml')
    lists = []
    try:
        datas = sooups.find('div', class_='left').find_all('li')
        for data in datas:
            cd = {}
            cd['name'] = data.find('a').get_text()
            cd['href'] = 'https://www.cidianwang.com' + data.find('a').get('href')

            lists.append(cd)
    except AttributeError:
        # find() returned None (missing div.left or an <li> without an <a>):
        # keep the best-effort partial list instead of crashing, but no longer
        # swallow KeyboardInterrupt/SystemExit as the old bare `except:` did.
        pass
    return lists
# download_cdinfo: crawl the letter index pages (a-z) and store every
# dictionary entry's name/href into MongoDB (collection: cidainwang_cd).
def _store_cd(cd, letter):
    """Stamp letter/_id onto one entry dict and insert it, logging duplicates.

    _id is the page slug, e.g. ".../cd/xxx.htm" -> "xxx".
    """
    cd['letter'] = letter.upper()
    cd["_id"] = str(cd['href']).split('/')[-1].split('.')[0]
    try:
        cidainwang_cd.insertOne(cd)
    except Exception as e:
        # most likely a duplicate-key error on re-runs -- log and keep going
        print(e)

def download_cdinfo():
    """Download entry lists for every initial letter and persist them."""
    for code in tqdm(range(ord("a"), ord("z") + 1)):
        ll = chr(code)
        url = f'https://www.cidianwang.com/cd/{ll}.htm'
        response = requests.get(url, headers=headers, cookies=cookies, proxies=get_kuai_proxy())
        response.encoding = "utf-8"
        soups = BeautifulSoup(response.text, 'lxml')
        try:
            rzbar = soups.find('div', class_='rzbar').get_text()
            num = re.findall(r"\d+", rzbar)[0]
            print(rzbar, num)
        except (AttributeError, IndexError):
            # no count banner on the page -> this letter has no entries
            print(f'{ll} has 0 words')
            continue
        # First page lives at .../{ll}.htm and is parsed inline here.
        try:
            datas = soups.find('div', class_='left').find_all('li')
            for data in datas:
                anchor = data.find('a')
                _store_cd({'name': anchor.get_text(),
                           'href': 'https://www.cidianwang.com' + anchor.get('href')}, ll)
        except AttributeError:
            # missing div.left / anchor: nothing parseable on the first page
            pass
        # Remaining pages live at .../{ll}1.htm, {ll}2.htm, ... (60 entries each).
        for page in range(1, math.ceil(int(num) / 60)):
            page_url = f'https://www.cidianwang.com/cd/{ll}{page}.htm'
            print(page_url)
            cdlists = get_cd_list(page_url)
            for cd in cdlists:
                _store_cd(cd, ll)
            if len(cdlists) < 60:
                # short page -> past the real end (the banner count is unreliable)
                break
# Fetch the detail page for one dictionary entry and fill in its fields.
def download_cd(cd):
    """Download cd['href'], populate the entry dict in place, and return it.

    Adds: z_content (pinyin line), a_content (raw HTML), data (cleaned text),
    status ('success').
    """
    url = cd['href']
    print(cd['name'], url)
    response = requests.get(url, headers=headers, proxies=get_kuai_proxy(), timeout=10, cookies=cookies)
    response.encoding = "utf-8"
    page = BeautifulSoup(response.text, 'lxml')
    left = page.find('div', class_='left')
    # div.zd holds the pinyin header, e.g. "拼音：zhāo xù"
    cd['z_content'] = left.find('div', class_='zd').get_text().strip()
    a_content = left.find('div', class_='a_content')
    cd['a_content'] = str(a_content)
    # Flatten the body text and drop the trailing WeChat-account footer.
    body_text = ''.join(child.get_text() for child in a_content.children)
    cd['data'] = body_text.split('关注词典网微信公众号')[0].strip()
    cd['status'] = 'success'
    return cd

def main():
    """Process entries with no status yet, in batches of 1000, until done."""
    collection = mongo_manager("cidainwang_cd", db='public_data')
    while True:
        batch = list(collection.findAll({'status': None}).limit(1000))
        print(len(batch))
        for cd in batch:
            try:
                filled = download_cd(cd)
                collection.updateOne({'_id': filled['_id']}, filled)
            except Exception as e:
                # one bad page must not stop the batch -- log and continue
                print(e)
        if len(batch) < 1000:
            # fewer than a full batch means no more pending entries
            break
    collection.close()


if __name__ == "__main__":
    # Intended run order: download_cdinfo() once to seed the entry list,
    # then main() to fill in the details. (Removed a stray debug `print()`
    # that only emitted a blank line.)
    # print(get_cd_num())
    # download_cdinfo()
    main()