#!/usr/bin/env python
# -*- coding:utf-8 -*-
# @FileName  :cidianwang_zd.py
# @Time      :2023/11/10 
# @Author    :CL
# @email     :1037654919@qq.com
# 爬取 https://www.cidianwang.com/zd/
import random
import time

from bs4 import BeautifulSoup
import requests
from retrying import retry
from tqdm import tqdm

from utils import mongo_manager, get_kuai_proxy

# MongoDB collections (via the project-local `mongo_manager` helper):
# `cidainwang_zd` holds scraped character entries, `cidainwang_zd_suoyin`
# holds the index pages that seed the crawl.
cidainwang_zd = mongo_manager("cidainwang_zd", db='public_data')
cidainwang_zd_suoyin = mongo_manager("cidainwang_zd_suoyin", db='public_data')

# Browser-like request headers copied from a real Chrome session so the
# site serves normal pages instead of blocking the scraper.
headers = {
    "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
    "Accept-Language": "zh-CN,zh;q=0.9",
    "Cache-Control": "no-cache",
    "Connection": "keep-alive",
    "Pragma": "no-cache",
    "Sec-Fetch-Dest": "document",
    "Sec-Fetch-Mode": "navigate",
    "Sec-Fetch-Site": "none",
    "Sec-Fetch-User": "?1",
    "Upgrade-Insecure-Requests": "1",
    "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36",
    "sec-ch-ua": "\"Not.A/Brand\";v=\"8\", \"Chromium\";v=\"114\", \"Google Chrome\";v=\"114\"",
    "sec-ch-ua-mobile": "?0",
    "sec-ch-ua-platform": "\"Linux\""
}
# Cookies captured from the same browser visit (analytics cookies only).
cookies = {
    "Hm_lvt_e0eec4afa8ab3236c032068920f81d60": "1699509492",
    "Hm_lpvt_e0eec4afa8ab3236c032068920f81d60": "1699509492"
}


# https://www.cidianwang.com/bushou/  radical index
def get_bushou(url='https://www.cidianwang.com/bushou/'):
    """Scrape the radical (bushou) index page.

    Returns a list of dicts, one per radical:
    {'type': 'bushou', 'name': <radical>, 'href': <absolute list URL>}.
    """
    # timeout added for consistency with the other fetch helpers in this
    # file; without it a stalled connection hangs the crawl forever.
    response = requests.get(url, headers=headers, cookies=cookies, timeout=10)
    response.encoding = "utf-8"
    print(response.url, response)
    soups = BeautifulSoup(response.text, 'lxml')
    datas = soups.find('div', class_='left').find_all('a')
    return [
        {'type': 'bushou',
         'name': a.get_text(),
         'href': 'https://www.cidianwang.com' + a.get('href')}
        for a in datas
    ]


# https://www.cidianwang.com/pinyin/  pinyin index
def get_pinyin(url='https://www.cidianwang.com/pinyin/'):
    """Scrape the pinyin index page.

    Returns a list of dicts, one per syllable:
    {'type': 'pinyin', 'name': <syllable>, 'href': <absolute list URL>}.
    """
    # timeout added for consistency with the other fetch helpers in this
    # file; without it a stalled connection hangs the crawl forever.
    response = requests.get(url, headers=headers, cookies=cookies, timeout=10)
    response.encoding = "utf-8"
    print(response)
    soups = BeautifulSoup(response.text, 'lxml')
    datas = soups.find('div', class_='bs').find_all('a')
    return [
        {'type': 'pinyin',
         'name': a.get_text(),
         'href': 'https://www.cidianwang.com' + a.get('href')}
        for a in datas
    ]


# https://www.cidianwang.com/bihua/  stroke-count index
def get_bihua(url='https://www.cidianwang.com/bihua/'):
    """Build the stroke-count (bihua) index.

    The stroke pages follow a fixed URL pattern, so the list is generated
    locally instead of being scraped.  `url` is kept only for signature
    parity with the other index getters and is not used.
    """
    return [
        {'type': 'bihua',
         'name': str(n),
         'href': f'https://www.cidianwang.com/bihua/{n}hua.htm'}
        for n in range(1, 75)  # 74 is the site's maximum stroke count
    ]


# Get the character list from a stroke-count index page.
def get_wenzi_list_bihua(url="https://www.cidianwang.com/bihua/1hua.htm"):
    """Scrape one stroke-count page into a list of character dicts.

    Each dict has 'name', 'href', 'bushou' and 'pinyin' keys.  Returns []
    when the page lacks the expected structure.
    """
    # timeout added for consistency with the other fetch helpers.
    response = requests.get(url, headers=headers, cookies=cookies, timeout=10)
    response.encoding = "utf-8"
    print(response)
    soups = BeautifulSoup(response.text, 'lxml')
    lists = []
    try:
        # hoisted: the original looked up div.left three times
        left = soups.find('div', class_='left')
        hanzi = left.find_all('div', class_='c1')   # character cells
        bushou = left.find_all('div', class_='c2')  # radical cells
        pinyin = left.find_all('div', class_='c3')  # pinyin cells
        for h, b, p in zip(hanzi, bushou, pinyin):
            lists.append({
                'name': h.find('a').get_text(),
                # NOTE(review): unlike get_wenzi_list_bushou_pinyin this href
                # is NOT prefixed with the domain — confirm whether these
                # pages really carry absolute links.
                'href': h.find('a').get('href'),
                'bushou': b.get_text(),
                'pinyin': p.get_text().strip(),
            })
    except AttributeError:
        # a find() returned None: unexpected page layout (e.g. error page);
        # narrowed from the original bare `except:` which hid every error.
        pass
    return lists


# Get the character list from a radical or pinyin index page.
def get_wenzi_list_bushou_pinyin(url="https://www.cidianwang.com/pinyin/ai3.htm"):
    """Scrape one radical/pinyin page into a list of character dicts.

    Each dict has 'name', 'href' (absolute), 'bihua' and 'pinyin' keys.
    Returns [] when the page lacks the expected structure.
    """
    response = requests.get(url, headers=headers, timeout=10, cookies=cookies)
    response.encoding = "utf-8"
    print(response)
    soups = BeautifulSoup(response.text, 'lxml')
    lists = []
    try:
        # hoisted: the original looked up div.left three times
        left = soups.find('div', class_='left')
        hanzi = left.find_all('div', class_='c1')   # character cells
        bihua = left.find_all('div', class_='c2')   # stroke-count cells
        pinyin = left.find_all('div', class_='c3')  # pinyin cells
        for h, b, p in zip(hanzi, bihua, pinyin):
            lists.append({
                'name': h.find('a').get_text(),
                'href': 'https://www.cidianwang.com' + h.find('a').get('href'),
                'bihua': b.get_text(),
                'pinyin': p.get_text().strip(),
            })
    except AttributeError:
        # a find() returned None: unexpected page layout (e.g. error page);
        # narrowed from the original bare `except:` which hid every error.
        pass
    return lists
@retry(stop_max_attempt_number=3)
def get_wenzi_info(url2):
    """Fetch a character detail page.

    Returns the HTML text on HTTP 200, otherwise None.
    NOTE(review): @retry only re-runs on exceptions (e.g. timeout), not on
    a non-200 status — those fall straight through to None; confirm intended.
    """
    resp = requests.get(url2, headers=headers, cookies=cookies, timeout=10)  # proxies=get_kuai_proxy()
    resp.encoding = "utf-8"
    return resp.text if resp.status_code == 200 else None

# download_zdinfo: download and store every character under one index entry.
def download_zdinfo(souyin):
    """Fetch each character linked from the index entry `souyin` and insert
    the raw detail-page <section> HTML into the `cidainwang_zd` collection.

    The character URL doubles as the Mongo _id, so re-runs hit duplicate-key
    errors on insertOne, which are caught and printed.
    """
    url = souyin['href']
    for wenzi in get_wenzi_list_bushou_pinyin(url=url):
        url2 = wenzi['href']
        wenzi['_id'] = url2  # URL is the primary key
        print(wenzi['name'], url2)

        response = get_wenzi_info(url2)
        if not response:
            continue
        soups = BeautifulSoup(response, 'lxml')
        left = soups.find('div', class_='left')
        if left is None:
            # fix: the original crashed with AttributeError on pages without
            # a div.left (e.g. error pages); skip the record instead.
            print('no <div class="left"> on', url2)
            continue
        wenzi['section'] = str(left.find('section'))
        # TODO: parse `section` into structured fields (see parse_data_zd)
        try:
            cidainwang_zd.insertOne(wenzi)
        except Exception as e:
            # typically a duplicate-key error on re-runs
            print(e)



def main():
    """Crawl all pending pinyin-index pages and mark them done."""
    # NOTE(review): these locals shadow the module-level managers of the same
    # names, while download_zdinfo() still writes through the module-level
    # `cidainwang_zd` — so two managers per collection exist here; confirm
    # whether re-creating them is intentional.
    cidainwang_zd = mongo_manager("cidainwang_zd", db='public_data')
    cidainwang_zd_suoyin = mongo_manager("cidainwang_zd_suoyin", db='public_data')
    # Seeds: pinyin-index entries not yet marked with a status.
    seeds = cidainwang_zd_suoyin.findAll({'status': None,'type': 'pinyin'})
    for souyin in seeds:
        print(souyin)
        time.sleep(2)  # politeness delay between index pages
        download_zdinfo(souyin)
        # souyin['status'] = 'success'
        cidainwang_zd_suoyin.updateOne({'_id': souyin['_id']},{'status': 'success'})
    cidainwang_zd.close()
    cidainwang_zd_suoyin.close()
# Persist the index (suoyin) entries into the cidainwang_zd_suoyin collection.
def download_suoyin():
    """Collect all three indexes (radical, pinyin, stroke count) and insert
    them, using each entry's URL as the Mongo _id so re-runs are idempotent
    (duplicates raise and are merely printed).
    """
    lists = get_bushou()
    lists += get_pinyin()
    lists += get_bihua()
    for souyin in lists:
        souyin['_id'] = souyin["href"]
        try:
            cidainwang_zd_suoyin.insertOne(souyin)
        except Exception as e:
            # fix: was `except BaseException`, which also swallowed
            # KeyboardInterrupt/SystemExit and made the loop unkillable.
            print(e)
# Final pass: parse the stored <section> HTML with BeautifulSoup and write
# structured fields back into cidainwang_zd.
def parse_data_zd():
    """For every stored character without 'info', split the section text on
    '，', store the parts as 'info', extract the phonetic list into 'bpm',
    and update the record in place.
    """
    seeds = cidainwang_zd.findAll({'info': None})
    for seed in tqdm(seeds):
        soup = BeautifulSoup(seed['section'], 'lxml')
        text = soup.text.split('，')
        seed['info'] = text
        try:
            # second clause is expected to look like "<label>：a b c";
            # keep the space-separated values only
            seed['bpm'] = [bpm for bpm in text[1].split('：')[1].split(' ') if bpm != '']
        except IndexError:
            # fix: one malformed/empty section used to abort the whole pass;
            # skip the record (leave it un-updated) and keep going.
            print('unparsable section for', seed.get('_id'))
            continue
        cidainwang_zd.updateOne({'_id': seed['_id']}, seed)

if __name__ == "__main__":
    print()
    # print(get_bushou())
    # download_suoyin()  # step 1: populate the index collection (run once)
    main()  # step 2: crawl character pages for all pending pinyin indexes
    parse_data_zd()  # step 3: parse stored sections into structured fields
