#!/usr/bin/env python
# -*- coding:utf-8 -*-
# @FileName  :cidianwang_cy.py
# @Time      :2023/11/9 
# @Author    :CL
# @email     :1037654919@qq.com
# @Software  :PyCharm
# Scrapes https://www.cidianwang.com/cy/ (Chinese idiom dictionary)
import math
import re
from bs4 import BeautifulSoup
import requests
from utils import mongo_manager,get_kuai_proxy
# Mongo collection handle for the crawled idiom data.
# NOTE(review): "cidainwang" looks like a typo for "cidianwang", but it is
# kept as-is because the existing MongoDB collection uses this exact name.
cidainwang_cy = mongo_manager("cidainwang_cy",db='public_data')

# Browser-like request headers (captured from a Chrome 114 session on Linux)
# sent with every request so cidianwang.com serves the normal HTML pages.
headers = {
    "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
    "Accept-Language": "zh-CN,zh;q=0.9",
    "Cache-Control": "no-cache",
    "Connection": "keep-alive",
    "Pragma": "no-cache",
    "Sec-Fetch-Dest": "document",
    "Sec-Fetch-Mode": "navigate",
    "Sec-Fetch-Site": "none",
    "Sec-Fetch-User": "?1",
    "Upgrade-Insecure-Requests": "1",
    "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36",
    "sec-ch-ua": "\"Not.A/Brand\";v=\"8\", \"Chromium\";v=\"114\", \"Google Chrome\";v=\"114\"",
    "sec-ch-ua-mobile": "?0",
    "sec-ch-ua-platform": "\"Linux\""
}
# Session cookies captured alongside the headers (Hm_* are Baidu analytics
# cookies); sent so the traffic looks like a regular browser visit.
cookies = {
    "Hm_lvt_e0eec4afa8ab3236c032068920f81d60": "1699509492",
    "Hm_lpvt_e0eec4afa8ab3236c032068920f81d60": "1699509492"
}
# Get the URLs of all dictionaries (idiom dictionary, Xinhua dictionary, ...)
def get_cidianlist(url="https://www.cidianwang.com/cy/"):
    """Fetch the site's dictionary navigation list.

    :param url: page to scrape; defaults to the idiom-dictionary index.
    :return: list of ``{'name': link text, 'href': link target}`` dicts,
        one per ``<li>`` under ``<div class="title">``.
    """
    response = requests.get(url, headers=headers, cookies=cookies)
    # The site serves GBK-tagged pages; force UTF-8 like the rest of the file.
    response.encoding = "utf-8"
    soup = BeautifulSoup(response.text, 'lxml')
    items = soup.find('div', class_='title').find_all('li')
    lists = []
    for item in items:
        anchor = item.find('a')
        lists.append({'name': anchor.get_text(), 'href': anchor.get('href')})
    return lists
# Get idiom initial letters and their counts (counts are approximate)
def get_cy_num(url="https://www.cidianwang.com/cy/"):
    """Scrape the letter index (a–z) of the idiom dictionary.

    :param url: index page to scrape.
    :return: list of ``{'name': letter, 'href': relative url, 'num': count}``
        dicts, where ``num`` is the first digit run in the ``<li>`` text
        (the site's displayed count — known to be inexact).
    """
    response = requests.get(url, headers=headers, cookies=cookies)
    response.encoding = "utf-8"
    soup = BeautifulSoup(response.text, 'lxml')
    entries = soup.find('div', class_='slist').find_all('li')
    lists = []
    for entry in entries:
        anchor = entry.find('a')
        # <li> text looks like "A(123)"; the first digit run is the count.
        num = re.findall(r"\d+", entry.get_text())[0]
        lists.append({'name': anchor.get_text(), 'href': anchor.get('href'), 'num': num})
    return lists
# Get the idiom list from one letter-index page
def get_cy_list(url="https://www.cidianwang.com/cy/a1.htm"):
    """Scrape the idiom links from a single paginated letter-index page.

    :param url: listing page, e.g. ``.../cy/a1.htm``.
    :return: list of ``{'name': idiom, 'href': absolute detail-page url}``
        dicts; empty when the page has no ``<div class="left">`` listing
        (e.g. a page past the last one).
    """
    response = requests.get(url, headers=headers, cookies=cookies)
    response.encoding = "utf-8"
    soup = BeautifulSoup(response.text, 'lxml')
    lists = []
    try:
        items = soup.find('div', class_='left').find_all('li')
    except AttributeError:
        # find() returned None: the listing div is absent on this page.
        items = []
    for item in items:
        anchor = item.find('a')
        lists.append({'name': anchor.get_text(),
                      'href': 'https://www.cidianwang.com' + anchor.get('href')})
    return lists
# download_cyinfo: collect every idiom's detail-page URL into MongoDB
def download_cyinfo():
    """Walk each letter index, page through its listings, and insert one
    seed document per idiom into the ``cidainwang_cy`` collection.

    Each seed is ``{'name', 'href', 'letter', '_id'}`` where ``_id`` is the
    detail page's filename stem, so re-running the crawl cannot create
    duplicates (duplicate-key errors are caught and printed).
    """
    for ll in get_cy_num():
        url = 'https://www.cidianwang.com/cy/' + ll['href']
        response = requests.get(url, headers=headers, cookies=cookies, proxies=get_kuai_proxy())
        response.encoding = "utf-8"
        soups = BeautifulSoup(response.text, 'lxml')
        # The rzbar banner on the first page holds the real total for this
        # letter, which drives the pagination below.
        rzbar = soups.find('div', class_='rzbar').get_text()
        num = re.findall(r"\d+", rzbar)[0]
        print(rzbar, num)
        ll['num'] = num
        # ll itself could be persisted here as well; skipped for now.

        try:
            datas = soups.find('div', class_='left').find_all('li')
        except AttributeError:
            # No listing div on this first page; rely on the paginated URLs.
            datas = []
        for data in datas:
            cy = {}
            cy['name'] = data.find('a').get_text()
            cy['href'] = 'https://www.cidianwang.com' + data.find('a').get('href')
            cy['letter'] = ll['name']
            # Filename stem of the detail page ("/cy/abc.html" -> "abc").
            cy["_id"] = str(cy['href']).split('/')[-1].split('.')[0]
            try:
                cidainwang_cy.insertOne(cy)
            except Exception as e:
                # Most likely a duplicate _id; log and continue.
                print(e)
        # ~60 entries per page; later pages append the page number to the
        # href stem (e.g. "a1.htm" -> "a11.htm", "a12.htm", ...).
        for page in range(1, math.ceil(int(num) / 60)):
            url = 'https://www.cidianwang.com/cy/' + str(ll['href']).split('.')[0] + str(page) + '.htm'
            print(url)
            for cy in get_cy_list(url):
                cy['letter'] = ll['name']
                cy["_id"] = str(cy['href']).split('/')[-1].split('.')[0]
                try:
                    cidainwang_cy.insertOne(cy)
                except Exception as e:
                    print(e)
# Fetch and parse one idiom's detail page
def download_cy(cy):
    """Fill *cy* in place with fields parsed from its detail page.

    Adds ``z_content`` (the summary line, e.g. pinyin/abbreviation),
    one key per ``<h2>``/``<p>`` pair found in the ``js`` section
    (with the trailing full-width colon stripped from the key), and
    ``status='success'``. Returns the same dict.
    """
    target = cy['href']
    print(cy['name'], target)
    resp = requests.get(target, headers=headers, proxies=get_kuai_proxy(), timeout=10, cookies=cookies)
    resp.encoding = "utf-8"
    left = BeautifulSoup(resp.text, 'lxml').find('div', class_='left')
    # Summary line, e.g. "拼音：ān cháng shǒu gù，简 拼：acsg"
    cy['z_content'] = left.find('div', class_='z_content').get_text().strip()
    section = left.find('div', class_='js')
    headings = section.find_all('h2')
    paragraphs = section.find_all('p')
    for heading, paragraph in zip(headings, paragraphs):
        key = heading.get_text().replace('：', '')
        cy[key] = paragraph.get_text()
    cy['status'] = 'success'
    return cy

def main(batch_size=1000):
    """Process unscraped idiom seeds (``status`` is None) in batches.

    Pulls up to *batch_size* seeds from MongoDB, downloads each detail page
    via :func:`download_cy`, and writes the enriched document back. Stops
    when a batch comes back smaller than *batch_size* (nothing left).

    :param batch_size: number of seeds claimed per round (default 1000,
        matching the original hard-coded value).
    """
    cidainwang_cy = mongo_manager("cidainwang_cy", db='public_data')
    while True:
        # Materialize the cursor up front so iteration isn't interleaved
        # with the slow per-item HTTP requests below.
        batch = list(cidainwang_cy.findAll({'status': None}).limit(batch_size))
        print(len(batch))
        for cy in batch:
            try:
                cys = download_cy(cy)
                cidainwang_cy.updateOne({'_id': cys['_id']}, cys)
            except Exception as e:
                # One bad page shouldn't stop the run; the seed keeps
                # status=None and is retried on a later pass.
                print(e)
        if len(batch) < batch_size:
            break
    cidainwang_cy.close()


if __name__ == "__main__":
    # Removed a leftover debug print() that only emitted a blank line.
    # Run download_cyinfo() first to seed the URL collection, then main().
    # download_cyinfo()
    main()