
import logging
from queue import Full
import requests
import time
import json
from bs4 import BeautifulSoup
import re
from insert_to_db import insert_items_to_db

# Logging configuration: emit to a UTF-8 log file and to the console
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s',
    handlers=[
        logging.FileHandler("chinese_download.log", encoding="utf-8"),
        logging.StreamHandler()
    ]
)
logger = logging.getLogger(__name__)

# Base URL of the dictionary site being scraped
website = 'http://56nl.com'
# Accumulated phrase records; flushed to the DB at the end of the run
items = []
# Site-relative category listing paths ("/zm/N/") to crawl
arr = ["/zm/5/","/zm/8/","/zm/11/","/zm/14/","/zm/18/","/zm/22/","/zm/25/","/zm/28/",
       "/zm/31/","/zm/34/","/zm/37/","/zm/41/","/zm/45/","/zm/48/","/zm/50/","/zm/56/","/zm/59/",
       "/zm/62/","/zm/65/","/zm/68/","/zm/72/","/zm/75/"
       ]

def dwonload_chinese_content():
    """Crawl phrase listing pages from 56nl.com and collect phrase details.

    Iterates over the category paths in ``arr`` (currently sliced to the
    first entry only — presumably a testing limit, widen ``arr[:1]`` for a
    full crawl), extracts each phrase's title and pinyin from the listing,
    and delegates per-phrase detail scraping to ``getCidianDetail``, which
    appends results to the module-level ``items`` list.
    """
    # Hoisted out of the loop: the headers are identical for every request.
    headers = {
        "User-Agent": (
            "Mozilla/5.0 (Windows NT 10.0; Win64; x64) "
            "AppleWebKit/537.36 (KHTML, like Gecko) "
            "Chrome/115.0.0.0 Safari/537.36"
        ),
        "Accept": "*/*",
        "Connection": "keep-alive",
        "Accept-Charset": "utf-8",  # ask the server for UTF-8 content
        # add Referer / Cookie here if needed
    }
    for ch in arr[:1]:
        url = website + ch
        try:
            # timeout=60 is a single combined connect+read timeout
            # (the old comment claiming "connect 40s / read 60s" was wrong)
            response = requests.get(url, stream=True, headers=headers, timeout=60)
            response.encoding = 'utf-8'  # force UTF-8 decoding of the body
            response.raise_for_status()
            html = response.text
            if html.strip() == '':
                # Bug fix: 'return' here aborted ALL remaining categories;
                # an empty page should only skip this one.
                continue
            for el in extract_btn_w4(html):
                # Each "btn w4" box holds one <li> per phrase; cap at 30.
                for li in el.find_all("li")[:30]:
                    a = li.find("a", title=True)
                    if a is None:
                        continue  # bug fix: guard <li> without a titled <a>
                    phrase = a.get('title').strip()   # phrase from title attr
                    span = a.find('span')
                    pinyin = span.get_text(strip=True) if span else ''
                    href = (a.get('href') or '').strip()
                    time.sleep(2)  # throttle before the detail request
                    obj = {
                        "phrase": phrase,
                        "pinyin": pinyin
                    }
                    getCidianDetail(href, obj)  # fetch the detail page
        except Exception as e:
            logger.error(f"下载失败：{url}，错误信息：{str(e)}")
        time.sleep(4)  # polite delay between category pages

#  Fetch one phrase's dictionary detail page and record it in `items`.
def getCidianDetail(cdurl, obj):
    """Download a phrase's detail page, extract its meanings and example
    sentence markup, store them on *obj*, and append *obj* to ``items``.

    Parameters
    ----------
    cdurl : str
        Site-relative URL of the detail page; blank values are ignored.
    obj : dict
        Partially-filled record (``phrase``/``pinyin``); mutated in place.
    """
    if cdurl.strip() == '':
        return
    url = website + cdurl
    headers = {
        "User-Agent": (
            "Mozilla/5.0 (Windows NT 10.0; Win64; x64) "
            "AppleWebKit/537.36 (KHTML, like Gecko) "
            "Chrome/115.0.0.0 Safari/537.36"
        ),
        "Accept": "*/*",
        "Connection": "keep-alive",
        "Accept-Charset": "utf-8"  # ask the server for UTF-8 content
        # add Referer / Cookie here if needed
    }
    try:
        # timeout=60 is a single combined connect+read timeout
        response = requests.get(url, stream=True, headers=headers, timeout=60)
        response.encoding = 'utf-8'  # force UTF-8 decoding of the body
        response.raise_for_status()
        html = response.text
        soup = BeautifulSoup(html, 'html.parser')
        # Keep only tags whose class list is EXACTLY ["indent"]
        # (find_all(class_="indent") also matches multi-class tags).
        elements = [tag for tag in soup.find_all(class_="indent") if tag.get("class") == ["indent"]]
        meanings = ""
        for item in elements:
            sr = item.get_text(strip=True) if item else ""
            # Strip leading non-CJK characters (numbering, punctuation).
            sr = re.sub(r'^[^\u4e00-\u9fff]+', '', sr)
            meanings = meanings + "\n" + sr

        # Example sentences: first tag whose class list is exactly ["mtb"].
        sentence = ''
        sentenceDom = next((tag for tag in soup.find_all() if tag.get("class") == ["mtb"]), None)
        # Bug fix: the original tested `sentenceDom != Full` (queue.Full,
        # an exception class) — always true, so a missing .mtb element
        # crashed with AttributeError and was mislogged as a download error.
        if sentenceDom is not None:
            sentence = "".join(str(item) for item in sentenceDom.contents)

        obj["meanings"] = meanings
        obj["sentence"] = sentence
        items.append(obj)

    except Exception as e:
        logger.error(f"下载失败次级：{url}，错误信息：{str(e)}")



def extract_btn_w4(html):
    """Return every element in *html* carrying the "btn w4" CSS classes."""
    parsed = BeautifulSoup(html, 'html.parser')
    return parsed.find_all(class_="btn w4")
if __name__ == '__main__':
    # Crawl the phrase pages, then persist everything collected into the DB.
    dwonload_chinese_content()
    insert_items_to_db(items)