"""
获取字典
"""

import random
import socket
import time

import requests
from bs4 import BeautifulSoup
from lxml import etree


def get_user_agent():
    """
    Pick a random User-Agent string from the local settings file.

    :return: one line from ./setting/user_agent.txt chosen at random, or
             None when the file cannot be read or contains no entries.
    """
    user_agent_file = "./setting/user_agent.txt"
    try:
        # `with` guarantees the handle is closed; the original called
        # f.close() in `finally`, which raised NameError when open() failed.
        with open(user_agent_file, "r") as f:
            user_agent_list = f.read().splitlines()
    except IOError:
        # original used print("...{}", path) which printed a tuple instead
        # of formatting the path into the message
        print(f"文件读取失败，文件目录：{user_agent_file}")
        return None
    if not user_agent_list:
        # empty file: original raised ValueError from randint(0, -1)
        return None
    return random.choice(user_agent_list)


def download_dict(url):
    """
    Download one index page of the dictionary site and extract hanzi links.

    :param url: index-page URL (the site serves GBK-encoded HTML).
    :return: list of ``{"text": hanzi, "url": href}`` dicts; an empty list
             when the request is not 200 or the page has no matches.
    """
    response = requests.get(url)
    response.encoding = "gbk"  # site serves GBK, not UTF-8
    try:
        if response.status_code != 200:
            # original printed print("{}...", url) — a tuple, not a message —
            # and fell through returning None, which crashed callers' extend()
            print(f"{url}是错误的链接")
            return []
        html_root = etree.HTML(response.text)
        # target <font> cells that wrap each hanzi link on the index table
        font_list = html_root.xpath("//tr/td/a/font[@color='000000' and @size='7']")
        hanzi_lst = []
        for hanzi in font_list:
            hanzi_text = hanzi.xpath("./text()")
            hanzi_url = hanzi.xpath("../@href")  # href lives on the parent <a>
            hanzi_lst.append(dict(text=hanzi_text[0], url=hanzi_url[0]))
        return hanzi_lst
    finally:
        # close the connection on every path, not only on success
        response.close()


def find_all_word():
    """
    Crawl every index page (1-7) and collect all hanzi entries.

    :return: list of ``{"text": ..., "url": ...}`` dicts from all pages.
    """
    hanzi_lst = []
    for i in range(1, 8):
        print(i)
        # the first index page has no numeric suffix
        if i == 1:
            url = 'http://www.zd9999.com/zi/index.htm'
        else:
            url = f'http://www.zd9999.com/zi/index_{i}.htm'
        rs = download_dict(url)
        # download_dict may yield None/[] on a bad link; extend(None) would raise
        if rs:
            hanzi_lst.extend(rs)
    return hanzi_lst


def _collect_dl_text(wrapper):
    """
    Flatten the <dl>/<dt>/<dd>/<ol>/<li> explanation markup under *wrapper*
    (a "basicmean"/"detailmean" wrapper div) into newline-separated text.
    """
    content = ""
    dl_lst = wrapper.find("div", class_="tab-content").find_all("dl")
    for dl in dl_lst or []:
        if dl.find("dt"):
            content += dl.dt.text + "\n"
        dd_lst = dl.find_all("dd")
        if not dd_lst:
            content += dl.text + "\n"
            continue
        for dd in dd_lst:
            # BUG FIX: the original detail-mean branch searched dl.find("ol")
            # instead of dd.find("ol"), unlike the parallel basic-mean branch
            ol = dd.find("ol")
            if ol is None:
                content += dd.text + "\n"
                continue
            li_lst = ol.find_all("li")
            if li_lst:
                for index, li in enumerate(li_lst):
                    content += str(index + 1) + "." + li.text + "\n"
            else:
                content += ol.text + "\n"
    return content


def find_hanzi_detail(text):
    """
    Fetch and parse the Baidu Hanyu page for a single hanzi.

    :param text: the hanzi to look up.
    :return: dict with keys ``text``/``url`` always present and, when the
             corresponding section exists on the page, ``base_info``,
             ``basic_content``, ``detail_content`` and ``baike_content``.
    """
    socket.setdefaulttimeout(20)
    url = f'https://hanyu.baidu.com/s?wd={text}&ptype=zici&tn=sug_click'
    headers = {
        "User-Agent": get_user_agent(),  # randomized browser identity
        "Referer": "http://www.baidu.com",
    }
    cookies = dict(uuid='b18f0e70-8705-470d-bc4b-09a8da617e15',
                   UM_distinctid='15d188be71d50-013c49b12ec14a-3f73035d-100200-15d188be71ffd')
    # BUG FIX: cookies were built but never sent; also pass an explicit
    # timeout — socket.setdefaulttimeout alone is not reliable for requests
    response = requests.get(url, headers=headers, cookies=cookies, timeout=20)
    response.encoding = "utf-8"
    soup = BeautifulSoup(response.text, "lxml")
    hanzi_detail = {"text": text, "url": url}

    # header section: pinyin, stroke count, wuxing, traditional form
    word_header = soup.find("div", id="word-header")
    if word_header:
        base_info = {}
        tone_py_tag = word_header.find(id="tone_py")
        if tone_py_tag:
            base_info["tone_py"] = tone_py_tag.b.text  # pinyin lives in a <b>
        for key in ("stroke_count", "wuxing", "traditional"):
            tag = word_header.find(id=key)
            if tag:
                base_info[key] = tag.span.text  # the others live in a <span>
        hanzi_detail["base_info"] = base_info

    # basic explanation (blank lines stripped, as in the original)
    basic_mean = soup.find("div", id="basicmean-wrapper")
    if basic_mean:
        basic_content = _collect_dl_text(basic_mean)
        basic_content = "".join(s for s in basic_content.splitlines(True) if s.strip())
        hanzi_detail["basic_content"] = basic_content

    # detailed explanation (original kept blank lines here — preserved)
    detail_mean = soup.find("div", id="detailmean-wrapper")
    if detail_mean:
        hanzi_detail["detail_content"] = _collect_dl_text(detail_mean)

    # encyclopedia blurb: first text node of the first <p>, spaces and
    # blank lines removed
    baike_mean = soup.find("div", id="baike-wrapper")
    if baike_mean:
        baike_content = baike_mean.find("div", class_="tab-content").find("p").contents[0]
        baike_content = baike_content.replace(" ", "")
        baike_content = "".join(s for s in baike_content.splitlines(True) if s.strip())
        hanzi_detail["baike_content"] = baike_content

    response.close()
    return hanzi_detail


def find_hanzi(hanzi_lst):
    """
    Look up the detailed explanation for each hanzi in *hanzi_lst*.

    :param hanzi_lst: list of hanzi entry dicts to resolve.
    :return: None for a non-empty list (lookup not implemented yet);
             the input itself when it is empty/falsy.
    """
    if not hanzi_lst:
        print("汉字字典为空")
        return hanzi_lst
    # TODO: actual per-hanzi detail lookup is not implemented yet
    return None


# NOTE: "__main__5" never equals __name__, so this full-crawl block is
# intentionally disabled; the active entry point is the guard further below.
if __name__ == "__main__5":
    print(1)
    hanzi_lst = find_all_word()
    print(2)
    # accumulate the detailed record for every crawled hanzi
    hanzi_detail_lst = list()
    if hanzi_lst:
        print(3)
        for item in hanzi_lst:
            hanzi = item["text"]
            print(hanzi)
            hanzi_detail = find_hanzi_detail(hanzi)
            hanzi_detail_lst.append(hanzi_detail)
            # random 10-20s delay between requests to avoid hammering the site
            time.sleep(random.randint(10, 20))
    print(hanzi_detail_lst)


# Active entry point: smoke-test the User-Agent picker and a single lookup.
if __name__ == "__main__":
    print(get_user_agent())
    print(find_hanzi_detail("吖"))
