import requests
import random
import time
import urllib.parse
from bs4 import BeautifulSoup

# Maximum number of retry attempts per keyword.
max_retry = 4

# The proxy pool allows at most 20 requests per second, so sleep 0.06 s
# before each request to stay under the limit.
sleep_time = 0.06

# Boilerplate text from Baidu Baike's generic landing page; if it appears in a
# response, the requested entry does not exist.
baike_str = "百度百科是一部内容开放、自由的网络百科全书，旨在创造一个涵盖所有领域知识，服务所有互联网用户的中文知识性百科全书。在这里你可以参与词条编辑，分享贡献你的知识。"
url = 'https://baike.baidu.com/item/'

# Pool of User-Agent strings; one is picked at random per request to reduce
# the chance of being blocked by Baidu's anti-crawling measures.
ua_list = [
    'Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US; rv:1.9.2.3) Gecko/20100401 Firefox/3.6.3 GTB6 (.NET CLR 3.5.30729)',
    'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36',
    'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:26.0) Gecko/20100101 Firefox/26.0',
    'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:40.0) Gecko/20100101 Firefox/40.1',
    'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/37.0.2062.120 Safari/537.36',
    'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/40.0.2214.115 Safari/537.36',
    'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.0; en-US; .NET CLR 1.0.3328)',
    'Mozilla/5.0 (Windows NT x.y; rv:10.0) Gecko/20100101 Firefox/10.0',
    'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 7.1; Trident/5.0)',
    'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0)'
]

# IP proxy pool used to get around Baidu's anti-crawling mechanism.
# Tunnel as "domain:port".
tunnel = ""

# Username/password credentials for the proxy tunnel.
username = ""
password = ""

# Fix: with an unconfigured (empty) tunnel the original code built the invalid
# proxy URL "http://:@/", which made every request fail. Only build the proxy
# mapping when a tunnel is actually configured; requests accepts proxies=None
# and then connects directly.
if tunnel:
    _proxy_url = "http://%(user)s:%(pwd)s@%(proxy)s/" % {
        "user": username,
        "pwd": password,
        "proxy": tunnel,
    }
    proxies = {"http": _proxy_url, "https": _proxy_url}
else:
    proxies = None


def get_baike_summary(key_word):
    """Fetch the meta-description summary for *key_word* from Baidu Baike.

    The keyword is URL-quoted and appended to the Baike item URL. The request
    is retried up to ``max_retry`` times on transient failures; a 404 (entry
    does not exist) returns immediately.

    :param key_word: entry name to look up (str, any Unicode).
    :return: the content of the page's ``<meta name="description">`` tag,
             or ``""`` when the entry is missing, has no description, or all
             retries fail.
    """
    encoded_key = urllib.parse.quote(key_word, encoding='utf-8', errors='replace')
    target_url = url + encoded_key

    for attempt in range(max_retry):
        try:
            headers = {
                "User-Agent": random.choice(ua_list),
                "Accept-Language": "zh-CN,zh;q=0.9",
            }
            # Throttle: the proxy pool allows at most ~20 requests/second.
            time.sleep(sleep_time)
            response = requests.get(target_url, headers=headers, proxies=proxies, timeout=10)
            response.raise_for_status()  # raise HTTPError on 4xx/5xx
            # Force UTF-8 so response.text decodes consistently (avoids mojibake).
            # Note: decode via .text (errors are replaced), not by re-decoding
            # .content with a strict 'utf-8' decode, which raised
            # UnicodeDecodeError on any stray byte and burned retries.
            response.encoding = 'utf-8'
            res_str = response.text

            # An empty body, or the presence of Baike's generic boilerplate,
            # means the requested entry does not exist.
            if not res_str or baike_str in res_str:
                return ""

            soup = BeautifulSoup(res_str, 'html.parser')
            meta_description = soup.find("meta", attrs={"name": "description"})

            # .get avoids a KeyError (and pointless retries) when the meta tag
            # exists but lacks a content attribute.
            if meta_description:
                return meta_description.get("content", "")
            return ""
        except requests.exceptions.HTTPError as e:
            # 404 is permanent — the entry does not exist, so don't retry.
            if e.response is not None and e.response.status_code == 404:
                return ""
            # Other HTTP errors (e.g. 500, 403) are treated as transient.
            print(key_word + " 临时出现HTTP失败，状态码：" + str(e.response.status_code))
            continue
        except Exception as e:
            # Network errors, parse errors, etc. — retry.
            print(key_word + ' 临时出现其他失败，错误信息：' + str(e))
            continue
    print(key_word + ' 最终处理失败!!!')
    return ""
