import datetime  # date/time utilities
import json
import logging
import os
import re

import requests  # HTTP client
from bs4 import BeautifulSoup  # HTML parsing/extraction; install via pip

# Module-level logger; basicConfig below appends INFO-and-above records to Mylog.log
# with timestamp, source file/line, and level in each message.
logger = logging.getLogger(__name__)
logging.basicConfig(format='%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s',
                    level=logging.INFO,
                    filename='Mylog.log',
                    filemode='a')
# Today's date formatted for output file naming, e.g. "20200420".
today = datetime.date.today().strftime('%Y%m%d')


# Fetch the wiki page and extract the contestant tables.
def crawl_wiki_data():
    """Fetch the Baidu Baike page for the show and return its contestant tables.

    Returns:
        A list of BeautifulSoup ``<table>`` tags from the target div,
        or ``None`` when the request fails or the div is not present
        (callers must tolerate ``None``).
    """
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) '
                      'Chrome/67.0.3396.99 Safari/537.36'
    }
    url = 'https://baike.baidu.com/item/%E9%9D%92%E6%98%A5%E6%9C%89%E4%BD%A0%E7%AC%AC%E4%B8%89%E5%AD%A3/51140980'
    try:
        # NOTE(review): verify=False disables TLS certificate checks — kept for
        # parity with the rest of the script, but worth revisiting.
        response = requests.get(url, headers=headers, stream=True, verify=False)
        response.raise_for_status()  # surface HTTP errors instead of parsing an error page
        soup = BeautifulSoup(response.text, 'lxml')
        # The contestant tables live in a div identified by this data-uuid.
        div = soup.find_all("div", attrs={"data-uuid": "go12lpqgpn"})
        if not div:
            logger.error("target div (data-uuid=go12lpqgpn) not found on page")
            return None
        return div[0].find_all("table")
    # Bug fix: the original caught json.JSONDecodeError, which this code can
    # never raise (no JSON decoding happens); real network failures propagated.
    except requests.RequestException as e:
        logger.error("request to %s failed: %s", url, e)
        print(e)
        return None


def parse_wiki_data(html_table):
    """Parse contestant rows out of *html_table* and persist them as JSON.

    Side effects:
        - Appends each contestant's profile link to the module-level
          ``persons_link_list`` and each name to ``name_list``.
        - Sets the module-level ``stars_length`` to the number of rows parsed.
        - Writes ``cansaixuanshou_<today>.json`` (UTF-8, pretty-printed).

    Args:
        html_table: table markup (or BeautifulSoup tags) from crawl_wiki_data().
    """
    global stars_length
    # Re-parse so this works whether we get tags or a plain string.
    soup = BeautifulSoup(str(html_table), 'lxml')
    all_trs = soup.find_all('tr')

    stars = []
    for tr in all_trs[1:]:  # skip the header row
        all_tds = tr.find_all('td')
        if len(all_tds) < 5:
            # Robustness fix: the original raised IndexError on any short row.
            continue
        star = {"name": all_tds[0].text}
        # Profile link, when the name cell carries an anchor; fall back to the site root.
        anchor = all_tds[0].find('a')
        if anchor is not None:
            star["link"] = 'https://baike.baidu.com' + anchor.get('href')
        else:
            star["link"] = 'https://baike.baidu.com'
        star["zone"] = all_tds[1].text     # birthplace
        star["height"] = all_tds[2].text   # height
        star["weight"] = all_tds[3].text   # weight
        star["company"] = all_tds[4].text  # agency/company
        stars.append(star)
        persons_link_list.append(star["link"])  # contestant profile link
        name_list.append(star["name"])

    # Bug fix: previously recomputed inside the loop on every iteration.
    stars_length = len(stars)
    print('stars count:{0}'.format(stars_length))

    # Persist the parsed contestants ('w' suffices — we never read the handle).
    with open("cansaixuanshou_" + today + '.json', 'w', encoding="UTF-8") as file:
        json.dump(stars, file, ensure_ascii=False, indent=2)


def cat_pic(link, name):
    """Download a contestant's profile photo to persons_images/<name>.jpg.

    Scrapes the first bkimg CDN image URL embedded in the profile page at
    *link*. Logs a warning and returns without writing a file when no image
    URL is found; logs the success message only when the download completed.

    Args:
        link: URL of the contestant's Baike profile page.
        name: Contestant name, used as the image file name.
    """
    headers = {
        "User-Agent": 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) '
                      'Chrome/120.0.0.0 Safari/537.36 Edg/120.0.0.0'
    }
    try:
        response = requests.get(link, headers=headers, stream=True, verify=False)
        response.encoding = "UTF-8"
        # First image URL on Baidu's CDN, embedded as JSON inside the page source.
        pattern = r'"url":"(https://bkimg\.cdn\.bcebos\.com/pic/[^"]+)"'
        match = re.search(pattern, response.text)
        if match is None:
            # Bug fix: the original crashed with AttributeError on .group(1)
            # here, and the finally clause still reported a successful download.
            logger.warning("no image URL found for %s at %s", name, link)
            return
        url = match.group(1)
        pic_response = requests.get(url, headers=headers, stream=True, verify=False)
        os.makedirs("persons_images", exist_ok=True)  # bug fix: dir was never created
        with open("persons_images/%s" % str(name) + ".jpg", 'wb') as f:  # 'wb' — we only write
            f.write(pic_response.content)
    except Exception as e:
        logger.exception("image download failed for %s", name)
        print(e)
    else:
        # Bug fix: success message was in `finally`, so it fired even on failure.
        print("%s的图片下载完成" % name)
        logger.info("%s的图片下载完成\n" % name)


if __name__ == '__main__':
    # Module-level state populated by parse_wiki_data() as a side effect.
    persons_link_list = []  # contestant profile-page URLs
    name_list = []          # contestant names, index-aligned with the links
    stars_length = 0        # number of contestants parsed

    table = crawl_wiki_data()
    parse_wiki_data(table)

    # Idiom fix: iterate the parallel lists directly instead of indexing
    # through range(stars_length) — identical pairs, same order.
    for link, name in zip(persons_link_list, name_list):
        cat_pic(link, name)


