import json
import re
from wsgiref import headers

import requests
import datetime
import os

from bs4 import BeautifulSoup

# Today's date as a compact YYYYMMDD stamp, used to name the JSON output file.
today = f"{datetime.date.today():%Y%m%d}"


def crawl_wike_data():
    """Fetch the Baidu Baike page for "青春有你第二季" and locate the contestant table.

    Returns:
        The first ``<table class="table-view log-set-param">`` element whose
        preceding ``<div>`` contains an ``<h3>`` heading matching "参赛学员",
        or ``None`` when no such table is found or the request fails.
    """
    # Bug fix: everything below was accidentally dedented to module level,
    # where the local `headers` dict was out of scope (the name resolved to
    # the `wsgiref.headers` module instead) and the code ran on import.
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) '
                      'Chrome/73.0.3683.103 Safari/537.36 '
    }

    url = 'https://baike.baidu.com/item/青春有你第二季'

    try:
        response = requests.get(url, headers=headers)
        print(response.status_code)

        soup = BeautifulSoup(response.text, 'lxml')

        # Candidate data tables on the page.
        tables = soup.find_all('table', {'class': 'table-view log-set-param'})

        crawl_table_title = "参赛学员"

        for table in tables:
            # Each table's heading lives in the <div> immediately before it.
            table_title = table.find_previous('div').find_all('h3')
            for title in table_title:
                if crawl_table_title in title:
                    # Bug fix: the original evaluated `table` and discarded
                    # it; return the matched table to the caller.
                    return table

    except Exception as e:
        # Best-effort crawl: report and fall through to the None return.
        print(e)

    return None


def parse_wiki_data(table_html):
    """Parse the contestant table and write the records to ``work/<date>.json``.

    Args:
        table_html: The table element (or its HTML string) returned by
            ``crawl_wike_data()``.

    Returns:
        The list of contestant dicts that was written to disk.
    """
    bs = BeautifulSoup(str(table_html), 'lxml')
    all_trs = bs.find_all('tr')

    # Quote characters stripped from the "flower word" text.
    error_list = ['\'', '\"']

    stars = []

    # Skip the header row.
    for tr in all_trs[1:]:
        all_tds = tr.find_all('td')

        star = {}
        star["name"] = all_tds[0].text
        star["link"] = 'https://baike.baidu.com' + all_tds[0].find('a').get('href')

        star["zone"] = all_tds[1].text
        star["constellation"] = all_tds[2].text
        star["height"] = all_tds[3].text
        star["weight"] = all_tds[4].text

        # Bug fix: the original assigned star["flower_word"] only inside the
        # per-character quote check, so the key was missing whenever the text
        # contained no quote. Clean once, then always store it.
        flower_word = all_tds[5].text
        for c in error_list:
            flower_word = flower_word.replace(c, '')
        star["flower_word"] = flower_word

        # Prefer the linked company name when present.
        company_link = all_tds[6].find('a')
        if company_link is not None:
            star["company"] = company_link.text
        else:
            star["company"] = all_tds[6].text

        stars.append(star)

    # Bug fix: serialize the list directly. The original round-tripped via
    # json.loads(str(stars).replace("'", '"')), which breaks on any
    # apostrophe or quote inside the scraped text.
    os.makedirs('work', exist_ok=True)  # avoid FileNotFoundError on first run
    with open('work/' + today + '.json', 'w', encoding="UTF-8") as f:
        json.dump(stars, f, ensure_ascii=False)
    return stars