# -*- coding: utf-8 -*-
# 创建时间：2021/8/14 10:59
from bs4 import BeautifulSoup
import requests
import time, math, random, json
import pymysql
import re
from xlwt import Workbook
from hhy import FileUtil, DateUtil, ToolUtil, HttpUtil
from hashlib import md5

__author__ = 'LuckyHhy'


def get_content(url):
    """Fetch *url* and return the response body as text.

    Uses a requests Session with a randomized User-Agent header;
    redirects are NOT followed and the request times out after 20s.
    """
    headers = {"User-Agent": HttpUtil.AgentRandom()}
    # Close the session explicitly instead of leaking its connection pool
    # (the original created a session per call and never closed it).
    with requests.Session() as session:
        session.headers = headers
        resp = session.get(url, timeout=20, allow_redirects=False)
        return resp.text



def deal_content():
    """Scrape the match-diary page and append one line per league to liansai.txt.

    Walks every league link found under the selected form, fetches the
    league detail page via get_deail(), and saves
    "<name>-<detail>-<url>\\n" for each one.
    """
    url = 'https://www.dszuqiu.com/diary/20210828'
    content = HttpUtil.HttpGet(url, 10)
    soup = BeautifulSoup(content, 'html.parser')
    # NOTE(review): this selector targets the diary filter form; confirm it
    # actually contains the .dataList elements walked below — the original
    # code had a debug print/exit() here that made the loop unreachable.
    selection = soup.select('#league-diary-filter > form')

    output = """{}-{}-{}\n"""
    print("正在爬取数据....")
    st = time.time()  # start time, for the elapsed-time report below
    for item in selection:
        # 'data_list' instead of 'list' — never shadow the builtin.
        for data_list in item.find_all(class_='dataList'):
            tabs = (data_list.find(class_='dataListCon')
                             .find(class_='dataTabsContent2')
                             .find_all(class_='content'))
            for tab in tabs:
                li_arr = (tab.find(class_='dataList2')
                             .find(class_='dataListCon2')
                             .find_all('li'))
                for li in li_arr:
                    league_url = li.find('a')['href']
                    league_detail, league_name = get_deail(league_url)
                    # Persist immediately so partial progress survives a crash.
                    save_txt(output.format(league_name, league_detail, league_url))

    # Report total elapsed wall-clock time.
    exec_time = time.time() - st
    print("总共用时" + str(exec_time) + "秒。")




# 保存到文本中
# 保存到文本中
def save_txt(*args):
    """Append each string in *args* to liansai.txt (UTF-8).

    :param args: strings to write, in order, with no separator added.
    """
    # Open the file once for all arguments instead of re-opening it
    # per item as the original loop did.
    with open('liansai.txt', 'a', encoding='utf-8') as f:
        f.writelines(args)



def get_deail(url):
    """Fetch a league page and return (detail, name) parsed from its <title>.

    The page title is assumed to look like "<detail>_<name>赛程..."; the
    text before the first '赛程' is split on '_' to recover both parts.

    :param url: site-relative path, e.g. "/league/123".
    :return: tuple (detail, name) — callers unpack it in that order.
    :raises IndexError: if the title has no '_' before '赛程'.
    """
    # Note: function name keeps the original (misspelled) public name so
    # existing callers continue to work.
    url = 'https://www.dszuqiu.com{}'.format(str(url))
    content = HttpUtil.HttpGet(url, 10)
    soup = BeautifulSoup(content, 'html.parser')
    title = soup.title.string
    # Strip everything from '赛程' onwards, then split "<detail>_<name>".
    # Debug print() calls removed from this production path.
    before_schedule = title.split('赛程')[0]
    parts = before_schedule.split('_')
    return parts[0], parts[1]



def main():
    """Script entry point: run the diary scraper."""
    deal_content()


# Run the scraper only when executed as a script, not on import.
if __name__ == '__main__':
    main()
