


#
import requests as req
from bs4 import BeautifulSoup
import json


def get_request_text(url, headers, timeout=10):
    """Download *url* and return the response body as text.

    Args:
        url: Page to fetch.
        headers: Request headers (e.g. a browser-like User-Agent).
        timeout: Seconds before the request is aborted; without this the
            original call could block forever on a stalled server.

    Returns:
        The decoded body text on HTTP 200, otherwise ``None``.
    """
    response = req.get(url=url, headers=headers, timeout=timeout)
    # Let requests guess the real encoding from the body; Sina pages are
    # often mislabeled in the Content-Type header.
    response.encoding = response.apparent_encoding
    status_code = response.status_code
    print(f"status_code={status_code}")
    if status_code == 200:
        print(f"text={response.text}")
        return response.text
    else:
        print(f"下载失败")
        return None


def get_data(text):
    """Print the title and the share-widget ``data`` attribute of every
    news item found in the given HTML (debug helper; returns nothing)."""
    soup = BeautifulSoup(markup=text, features='html.parser')
    news_items = soup.findAll(name='div', attrs={'class': 'news-item'})
    for node in news_items:
        headline = node.find(name='a').text
        share_info = (
            node.find(name='div', attrs={'class': 'action'})
            .find(name='span', attrs={'id': 'bdshare'})
            .get('data')
        )
        print(f"title:{headline}")
        print(f"data_str:{share_info}")


def get_data_str(text):
    """Collect every news item's title and share-widget ``data`` attribute.

    Args:
        text: HTML of the Sina CSL page.

    Returns:
        One string with the title and the raw ``data`` attribute of each
        item, each on its own line ('' when no items are found).
    """
    soup = BeautifulSoup(markup=text, features='html.parser')
    item_list = soup.findAll(name='div', attrs={'class': 'news-item'})
    # Accumulate pieces in a list and join once: repeated ``+=`` on a str
    # is quadratic in the number of items.
    pieces = []
    for item in item_list:
        title = item.find(name='a').text
        # NOTE(review): .get('data') may return None if the attribute is
        # missing, which would raise here — same as the original behavior.
        data_str = item.find(name='div', attrs={'class': 'action'}).find(
            name='span', attrs={'id': 'bdshare'}).get('data')
        pieces.append(title + '\n')
        pieces.append(data_str + '\n')
    return ''.join(pieces)
def standings(text):
    """Print every standings table ('cc' divs) found in the HTML, one row
    per line with cells separated (and terminated) by a tab, and a
    30-asterisk rule after each table. Debug helper; returns nothing."""
    soup = BeautifulSoup(markup=text, features='html.parser')
    for table in soup.findAll(name='div', attrs={'class': 'cc'}):
        for row in table.findAll(name='tr'):
            # Each cell is followed by a tab (including the last one),
            # matching the original ``print(..., end='\t')`` output.
            line = ''.join(cell.text + '\t' for cell in row.findAll(name='td'))
            print(line)
        print("*" * 30)

def get_standings(text):
    """Parse the first two 'cc' tables (league table, then top scorers).

    Args:
        text: HTML of the Sina CSL page.

    Returns:
        A list of rows; each row is a list of cell text strings.

    The original appended raw bs4 Tag objects instead of their text
    (inconsistent with ``get_standings_str``), duplicated the per-table
    loop, and raised IndexError when fewer than two tables were present —
    all fixed here.
    """
    soup = BeautifulSoup(markup=text, features='html.parser')
    table_list = soup.findAll(name='div', attrs={'class': 'cc'})

    standings = []
    # Only the first two tables matter (积分榜 / 射手榜); slicing also
    # tolerates pages with fewer tables instead of raising IndexError.
    for table in table_list[:2]:
        for row in table.findAll(name='tr'):
            standings.append([td.text for td in row.findAll(name='td')])

    return standings


def get_standings_str(text):
    """Render the league table and top-scorer table as newline-terminated
    CSV-style text.

    Args:
        text: HTML of the Sina CSL page.

    Returns:
        '积分榜\\n' + one comma-joined line per row of the first table,
        then '射手榜\\n' + one line per row of the second table.

    Raises:
        IndexError: if the page has fewer than two 'cc' tables (same as
            the original).
    """
    soup = BeautifulSoup(markup=text, features='html.parser')
    table_list = soup.findAll(name='div', attrs={'class': 'cc'})

    def _table_lines(table):
        # One comma-separated line per <tr>; shared by both tables to
        # avoid the original copy-pasted loop.
        return [
            ",".join(td.text for td in row.findAll(name='td'))
            for row in table.findAll(name='tr')
        ]

    # Build all lines first and join once: repeated ``+=`` on a str is
    # quadratic in the number of rows.
    lines = ['积分榜']
    lines.extend(_table_lines(table_list[0]))
    lines.append('射手榜')
    lines.extend(_table_lines(table_list[1]))
    return "\n".join(lines) + "\n"



def writer_file(path, data):
    """Write *data* to *path* as UTF-8 text, replacing any existing file."""
    with open(path, mode='wt', encoding='utf8') as out:
        out.write(data)



    #url = 'https://sports.qq.com/'
url = 'https://sports.sina.com.cn/csl/'
# Browser-like headers so the request looks like it comes from a browser.
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36',
    # Add more request headers here if needed.
}

if __name__ == '__main__':
    # Guarded so importing this module no longer triggers network I/O
    # and file writes as a side effect.
    response_text = get_request_text(url, headers)
    if response_text is not None:
        # News titles + share data.
        result = get_data_str(response_text)
        writer_file('data_news.txt', result)
        # League table + top scorers.
        result = get_standings_str(response_text)
        writer_file('data.txt', result)

