# -*- coding: utf-8 -*-
# Created: 2021/8/14 10:29
import time

from bs4 import BeautifulSoup
import requests
import pymysql
import re
from  hhy import HttpUtil,DateUtil
from xlwt import Workbook
import datetime
import urllib.request

__author__ = 'LuckyHhy'
#https://www.dszuqiu.com/diary/20210815


def get_content(diary, p):
    """Fetch the raw HTML for one page of a daily match diary.

    :param diary: date string like '20210815' identifying the diary day
    :param p: page number within that day's listing
    :return: response body text (redirects are not followed)
    """
    url = 'https://www.dszuqiu.com/diary/{0}/p.{1}'.format(str(diary), str(p))
    print('爬取地址:{}'.format(url))
    session = requests.session()
    # Randomized User-Agent to reduce the chance of being blocked.
    session.headers = {"User-Agent": HttpUtil.AgentRandom()}
    response = session.get(url, timeout=30, allow_redirects=False)
    return response.text



def get_contet_by_page(diary):
    """Crawl every listing page for one diary date and dump each page to .xls.

    NOTE(review): the name keeps the original typo ('contet') so existing
    callers continue to work.

    :param diary: date string like '20210818'
    """
    print('正在爬取{}_日期数据...'.format(str(diary)))

    site = 'https://www.dszuqiu.com{}'

    def parse_row(cells):
        # One <tr> of the diary table -> [league name, league url,
        # kickoff time, home team, home url, away team, away url].
        league = cells[0].get_text()
        league_url = site.format(cells[0].find('a')['href'])
        kickoff = cells[2].get_text()
        home = cells[3].select('a')[0].get_text()
        home_url = site.format(cells[3].find('a')['href'])
        away = cells[5].select('a')[0].get_text()
        away_url = site.format(cells[5].select('a')[0]['href'])
        return [league, league_url, kickoff, home, home_url, away, away_url]

    # Fetch the pagination list for the day, then crawl page by page.
    for p in get_page(diary):
        print("正在爬取第{}页数据....".format(p))
        html = get_content(diary, p)
        soup = BeautifulSoup(html, 'html.parser')
        rows = soup.find(class_="diary-table").find('tbody').find_all('tr')
        data = [parse_row(tr.select('td')) for tr in rows]

        # One .xls file per page.
        pathh = '{0}日赛事第{1}页数据.xls'.format(str(diary), str(p))
        save_xls(pathh, data)

        print("第{}页数据爬取完成！！！".format(p))
        # Throttle between pages to be polite to the server.
        time.sleep(2)

    print('{}_日期数据爬取完成'.format(str(diary)))


# 获取团队头像
def get_team_img(url):
    """Return the team avatar image URL from a team page, or '' when absent.

    :param url: full team-page URL, e.g. 'https://www.dszuqiu.com/team/10498'
    :return: the <img> src inside the 'teamImg' element, or '' if not found
    """
    session = requests.session()
    # Randomized User-Agent to reduce the chance of being blocked.
    session.headers = {"User-Agent": HttpUtil.AgentRandom()}
    page = session.get(url, timeout=30, allow_redirects=False).text
    node = BeautifulSoup(page, 'html.parser').find(class_="teamImg")
    return node.find('img')['src'] if node else ''


# 获取网页总分页
# Fetch the list of pagination page numbers for one diary date.
def get_page(diary):
    """Return the listing-page numbers for one diary date.

    Scrapes the pagination bar of https://www.dszuqiu.com/diary/<diary>.
    Returns [1] when the page has no pagination (a single page of results).

    FIX: the original returned strings from the regex branch but the int 1
    from the fallback branch, and could emit duplicate page numbers when the
    same number appears twice in the pagination bar. Page numbers are now
    always ints, de-duplicated, in page order — callers format them with
    str(p), so this is backward-compatible.

    :param diary: date string like '20210818'
    :return: list[int] of page numbers
    """
    print('正在获取分页数据.....')
    url = 'https://www.dszuqiu.com/diary/{}'.format(str(diary))
    con = HttpUtil.HttpGet(url)
    soup = BeautifulSoup(con, 'html.parser')
    pagination_get = soup.find(class_='pagination')
    page = []
    if pagination_get:
        seen = set()
        for item in pagination_get.find_all('li'):  # all pagination entries
            # Keep only the digits; prev/next arrows and ellipses yield ''.
            digits = re.sub(r'\D', "", item.get_text())
            if digits:
                num = int(digits)
                if num not in seen:
                    seen.add(num)
                    page.append(num)
    else:
        page = [1]

    print('分页数据获取完成:{}'.format(str(page)))
    return page




def save_xls(pathh, data):
    """Write a 2-D list of values to an .xls workbook at *pathh*.

    FIX: replaced the hand-rolled row/column counters with `enumerate`,
    which is the idiomatic (and less error-prone) way to track indices.

    :param pathh: output file path for the .xls file
    :param data: list of rows, each row a list of cell values
    """
    w = Workbook()
    ws = w.add_sheet('datas')  # create one worksheet
    for row_idx, row in enumerate(data):
        for col_idx, value in enumerate(row):
            ws.write(row_idx, col_idx, value)  # (row, column, value)
    w.save(pathh)
    print('excel——保存成功')





# Append text fragments to a log file.
def save_txt(*args):
    """Append each argument, in order, to 'saishi.txt' (UTF-8).

    FIX: the original re-opened the file once per argument; opening it a
    single time writes the same output with far less I/O overhead.

    :param args: string fragments to append, concatenated with no separator
    """
    with open('saishi.txt', 'a', encoding='utf-8') as f:
        for chunk in args:
            f.write(chunk)




def proxy_handel():
    """Try each configured proxy against a test URL and print the response.

    Errors (unreachable proxy, timeout) are caught and printed so the loop
    continues with the next proxy.
    """
    # NOTE(review): entries have no port (e.g. ':8080') — confirm these are
    # complete proxy addresses before relying on them.
    proxy_list = [
        {'http': '106.45.104.227'},
    ]

    for proxy in proxy_list:
        print(proxy)
        # Build an opener that routes requests through this proxy.
        proxy_head = urllib.request.ProxyHandler(proxy)
        opener = urllib.request.build_opener(proxy_head)
        try:
            # BUG FIX: urllib responses have no .text() method (that is the
            # requests API) — the original always raised AttributeError,
            # silently swallowed below. Read the bytes and decode instead.
            response = opener.open('https://www.bootcss.com/p/layoutit/', timeout=15)
            text = response.read().decode('utf-8')
            print(text)
        except Exception as e:
            print(e)


def main():
    """Crawl diary data for each of the next three days (tomorrow onward)."""
    now = datetime.datetime.now()
    # Build 'YYYYMMDD' strings for the next three days.
    upcoming = [(now + datetime.timedelta(days=offset)).strftime('%Y%m%d')
                for offset in (1, 2, 3)]

    for diary in upcoming:
        # Crawl every page of that day's diary.
        get_contet_by_page(str(diary))


if __name__ == '__main__':
    main()
