from bs4 import BeautifulSoup
import requests
import pandas as pd
import  re
import time
import json
import numpy as np
def urlparse(path="C:\\Users\\zpc\\Downloads\\python\\url.txt"):
    """Extract event-page URLs from a pipe-delimited text file.

    Each line of the file may hold several '|'-separated fields; any field
    matching ``h.*?会/.*\\d`` (an http link containing '会/' followed by an
    id ending in a digit) contributes its first match to the result.

    Args:
        path: location of the URL list file. Defaults to the original
              hard-coded path for backward compatibility.

    Returns:
        list[str]: matched URLs in file order.
    """
    pattern = re.compile(r"(h.*?会/.*\d)")
    urls = []
    # 'with' guarantees the handle is closed (the original leaked it).
    with open(path, 'r', encoding="utf-8") as f:
        for line in f:
            for field in line.split('|'):
                found = pattern.findall(field)  # search once, not twice
                if found:
                    urls.append(found[0])
    return urls
def savedata(all_datas):
    """Write each scraped DataFrame to its own sheet of 赛事数据.xlsx.

    Sheet ``i`` is named "<year>年第<i+1>届": the first edition is dated
    1951 and later editions are spaced four years apart (1954, 1958, ...).

    Args:
        all_datas: list of pandas DataFrames, one per scraped page, in
                   edition order.
    """
    print('数据保存中......')
    time.sleep(10)  # original pacing kept — presumably to let prior I/O settle
    # Context manager closes/flushes the workbook even on error;
    # ExcelWriter.save() was deprecated and removed in pandas 2.0.
    with pd.ExcelWriter('赛事数据.xlsx') as writer:
        for i, frame in enumerate(all_datas):
            # Debug echo of the sheet content minus the last two rows.
            print(np.array(frame[:-2]).tolist())
            # First games: 1951; the 1950+4*i rule only holds from i>=1.
            year = 1951 if i == 0 else i * 4 + 1950
            frame.to_excel(writer, sheet_name="{}年第{}届".format(year, i + 1))
            print(f'第{i}页数据保存成功！')
    time.sleep(10)
    print('数据保存完成！')
def  bs4table(res,all_data):
         """Parse the qualifying medal/result tables out of one HTML response.

         Scans every <table> in ``res.text``; a table "qualifies" when it has
         more than 2 rows and its second row has 5-6 <td> cells whose third
         cell contains a digit (presumably a score/medal count — confirm
         against the site markup). Header texts from the first qualifying
         table become column labels; data cells are appended under those
         labels (digit cells coerced to int). One DataFrame built from the
         accumulated columns is appended to ``all_data``, which is returned.

         NOTE(review): ``labels``/``trs`` persist across tables after the
         first qualifying one (``tag`` flips to False), so all later tables
         must share the same column layout — verify against the source pages.
         """
         re1=re.compile("\d+")#regex for digit runs: used both to test and to extract numbers
         bs4s=BeautifulSoup(res.text,"html.parser")
         data1=bs4s.find_all('table')#every <table> on the page
         tableid=0
         trs = {}
         tag=True  # True until the first qualifying table's header row is consumed
         for table in data1:
               pan=[]
               tableid+=1
               if(tag==True):
                      labels=[]
                      trs={}
                #does this table match the expected layout?
               if(len(table.select('tr'))>2 and len(table.select('tr')[1].select('td'))>4 and len(table.select('tr')[1].select('td'))<7):
                   content=table.select('tr')[1].select('td>div')
                   pan=re1.findall(content[2].text)
               if(pan!=[]):
                  tr=table.select('tr')#rows of the qualifying table
                  for row in range(len(tr)):
                        if(tr[row].select('td>div')):
                             colum=tr[row].select('td>div')
                        else:
                            colum = tr[row].select('th>div')
                        # left-pad short rows so cells line up with labels
                        if(len(colum)<len(labels)):
                            for i in range(len(labels)-len(colum)):
                                  colum.insert(0,"")
                        for columid in range(len(colum)):#cells of the row
                            if(colum[columid]!=""):
                                if(row==0 and tag==True):
                                    # header row of the first table: create columns
                                    trs[colum[columid].text]=[]
                                    labels.append(colum[columid].text)
                                else:
                                  # numeric-looking cells stored as int, rest as text
                                  if(re1.findall(colum[columid].text)):
                                    trs[labels[columid]].append(int(re1.findall(colum[columid].text)[0]))
                                  else:
                                     trs[labels[columid]].append(colum[columid].text)
                            else:
                                # padded placeholder (empty string) kept as-is
                                trs[labels[columid]].append(colum[columid])
                  tag=False#header has been consumed; later tables are data-only
         all_data.append(pd.DataFrame(trs))
         return all_data
def spider_one_table(url, all_data):
    """Fetch one page and append its parsed table(s) to ``all_data``.

    Args:
        url: page to download.
        all_data: running list of DataFrames; extended by ``bs4table``.

    Returns:
        The (same) list with this page's DataFrame appended.
    """
    header = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:99.0) Gecko/20100101 Firefox/99.0'}
    # BUG FIX: the original called requests.get(url, "lxml", headers=header),
    # which passed "lxml" as the `params` argument (a BeautifulSoup parser
    # name confused with a requests argument) and appended "?lxml" to every
    # URL. A timeout is also added — requests waits forever by default.
    res = requests.get(url, headers=header, timeout=30)
    print(f"状态码为:{res.status_code}")
    res.encoding = 'utf8'
    time.sleep(10)  # polite crawl delay
    all_data = bs4table(res, all_data)
    return all_data
def spiders(urls):
    """Crawl every page in *urls* in order, then persist all tables to Excel."""
    collected = []
    for page, url in enumerate(urls, start=1):
        print(f"第{page}页正在爬取中.....")
        time.sleep(10)  # crawl delay between pages
        collected = spider_one_table(url, collected)
        print(f'第{page}页爬取结束!')
    savedata(collected)
if __name__ == "__main__":
    # Run the scraper only when executed as a script — importing this
    # module for its functions no longer triggers a full crawl.
    urls = urlparse()
    spiders(urls)
