import requests
from bs4 import BeautifulSoup
import re
import csv

# Scrape flow:
# 1. fetch the index page that lists per-province result pages
# 2. fetch each child (province) page
# 3. extract year / minimum-score rows from each child page
url = 'http://college.gaokao.com/school/tinfo/273/result/9/1/'

# Browser-like User-Agent so the site serves the normal page
# (the exact UA string is arbitrary; any modern browser UA works).
headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/100.0.4896.75 Safari/537.36 Edg/100.0.1185.36"
}

# GET the index page; a timeout keeps the script from hanging forever
# on an unresponsive server.
resp = requests.get(url, headers=headers, timeout=10)
# Site is GB-encoded. NOTE(review): 'gbk' is a superset of 'gb2312' and
# decodes more characters safely — confirm before switching.
resp.encoding = 'gb2312'

page = BeautifulSoup(resp.text, 'html.parser')
dlist = page.find('dl', class_='clearfix').find_all('a')

# Hrefs of every province link on the index page.
child_dlist = [a.get('href') for a in dlist]

# One match per <tr>: the first two <td> cells hold the year and the
# minimum admission score. re.S lets '.' span the newlines in the HTML.
obj = re.compile(r'<tr class=".*?">.*?<td>(?P<year>.*?)</td>.*?<td>(?P<min_score>.*?)</td>.*?</tr>', re.S)

# For each province page: fetch it, extract (year, min_score) rows from the
# score table, and save them to .\data\<province>.csv (one file per province).
for link in child_dlist:
    child_resp = requests.get(link, headers=headers, timeout=10)
    child_resp.encoding = 'gb2312'
    child_page = BeautifulSoup(child_resp.text, 'html.parser')

    # The site shows this banner when a province has no data — skip it.
    if child_page.find(text='抱歉，没有找到相关内容'):
        continue

    # The score table lives inside <div id="pointbyarea">. Guard against
    # pages that unexpectedly lack it instead of crashing with AttributeError.
    point_div = child_page.find('div', id='pointbyarea')
    if point_div is None:
        continue
    trlist = point_div.find_all('tr')

    # The currently selected province link carries class="on" — its text
    # is the province name used for the output filename.
    name_ = child_page.find('dl', class_='clearfix').find('a', class_='on').text
    print(name_, end=' ')  # progress output
    name_dict = {'地点': name_}

    # 'with' guarantees the file is closed even if a write raises mid-loop.
    with open(f".\\data\\{name_dict['地点']}.csv", mode="w", encoding='utf-8', newline='') as f:
        csvwriter = csv.writer(f)
        csvwriter.writerow(name_dict.values())
        for tr in trlist:
            # Pull year / min_score out of each <tr> via the precompiled regex.
            for it in obj.finditer(str(tr)):
                csvwriter.writerow(it.groupdict().values())
    print()
print("over")

# # obj = re.compile(r'<div id="pointbyarea">.*?<td>(?P<year>.*?)</td>.*?<td>(?P<min_score>.*?)</td>.*?</div>', re.S)
#
# #查看源代码后，没有多余的tr标签
#
# #对于每一个tr标签的正则表达式
# obj=re.compile(r'<tr class=".*?">.*?<td>(?P<year>.*?)</td>.*?<td>(?P<min_score>.*?)</td>.*?</tr>',re.S)
# #对于每一个子链接中的div标签
# page = BeautifulSoup(resp.text, 'html.parser')
# #这一个子页面中的所有tr标签
# trlist = page.find('div',id='pointbyarea').find_all('tr')
# name_=page.find('dl',class_='clearfix').find('a',class_='on').text
# print(name_,end=' ')
# for li in trlist:
#     result = obj.finditer(str(li)) #对于每一个tr标签去查看里面的年份，和分数
#     for it in result:
#         print(it.group("year"),end=' ')
#         print(it.group("min_score"),end=' ')
#
# # for li in child_dlist:
#
#     # result = obj.finditer(child_resp.text)
#     # for it in result:
#     #     print(it.group("year"))
# # child_page = BeautifulSoup(child_resp.text, 'html.parser')
# # child_info = child_page.find('div', id_='pointbyarea').find_all('td')
# # print(child_info)
