import gzip
import json
import logging
from io import BytesIO
from urllib import request
from urllib.parse import urljoin

from bs4 import BeautifulSoup


def get_html(url):
    """Fetch *url* from gaokao.chsi.com.cn and return the body as UTF-8 text.

    The original advertised ``gzip, deflate, br`` but could only decode
    gzip, so a brotli or identity response would crash; we now advertise
    only ``gzip`` and decompress only when the server actually says the
    body is gzipped.

    :param url: absolute URL on gaokao.chsi.com.cn (the Host header is fixed).
    :return: decoded HTML string.
    :raises urllib.error.URLError: on network failure.
    """
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.212 Safari/537.36',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
        'Accept-Language': "zh-CN,zh;q=0.9",
        # Only advertise an encoding we can actually decode (stdlib has no brotli).
        'Accept-Encoding': 'gzip',
        'Connection': 'keep-alive',
        'Host': 'gaokao.chsi.com.cn',
    }
    req = request.Request(url, headers=headers)
    # Context manager closes the connection instead of leaking it.
    with request.urlopen(req) as response:
        body = response.read()
        content_encoding = response.headers.get('Content-Encoding', '')
    if 'gzip' in content_encoding:
        body = gzip.decompress(body)
    return body.decode('utf-8')


def _clean(text):
    """Drop embedded newlines/CRs and trim surrounding whitespace."""
    return text.replace('\n', '').replace('\r', '').strip()


def _parse_rank_rows(div):
    """Parse every <tr> of one ranking column into a list of row dicts."""
    return [
        {
            '排名': _clean(tr.find_all('span', {'class': 'index'})[0].get_text()),
            '专业': _clean(tr.find_all('div', {'class': 'overf'})[0].get_text()),
            '评分': _clean(tr.find_all('span', {'class': 'avg_rank'})[0].get_text()),
            '评分人数': _clean(tr.find_all('span', {'class': 'vote_num_detail'})[0].get_text()),
        }
        for tr in div.find_all('tr')
    ]


def get_grade(href):
    """Scrape a school detail page for satisfaction charts and major rankings.

    :param href: absolute URL of the school's detail page.
    :return: dict mapping each chart name to its ``data-id``, plus three
        ranking tables under the keys '专业满意度', '专业推荐指数' and
        '专业推荐人数' (each a list of row dicts).
    :raises IndexError: if the expected page structure is missing.

    Bug fixed vs. the original: the '排名' cell of the last ranking column
    was stored without the newline/whitespace cleanup applied everywhere
    else; all cells now go through the same ``_clean`` helper.
    """
    html = get_html(href)
    soup = BeautifulSoup(html, "lxml")
    grades = {}
    chart_list = soup.find_all('ul', {'class': 'clearfix'})[0]
    for li in chart_list.find_all('li'):
        name = li.find_all('div', {'class': 'yxk-myd-name'})[0].get_text()
        grades[name] = li.find_all('div', {'class': 'yxk-myd-echarts'})[0]['data-id']
    tables = []
    zypm = soup.find_all('div', {'class': 'yxk-col yxk-zypm clearfix'})[0]
    # NOTE(review): BeautifulSoup matches {'class': 'col-list'} against any
    # element whose class list *contains* 'col-list', so the
    # 'col-list last-list' divs can appear in both passes. The original code
    # relied on the resulting ordering (tables[0..2]), so both passes and
    # their order are preserved exactly.
    for div in zypm.find_all('div', {'class': 'col-list'}):
        tables.append(_parse_rank_rows(div))
    for div in zypm.find_all('div', {'class': 'col-list last-list'}):
        tables.append(_parse_rank_rows(div))
    grades['专业满意度'] = tables[0]
    grades['专业推荐指数'] = tables[1]
    grades['专业推荐人数'] = tables[2]
    return grades


def get_base_info(url):
    """Scrape one results page of the school search listing.

    :param url: URL of a chsi.com.cn search-results page.
    :return: list of dicts, one per school row, carrying the base table
        columns plus — best effort — the detail-page data from get_grade.
    """
    html = get_html(url)
    soup = BeautifulSoup(html, "lxml")
    rows = soup.find_all("tr")
    schools = []
    for row in rows[1:]:  # rows[0] is the table header
        cells = [td.get_text().strip() for td in row.find_all("td")]
        if len(cells) < 9:
            # Malformed/partial row: skip instead of crashing the whole
            # page on an IndexError (the original indexed unconditionally).
            continue
        info = {
            '院校名称': cells[0],
            '院校所在地': cells[1],
            '教育行政主管部门': cells[2],
            '院校类型': cells[3],
            '学历层次': cells[4],
            # '\ue664' is the icon-font glyph the site uses as a check mark.
            '一流大学': cells[5] == '\ue664',
            '一流学科': cells[6] == '\ue664',
            '研究生院': cells[7] == '\ue664',
            '满意度': cells[8],
        }
        try:
            href = row.find_all('td', {'class': 'js-yxk-yxmc'})[0].a['href']
            # urljoin handles both absolute and relative hrefs without
            # producing a doubled slash like plain string concatenation.
            info.update(get_grade(urljoin('https://gaokao.chsi.com.cn/', href)))
        except Exception:
            # Best effort: keep the base columns when the detail page fails,
            # but leave a trace instead of silently swallowing the error.
            logging.getLogger(__name__).warning(
                'failed to fetch grades for %s', info['院校名称'], exc_info=True)
        schools.append(info)
    return schools
    
if __name__ == '__main__':
    # Scrape one page of the nationwide school listing and dump it as JSON.
    start_url = 'https://gaokao.chsi.com.cn/sch/search--ss-on,option-qg,searchType-1,start-2840.dhtml'
    records = get_base_info(start_url)
    with open('data.json', 'w', encoding='utf-8') as out_file:
        json.dump(records, out_file, ensure_ascii=False, indent=4)

