'''
Name : gradeSpider
Description : 爬取目标高校近三年的一本线
'''
import requests
import ast
import random
import re
import json

import time
import random

def random_sleep(mu=1, sigma=0.4):
    '''Pause execution for a normally distributed random duration.

    :param mu: mean sleep time in seconds
    :param sigma: standard deviation, controls how widely the delay varies
    '''
    duration = random.normalvariate(mu, sigma)
    # A non-positive sample would be rejected by time.sleep or skip the
    # delay entirely; fall back to the mean in that case.
    time.sleep(duration if duration > 0 else mu)

from bs4 import BeautifulSoup
# Pool of User-Agent strings rotated through so the scraper's requests look
# less like a single automated client.
# BUG FIX: a comma was missing after the "SE 2.X MetaSr" entry, so Python's
# implicit string concatenation silently fused it with the following
# "The World" entry into one malformed User-Agent (the list had 20 entries
# instead of 21).  Stray leading spaces — which would produce malformed
# header values — were also removed from the affected entries.
agent_list = [
    'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1)',
    'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Avant Browser)',
    'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; 360SE)',
    'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; SE 2.X MetaSr 1.0; SE 2.X MetaSr 1.0; .NET CLR 2.0.50727; SE 2.X MetaSr 1.0)',
    'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; The World)',
    'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1)',
    'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; TencentTraveler 4.0)',
    'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Maxthon 2.0)',
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_0) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11',
    'Opera/9.80 (Windows NT 6.1; U; en) Presto/2.8.131 Version/11.11',
    'Opera/9.80 (Macintosh; Intel Mac OS X 10.6.8; U; en) Presto/2.8.131 Version/11.11',
    'Mozilla/5.0 (Windows NT 6.1; rv:2.0.1) Gecko/20100101 Firefox/4.0.1',
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv:2.0.1) Gecko/20100101 Firefox/4.0.1',
    'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1)',
    'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0)',
    'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0)',
    'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0;',
    'Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; .NET4.0C; .NET4.0E; .NET CLR 2.0.50727; .NET CLR 3.0.30729; .NET CLR 3.5.30729; InfoPath.3; rv:11.0) like Gecko',
    'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:38.0) Gecko/20100101 Firefox/38.0',
    'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50',
    'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_8; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50'
]
# Index picked once at import time: every request in a single run reuses the
# same User-Agent.
randint = random.randint(0, len(agent_list) - 1)
def searchGradeBySchool(pro, sch, StuP, StuC):
    '''Fetch a school's recent admission score lines from Baidu's score API.

    :param pro: province the school is located in (e.g. "湖北")
    :param sch: school name
    :param StuP: province of the examinee
    :param StuC: examinee track selector (arts/science)
    :return: list of score strings parsed from the response, or a
             4-element placeholder list of '无' entries when no data is found
    '''
    default = ['无','  无','  无','  无']
    url = "https://sp0.baidu.com/8aQDcjqpAAV3otqbppnN2DJv/api.php?resource_id=34559&query" \
          "={}" \
          "&co=tr[select1={},select2={}]|th[select1={},select2={}]" \
          "&format=json&oe=utf-8&ie=utf-8&cb=jQuery110203946010434658" \
          "084_1624678955623&_=162467895" \
          "5625".format(pro + sch, StuP, StuC, StuP, StuC)
    headers = {
        'User-Agent': agent_list[randint]}

    # BUG FIX: the second positional argument of requests.get() is `params`,
    # not `headers`; the User-Agent was previously appended to the query
    # string instead of being sent as an HTTP header.
    req = requests.get(url, headers=headers)

    # The endpoint answers with JSONP; strip the /**/ comment guard first.
    result = req.text.replace("/**/", "")
    # A very short body means the query matched nothing.
    if(len(result)<200):
        return default

    # Slice out the value of the "data" field inside the JSONP wrapper.
    # (+8 skips past the '"data":' label plus one bracket; -4 trims the
    # trailing wrapper characters — offsets reverse-engineered from the
    # response format.)
    reObj = re.search(r'"data":', result).span()
    result = result[reObj[0] + 8:-4]
    # The sliced text is written with Python-compatible literals, so
    # literal_eval parses it safely (it never executes code, unlike eval).
    res_dict = ast.literal_eval(result)

    # Blank out 'loc' so the non-empty filter below drops it.
    res_dict['loc'] = " "

    # Collect every non-empty field value, preserving insertion order.
    resultlist = []
    for index in res_dict.keys():
        now_text = res_dict[index]
        if (now_text != "" and now_text != " "):
            resultlist.append(now_text)
    if(len(resultlist)<10):
        return default
    # Positions 0, 1 and 9 hold the fields of interest; index 9 is the
    # score table.  NOTE(review): these positions were reverse-engineered
    # from the API response — verify if the endpoint changes its schema.
    new_list = [resultlist[0], resultlist[1], resultlist[9]]

    # The score table is a stringified list of {'text': ...} cells; extract
    # each cell's text with a regex and strip the literal wrapper.
    grade_list = str(new_list[2])
    grade_pos = re.findall(r"{'text': '.*?'}", grade_list)
    resultss = []
    for i, cont in enumerate(grade_pos):
        cont = cont.replace("'text': ", "").replace("{'", "").replace("'}", "")
        # Skip cells 1, 6 and 11 — presumably repeated per-year header
        # columns; confirm against an actual response before changing.
        if(i!=1 and i!= 6 and i!= 11):
            resultss.append(cont)
    return resultss

#singleRes is a 15-element list; indices 0-3, 4-7 and 8-11 hold the data for the last three years
#singleRes = searchGradeBySchool(pro="湖北", sch="黄冈师范学院", StuP=StuPro, StuC=StuClass)

#下面要生成参数，即学校省份，名字，考生所在地，考生文理
'''返回类型 : 两个数组
   list1:省份列表
   list2:学校列表,为二维数组，一个维度为一个省份,对应list1
'''
# Province name -> national administrative-division code.
# BUG FIX: the 河北省 entry carried a stray trailing space ("13 "),
# inconsistent with every other code in the table.
ids = [{"name": "北京市", "id": "11"},
       {"name": "天津市", "id": "12"},
       {"name": "河北省", "id": "13"},
       {"name": "湖南省", "id": "43"},
       {"name": "广东省", "id": "44"},
       {"name": "广西壮族自治区", "id": "45"},
       {"name": "山西省", "id": "14"},
       {"name": "海南省", "id": "46"},
       {"name": "内蒙古自治区", "id": "15"},
       {"name": "重庆市", "id": "50"},
       {"name": "辽宁省", "id": "21"},
       {"name": "四川省", "id": "51"},
       {"name": "吉林省", "id": "22"},
       {"name": "贵州省", "id": "52"},
       {"name": "黑龙江省", "id": "23"},
       {"name": "云南省", "id": "53"},
       {"name": "上海市", "id": "31"},
       {"name": "西藏自治区", "id": "54"},
       {"name": "江苏省", "id": "32"},
       {"name": "陕西省", "id": "61"},
       {"name": "浙江省", "id": "33"},
       {"name": "安徽省", "id": "34"},
       {"name": "福建省", "id": "35"},
       {"name": "江西省", "id": "36"},
       {"name": "山东省", "id": "37"},
       {"name": "河南省", "id": "41"},
       {"name": "湖北省", "id": "42"},
       {"name": "甘肃省", "id": "62"},
       {"name": "青海省", "id": "63"},
       {"name": "宁夏回族自治区", "id": "64"},
       {"name": "新疆维吾尔自治区", "id": "65"},
       ]
'''按省份返回学校'''
def searchSchoolNameByProV(provname):
    '''Return the list of school names located in one province.

    :param provname: full province name, e.g. "湖北省"
    :return: list of school-name strings
    '''
    headers = {'User-Agent': agent_list[randint]}
    url = r'https://api.hcfpz.cn/un/schools?province={}'.format(provname)
    # BUG FIX: headers must be passed as a keyword argument — the second
    # positional argument of requests.get() is `params`, so they were never
    # sent as headers.  The old 'Host: yz.chsi.com.cn' entry was dropped: it
    # names a different site than api.hcfpz.cn and would misroute the
    # request once headers are actually transmitted.
    res = requests.get(url, headers=headers)
    # The body is plain JSON; normalize full-width parentheses first.
    dict_data = json.loads(res.text.replace("（", "(").replace("）", ")"))
    return [school['name'] for school in dict_data['data']]
#print(searchSchoolNameByProV("湖北省"))
'''返回中国所有学校以及对应省份'''
'''数据说明：  学校名字:省份'''
def searchSchoolName():
    '''Return a mapping of every school name to its province.

    :return: dict {school name: province name}
    '''
    headers = {'User-Agent': agent_list[randint]}
    url = r'https://api.hcfpz.cn/un/schools?province'
    # BUG FIX: headers must be passed as a keyword argument — the second
    # positional argument of requests.get() is `params`, so they were never
    # sent as headers.  The old 'Host: yz.chsi.com.cn' entry was dropped: it
    # names a different site than api.hcfpz.cn and would misroute the
    # request once headers are actually transmitted.
    res = requests.get(url, headers=headers)
    # The body is already JSON; loads is sufficient.
    dict_data = json.loads(res.text)
    return {school['name']: school['province'] for school in dict_data['data']}
#dictsss = searchSchoolName()
#print(dictsss['华中师范大学'])