import requests
from bs4 import BeautifulSoup
import json
import re
from time import sleep
from random import random
# Not optimized; single-threaded.

# NOTE: remember to update these cookies before running (they are a captured
# browser session and expire).
cookies = {"JSESSIONID":"3C8AFD46752D35BCAD215E39B8DBC4B8",
            "zg_did":"%7B%22did%22%3A%20%22174399263934f3-07686ae264d9f08-31624644-1fa400-17439926394bf%22%7D",
            "zg_adfb574f9c54457db21741353c3b0aa7":"%7B%22sid%22%3A%201601474416937%2C%22updated%22%3A%201601474416950%2C%22info%22%3A%201601474416947%2C%22superProperty%22%3A%20%22%7B%7D%22%2C%22platform%22%3A%20%22%7B%7D%22%2C%22utm%22%3A%20%22%7B%7D%22%2C%22referrerDomain%22%3A%20%22yz.chsi.com.cn%22%2C%22landHref%22%3A%20%22https%3A%2F%2Fyz.chsi.com.cn%2Fzsml%2FqueryAction.do%22%7D",
            "_ga":"GA1.3.622649380.1598693730",
            "CHSICC_CLIENTFLAGZSML":"fdc0c72e078135f955e18f6745458ca4",
            "_gid":"GA1.3.1345412350.1601471826",
            "acw_tc":"2760825f16014744164306009ea08deae0052096efe018a12462542bfe342b"}

# Desktop browser User-Agent so the site serves the normal HTML pages.
headers = {'User-Agent':'Mozilla/5.0 (X11; Linux x86_64; rv:79.0) Gecko/20100101 Firefox/79.0'}

def get_html(url, page=1, deepin=True, ssdm='', dwmc=''):
    """Fetch one page of the chsi catalog.

    deepin=True  -> POST the search form (with page number, province code
                    ``ssdm`` and school name ``dwmc``) to ``url``.
    deepin=False -> plain GET of ``url``.

    Returns the response text, or ``0`` on any request/HTTP failure
    (callers rely on this falsy sentinel).
    """
    if deepin:
        # mldm is the top-level discipline code, yjxkdm the sub-discipline
        # code; change these values to change the crawl scope.
        data = {"ssdm": ssdm, "dwmc": dwmc, "mldm": "08", "mlmc": "",
                "yjxkdm": "0812", "zymc": "", "xxfs": "", "pageno": str(page)}
        try:
            r = requests.post(url, cookies=cookies, data=data, headers=headers)
            r.raise_for_status()
        except requests.RequestException:
            # Narrowed from a bare except: only network/HTTP errors expected.
            return 0
        print(r)
        sleep(random() * 2)  # polite random delay between requests
        return r.text
    else:
        try:
            r = requests.get(url, headers=headers)
            r.raise_for_status()
        except requests.RequestException:
            return 0
        sleep(random() * 2)
        print(r)
        return r.text

def data_clear1(text):
    """Parse the school-list page into a list of dicts.

    Each dict holds the school name, location, three yes/no flags
    (graduate school / self-determined cutoff / doctoral program) and the
    absolute URL of the school's detail page.
    """
    soup = BeautifulSoup(text, 'lxml')
    rows = soup.find("tbody").find_all('tr')
    schools = list()
    for row in rows:
        tds = row.find_all('td')  # hoisted: was re-parsed for every column
        link = row.find('a')
        d = dict()
        d['学校'] = link.string
        d["位置"] = tds[1].string
        # An empty flag cell contains the EN-SPACE character U+2002.
        d["研究生院"] = "是" if tds[2].string != '\u2002' else "否"
        d["自划线院校"] = "是" if tds[3].string != '\u2002' else "否"
        d['博士点'] = "是" if tds[4].string != '\u2002' else "否"
        d['URL'] = "https://yz.chsi.com.cn" + link['href']
        schools.append(d)
    return schools

def data_clear3(text):  # exam scope / subject ranges
    """Extract the exam-subject lists from a major's detail page.

    Returns one entry per ``tbody.zsml-res-items`` table; each entry is the
    list of stripped ``<td>`` texts (exam subject codes/names).

    Bug fix: the original returned the inner loop variable ``d`` — i.e. only
    the LAST table's data, and a NameError when there were no tables —
    instead of the accumulator.
    """
    soup = BeautifulSoup(text, 'lxml')
    tables = soup.find_all("tbody", {'class': 'zsml-res-items'})
    results = []
    for table in tables:
        cells = re.findall("<td>(.*?)<span", str(table), re.S)
        results.append([cell.strip() for cell in cells])
    return results

def get_area(url):
    """GET a major's exam-range page; return its HTML text, or ``0`` on
    any request/HTTP failure (falsy sentinel, matching get_html)."""
    try:
        r = requests.get(url, headers=headers, cookies=cookies)
        r.raise_for_status()
    except requests.RequestException:
        # Narrowed from a bare except: only network/HTTP errors expected.
        return 0
    sleep(random() * 2)  # polite random delay between requests
    print(r)
    return r.text

def data_clear2(text):
    """Parse a school's major-list page (following pagination) into a list
    of dicts: exam method, department, major, planned enrollment, research
    direction, study mode, cross-major flag, and the exam ranges fetched
    from each row's own detail link.
    """
    soup = BeautifulSoup(text, 'lxml')
    rows = soup.find("tbody").find_all('tr')
    # Append the rows of pages 2..N, if the pager shows more than one page.
    try:
        # With a single page, indexing the pager raises IndexError and we
        # fall through to the except — that is the normal one-page case.
        # The second-to-last pager link holds the highest page number.
        last_page = soup.find_all('div', {'class': 'zsml-page-box'})[0].ul.find_all('a')[-2].string
        dwmc = re.findall('''name="dwmc" value="(.*?)"/>''', text)[0]
        ssdm = re.findall('''doSelect\(document.getElementById\("form2"\).ssdm,'(\d+)'\);''', text)[0]
        for page in range(2, int(last_page) + 1):
            html = get_html('https://yz.chsi.com.cn/zsml/querySchAction.do',
                            page=page, ssdm=ssdm, dwmc=dwmc)
            rows.extend(BeautifulSoup(html, 'lxml').find('tbody').find_all('tr'))
    except Exception:
        # Best-effort pagination (was a bare except): a single-page result
        # is expected, not an error.
        pass

    majors = list()
    for row in rows:
        tds = row.find_all('td')   # hoisted: was re-parsed for every column
        links = row.find_all('a')
        d = dict()
        d['考试方式'] = tds[0].string
        d["院系所"] = tds[1].string
        d["专业"] = tds[2].string
        d["拟招人数"] = re.findall("document\.write\(cutString\('专业：(.*?)',6\)\);", str(row))
        d["研究方向"] = tds[3].string
        d['学习方式'] = tds[4].string
        d['跨专业'] = links[1].string if links[1].string != None else "不确定"
        # Bug fix: the original used b[0] (the FIRST row) here, so every
        # major got the first major's exam range; use this row's own link.
        url = "https://yz.chsi.com.cn" + tds[-3].find('a')['href']
        d['考试范围'] = data_clear3(get_area(url))
        majors.append(d)
    return majors

def main():
    """Crawl result pages 2-10, enrich each school with its majors and
    append each record as one JSON line to 学校.json.

    Appending one line per school (mode 'a+') preserves progress if the
    crawl is interrupted.
    """
    schools = list()
    for page in range(2, 11):
        listing = get_html("https://yz.chsi.com.cn/zsml/queryAction.do", page=page)
        sleep(random() * 2)
        # The original reused the name ``i`` for both loops, shadowing the
        # outer loop variable — renamed for clarity.
        for school in data_clear1(listing):
            detail_html = get_html(school['URL'], deepin=False)
            sleep(random() * 2)
            school["专业方向"] = data_clear2(detail_html)
            schools.append(school)
            # ensure_ascii=False + utf-8 keeps Chinese readable in the file.
            with open("学校.json", 'a+', encoding="utf-8") as f:
                f.write(json.dumps(school, ensure_ascii=False) + '\n')
if __name__ == "__main__":
    main()
# Offline debugging helper: parse a saved page instead of hitting the network.
# with open('4.html','r',encoding='utf-8') as f:
#     a = f.read()

# print(data_clear2(a))

# Date: 2020-10-01 7:41 PM
# By 江户川_柯北

