'''
author: leemars
time: 2021/1/1
language: python
'''
import requests
import lxml
import xlwt
from bs4 import BeautifulSoup
import bs4
import re
import xlrd
from tqdm import tqdm
import time

# --- User-editable section ---
mldm = '07' # discipline category code (mldm) inserted into the query URLs
yjxkdm = '0701' # first-level subject code (yjxkdm) inserted into the query URLs

# Shared HTTP headers: a browser User-Agent so the site serves normal pages
header = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.198 Safari/537.36 Edg/86.0.622.69'
}


#获取院校信息
def sch_obtion():
    """Scrape the full school list from yz.chsi.com.cn.

    Returns a flat list alternating school name and province:
    ``[name0, province0, name1, province1, ...]`` — the shape that
    ``schinf_obtion`` consumes in pairs.
    """
    # The province is the first <td> cell of each table row.
    findsch_sf = re.compile(r'<td>(.*?)</td>')
    data_sch = []
    # The listing is paginated 20 rows per page; 'start' is the row offset.
    # NOTE(review): 841 is a hard-coded total row count — verify it still
    # matches the site before relying on full coverage.
    for start in tqdm(range(0, 841, 20)):
        url_sch = 'https://yz.chsi.com.cn/sch/?start=' + str(start)
        html_sch = requests.get(url_sch, headers=header).content
        soup_sch = BeautifulSoup(html_sch, 'html.parser')
        tbody = (soup_sch.find_all('div', class_="yxk-table")[0]
                 .find_all('table', class_="ch-table")[0]
                 .find_all('tbody')[0])
        for item in tbody.find_all('tr'):
            sch = item.find('a', target='_blank').text.strip()  # school name
            data_sch.append(sch)
            sch_sf = re.findall(findsch_sf, str(item))[0]  # province
            data_sch.append(sch_sf)
    return data_sch


#获取考研信息
def _clean_cell(td_html):
    """Strip the <td>/<script> wrapper text the site puts around cell values."""
    return (td_html.replace('<td class="ch-table-center">', ' ')
            .replace('<script language="javascript">', '')
            .replace('      document.write(cutString(', '')
            .replace('      </script>\n</td>', '')
            .replace('));', '')
            .replace('document.write(cutString(', '')
            .replace('     </script>\n</td>', '')
            .strip())


def _fetch_school_id(dwmc):
    """Look up the 5-digit school code for school *dwmc* (one HTTP request)."""
    url = ('https://yz.chsi.com.cn/zsml/queryAction.do?ssdm=&dwmc=' + dwmc +
           '&mldm=' + mldm + '&mlmc=&yjxkdm=' + yjxkdm + '&xxfs=&zymc=')
    soup = BeautifulSoup(requests.get(url, headers=header).content, 'html.parser')
    return re.findall(r'(\d\d\d\d\d)', str(soup.select('table.ch-table')))[0]


def _exam_subjects(soup2, dwmc):
    """Extract the four exam-subject cells from a detail page.

    Returns (politics, foreign_language, subject1, subject2); any cell that
    cannot be found comes back as '' and the school name is printed so the
    gap is visible in the console, matching the original behaviour.
    """
    try:
        tds = soup2.find_all('tbody', class_='zsml-res-items')[0].select('td')
    except (IndexError, AttributeError):
        print(dwmc)
        return '', '', '', ''
    cells = [td.text for td in tds[:4]]
    while len(cells) < 4:
        print(dwmc)
        cells.append('')
    zz, wy, ywk1, ywk2 = cells
    return zz, wy.replace('\r\n', ''), ywk1, ywk2


def schinf_obtion(data_sch):
    """Scrape admissions info for every school in *data_sch*.

    data_sch: flat [name, province, name, province, ...] list as returned by
    ``sch_obtion``.
    Returns a list of 13-field rows: school name, school code, exam method,
    department, major, research direction, study mode, planned enrollment,
    remarks, politics, foreign language, subject 1, subject 2 — the exact
    column order ``save_data`` writes out.
    """
    ss = ['北京','天津','河北','山西','内蒙古','辽宁','吉林','黑龙江','上海','江苏','浙江','安徽','福建','江西','山东','河南','湖北','湖南','广东','广西','海南','重庆','四川','贵州','云南','西藏','陕西','甘肃','青海','宁夏','新疆']
    ssdms = ['11','12','13','14','15','21','22','23','31','32','33','34','35','36','37','41','42','43','44','45','46','50','51','52','53','54','61','62','63','64','65']
    # Hoisted, compiled once: cells in the results table come in two flavours.
    find_center = re.compile(r'<td class="ch-table-center">(.*?)</td>')
    find_td = re.compile(r'<td>(.*?)</td>')
    re_page = re.compile(r'>\d<')

    print('开始爬取')
    all_data = []
    # data_sch alternates name/province, hence the stride of 2.
    for idx in tqdm(range(0, len(data_sch) - 1, 2)):
        dwmc = data_sch[idx]                              # school name
        ssdm = ssdms[ss.index(data_sch[idx + 1])]         # province code

        # Discover how many result pages this school has.
        url_page = ('https://yz.chsi.com.cn/zsml/querySchAction.do?ssdm=' + ssdm +
                    '&dwmc=' + dwmc + '&mldm=&mlmc=&yjxkdm=' + yjxkdm +
                    '&xxfs=&zymc=')
        soup_page = BeautifulSoup(requests.get(url_page, headers=header).content,
                                  'html.parser')
        pages = re.findall(re_page, str(soup_page.find_all('div', class_='zsml-page-box')))
        page_all = int(pages[-1].replace('>', '').replace('<', '')) + 1

        # FIX: the school-id lookup depends only on the school, so fetch it
        # once here instead of once per table row as the original did.
        schoolid = _fetch_school_id(dwmc)

        for pageno in range(1, page_all):
            url = ('https://yz.chsi.com.cn/zsml/querySchAction.do?ssdm=' + ssdm +
                   '&dwmc=' + dwmc + '&mldm=' + mldm + '&mlmc=&yjxkdm=' + yjxkdm +
                   '&xxfs=&zymc=' + '&pageno=' + str(pageno))
            soup = BeautifulSoup(requests.get(url, headers=header).content,
                                 'html.parser')
            rows = (soup.find_all('table', class_='ch-table')[0]
                    .find_all('tbody')[0].find_all('tr'))
            for row in rows:
                row_html = str(row)

                ksfs = re.findall(find_center, row_html)[0]   # exam method
                yxs = re.findall(find_td, row_html)[0]        # department
                zy = re.findall(find_td, row_html)[1]         # major
                # FIX: the original read yjfx outside its try, so a row with
                # no research-direction cell raised NameError (or reused the
                # previous row's value). Default to '' instead.
                yjfx = ''
                try:
                    yjfx = re.findall(find_td, row_html)[2]   # research direction
                except IndexError:
                    pass
                xxfs = re.findall(find_center, row_html)[1]   # study mode

                nzsrs = _clean_cell(str(row.select('td.ch-table-center')[3]))
                try:
                    bz = _clean_cell(str(row.select('td.ch-table-center')[6]))
                except IndexError:
                    bz = ''

                # Detail-page id: school code + '21' (admissions-year segment,
                # hard-coded by the original) + department/major/direction ids.
                url2 = ('https://yz.chsi.com.cn/zsml/kskm.jsp?id=' + schoolid +
                        '21' + yxs[1:4] + zy[1:7] + yjfx[1:3] + '1')
                soup2 = BeautifulSoup(requests.get(url2, headers=header).content,
                                      'lxml')
                zz, wy, ywk1, ywk2 = _exam_subjects(soup2, dwmc)

                all_data.append([dwmc, schoolid, ksfs, yxs, zy, yjfx, xxfs,
                                 nzsrs, bz, zz, wy, ywk1, ywk2])

    print('爬取成功')
    return all_data


#数据保存
def save_data(all_data):
    if len(all_data) != 0:
        #print('you are win')
        print("开始保存")
        data_length = len(all_data)
        savepath = "C:\\Users\\Administrator\\Desktop\\研招网数据.xls"
        book = xlwt.Workbook(encoding="utf-8",style_compression=0)
        sheet = book.add_sheet('研招网数据',cell_overwrite_ok=True)
        col =("学校名称","学校代码","考试方式","院系所","专业","研究方向","学习方式","拟招生人数","备注","政治","外语","业务课1","业务课2")
        for i in range(0,13):
            sheet.write(0,i,col[i])
        for i in tqdm(range(0,data_length)):
            #print('保存考研信息')
            data = all_data[i]
            for j in range(0,13):
                sheet.write(i+1,j,data[j])
        
        book.save(savepath)
        print('保存成功')


# Main program: scrape the school list, then per-school admissions info,
# then save everything to an .xls file. Guarded so importing this module
# does not kick off the full scrape.
if __name__ == '__main__':
    save_data(schinf_obtion(sch_obtion()))