# -*- coding: utf-8 -*-
# import urllib.request
# from bs4 import BeautifulSoup
import requests
import re
from lxml import etree
import xlwt
# from html.parser import HTMLParser
# from lxml.html import fromstring, tostring
def url_get(url):
    """Fetch *url* with a browser User-Agent and return an lxml HTML tree.

    Returns None when the request fails.  (BUG FIX: the original left
    ``html`` unbound on failure, so the ``return html`` line raised a
    confusing NameError instead; it also caught only ConnectionError,
    while the 15-second timeout raises requests.exceptions.Timeout.)
    """
    head = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:82.0) Gecko/20100101 Firefox/82.0"}
    html = None  # stays None when the request fails
    try:
        response = requests.get(url, headers=head, timeout=15)
        text = response.content.decode("utf-8")
        html = etree.HTML(text)
    except requests.exceptions.RequestException:
        # RequestException covers ConnectionError, Timeout, HTTPError, ...
        print("连接超时")
    return html
def newFile():
    """Create the result workbook, write the header row, and save it.

    Returns the xlwt Workbook so the caller can fetch the sheet and keep
    appending/saving rows.
    """
    workbook = xlwt.Workbook(encoding="utf-8")  # create the workbook object
    worksheet = workbook.add_sheet('院校基本信息', cell_overwrite_ok=True)
    labellist = ['序号','院校名称','所在地','院校隶属','研究生院','自划线院校','院校简介','周边环境','院校领导','院校官网地址','院系设置','重点实验室','重点学科','可授予学位','硕士专业','博士专业','学费','调剂办法','奖贷学金','科研条件','网报公报','招生简章']
    # enumerate instead of range(len(...)) — same cells, clearer intent
    for j, label in enumerate(labellist):
        worksheet.write(0, j, label)
    workbook.save('考研信息数据爬取结果.xls')
    return workbook

def savedata(worksheet, dataList):
    """Write one school record into *worksheet*.

    The row index is taken from ``dataList[0]`` (the record's serial
    number); each element of ``dataList`` becomes one column cell.
    """
    row = dataList[0]
    for col, value in enumerate(dataList):
        worksheet.write(row, col, value)
    
def getCondition(url, dataList):
    """Scrape the school-introduction page and append three fields.

    Appends, in order: school introduction, surrounding environment and
    school leadership (all whitespace stripped), then delegates to
    getOffice() to look up the official website via Baidu.
    """
    page = url_get(url)
    root = page.xpath('//div[@class="container"]')[0]
    # string(...) flattens every text node under the given div
    leader = re.sub(r'\s+', "", root.xpath('string(./div[2])'))   # leadership
    content = re.sub(r'\s+', "", root.xpath('string(./div[4])'))  # introduction
    env = re.sub(r'\s+', "", root.xpath('string(./div[6])'))      # environment
    dataList.append(content)
    dataList.append(env)
    dataList.append(leader)
    getOffice(dataList)  # official website address, via Baidu search

def getSetting(url, dataList):
    """Scrape the departments page and append four fields.

    Appends, in order: department names interleaved with their links,
    key laboratories, key subjects, and conferrable degrees — all with
    whitespace removed.

    Fixes: the original shadowed the builtin ``str`` with a local
    accumulator and built the string with repeated ``+=``; this version
    collects parts in a list and joins once.
    """
    html = url_get(url)
    root = html.xpath('//div[@class="container"]')[0]
    names = root.xpath('./div[2]/ul/li/p/a/text()')
    links = root.xpath('./div[2]/ul/li/p/a/@href')
    # interleave each department name with its link (if one exists)
    parts = []
    for i, name in enumerate(names):
        parts.append(name)
        if i < len(links):
            parts.append(links[i])
    dataList.append(re.sub(r'\s+', "", "".join(parts)))
    # key laboratories
    rea = root.xpath('string(./div[4])')
    dataList.append(re.sub(r'\s+', "", rea))
    # key subjects
    cor = root.xpath('string(./div[6])')
    dataList.append(re.sub(r'\s+', "", cor))
    # conferrable degrees
    stu = root.xpath('string(./div[8])')
    dataList.append(re.sub(r'\s+', "", stu))

def getStu(url, dataList):
    """Scrape the majors page and append master's and doctoral programs.

    Both fields have all whitespace stripped before being appended.
    """
    page = url_get(url)
    root = page.xpath('//div[@class="container"]')[0]
    # master's programs
    masters = root.xpath('string(./div[2])')
    dataList.append(re.sub(r'\s+', "", masters))
    # doctoral programs
    doctors = root.xpath('string(./div[4])')
    dataList.append(re.sub(r'\s+', "", doctors))

def getJoin(url,dataList):
    # NOTE(review): incomplete function — the page is fetched but `content`
    # is never used and nothing is appended to dataList.  getdata() computes
    # sub-page URLs (li[4]/li[5]) that were presumably meant for this; confirm
    # intent before wiring it up.
    html = url_get(url)
    content = html.xpath('//div[@class="container"]')
    
def getFee(url, dataList):
    """Scrape the tuition/adjustment page and append one combined field.

    Appends text + link when both exist, whichever one exists otherwise,
    and "" when neither does.

    BUG FIX: the original tested ``len(string)!=0&len(link)!=0``, which
    Python parses as the chained comparison
    ``len(string) != (0 & len(link)) != 0`` — always False — so the
    "both present" branch never ran, and the later ``len(link)==0``
    branch clobbered a valid value with "".  Debug prints removed.
    """
    html = url_get(url)
    schoolLink = html.xpath('//div[@class="yxk-content"]')
    string = schoolLink[0].xpath('./p/text()')
    link = schoolLink[0].xpath('./p/a/@href')
    if string and link:
        data = string[0] + link[0]
    elif string:
        data = string[0]
    elif link:
        data = link[0]
    else:
        data = ""
    dataList.append(data)

def getOffice(dataList):
    """Look up the school's official website via a Baidu search.

    Uses the school name (``dataList[1]``) as the query, takes the first
    displayed URL from the results, and appends it prefixed with
    "https://" — or appends "" when no result is found.
    """
    url = "https://www.baidu.com/s?ie=utf-8&f=8&rsv_bp=1&tn=monline_3_dg&wd=" + dataList[1]
    page = url_get(url)
    shown = page.xpath('//span[@class="c-showurl"]/text()')
    office = "https://" + shown[0] if shown else ""
    dataList.append(office)
def getdata(url,dataList):
    """Visit a school's detail page and scrape each sub-page in order:
    introduction (li[1]), departments (li[2]), majors (li[3]) and the
    tuition link, appending fields to dataList as it goes.

    NOTE(review): the li[4] and li[5] URLs below are computed but never
    passed to any scraper (getJoin() is never called) — dead code or an
    unfinished feature; confirm before removing.
    """
    html = url_get(url)
    # sub-page link list on the detail page
    schoolLink = html.xpath('//ul[@class="yxk-link-list clearfix"]')
    curl = schoolLink[0].xpath('./li[1]/a/@href')
    curlc = 'https://yz.chsi.com.cn'  + curl[0]
    # fetch introduction / environment / leadership
    getCondition(curlc,dataList)
    curl = schoolLink[0].xpath('./li[2]/a/@href')
    curlc = 'https://yz.chsi.com.cn'  + curl[0]
    # fetch departments / labs / key subjects / degrees
    getSetting(curlc,dataList)
    curl = schoolLink[0].xpath('./li[3]/a/@href')
    curlc = 'https://yz.chsi.com.cn'  + curl[0]
    # fetch master's / doctoral programs
    getStu(curlc,dataList)
    curl = schoolLink[0].xpath('./li[4]/a/@href')
    curlc = 'https://yz.chsi.com.cn'  + curl[0]
    # NOTE(review): computed but unused (see docstring)
    curl = schoolLink[0].xpath('./li[5]/a/@href')
    curlc = 'https://yz.chsi.com.cn'  + curl[0]
    # NOTE(review): computed but unused (see docstring)
    linkPath = html.xpath('//div[@class="yxk-other-link"]')
    Fee = linkPath[0].xpath('./a[3]/@href')
    curlc = 'https://yz.chsi.com.cn'  + Fee[0]
    # fetch tuition / adjustment info
    getFee(curlc,dataList)
    
def askurl(Number, url, filepage, efile):
    """Scrape one paginated school-list page and write each school's row.

    Args:
        Number: serial number of the first school on this page (also the
            spreadsheet row index for that school).
        url: the search-result page URL for this page of schools.
        filepage: the xlwt worksheet rows are written to.
        efile: the xlwt workbook, saved after every row so progress survives
            a crash mid-run.

    Returns the next unused serial number.

    Fixes: removed the unused ``text2`` round-trip through etree.tostring,
    and ``nameList`` now defaults to [] so a page with no result table no
    longer raises NameError.  As in the original, only the LAST
    yxk-table's rows are processed (in practice there is exactly one).
    """
    html = url_get(url)  # parsed XPath tree for the list page
    collegeList = html.xpath('//div[@class="yxk-table"]')  # school-list table(s)
    nameList = []
    for table in collegeList:
        nameList = table.xpath('./table/tbody/tr')
    for row in nameList:
        dataList = [Number]
        Number += 1
        name = row.xpath('./td[1]/a/text()')
        name = re.sub(r'\s+', "", name[0])  # strip all whitespace from the name
        surl = row.xpath('./td[1]/a/@href')
        address = row.xpath('./td[2]/text()')
        belong = row.xpath('./td[3]/text()')
        research = row.xpath('./td[4]/i/text()')   # icon present => has grad school
        examself = row.xpath('./td[5]/i/text()')   # icon present => self-scoring
        surlc = 'https://yz.chsi.com.cn' + surl[0]
        dataList.append(name)
        dataList.append(address[0])
        dataList.append(belong[0])
        dataList.append('是' if research else '否')
        dataList.append('是' if examself else '否')
        getdata(surlc, dataList)  # scrape all detail sub-pages for this school
        print(dataList)
        savedata(filepage, dataList)
        efile.save('考研信息数据爬取结果.xls')  # save after each row
    return Number
def main():
    """Create the output workbook and scrape every list page in turn."""
    workbook = newFile()
    sheet = workbook.get_sheet('院校基本信息')
    serial = 1
    base_url = "https://yz.chsi.com.cn/sch/search.do?ssdm=51&yxls="  # Sichuan province
    # two pages of 20 schools each; the offset is the start index
    for page in range(2):
        serial = askurl(serial, base_url + str(page * 20), sheet, workbook)
        
# Entry point: run the scraper only when executed as a script.
if __name__ =="__main__":
    main()

