# -*- coding: utf-8 -*-
# import urllib.request
# from bs4 import BeautifulSoup
import requests
import re
from lxml import etree
import xlwt
import time
import os
from docx import Document
from docx.shared import Inches
# Next free row index in the '硕士专业' (master's majors) sheet; 1-based because
# row 0 holds the header labels. Advanced by savedata2 after each written row.
g_ssCounter =1
# Next free row index in the '博士专业' (doctoral majors) sheet; advanced by savedata3.
g_bsCounter =1
def clear():
    """Clear the terminal screen using the command appropriate for the OS.

    The original always ran ``clear``, which does nothing useful on Windows
    (where this script's backslash paths suggest it may run); use ``cls`` there.
    """
    os.system("cls" if os.name == "nt" else "clear")
# from html.parser import HTMLParser
# from lxml.html import fromstring, tostring
def url_get(url):
    """Fetch *url* with a desktop-Firefox User-Agent and return an lxml tree.

    Returns the ``etree.HTML`` element for the UTF-8-decoded response body.
    Raises ``requests.RequestException`` (after printing a notice) when the
    request fails or times out.

    The original caught ``UnboundLocalError``, which the try body can never
    raise — on a network error the requests exception propagated uncaught,
    and the bare ``return html`` after the handler would itself raise
    ``UnboundLocalError``. Catching the real exception type fixes both.
    """
    head = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:82.0) Gecko/20100101 Firefox/82.0"}
    try:
        response = requests.get(url, headers=head, timeout=15)
    except requests.RequestException:
        print("连接超时")  # connection timed out
        raise
    text = response.content.decode("utf-8")
    return etree.HTML(text)
def newFile():
    """Create the result workbook with three pre-labelled sheets.

    Sheets: school basics, master's majors, doctoral majors — each with a
    header row at index 0. The workbook is saved once immediately so a file
    exists even if the crawl aborts early.

    Returns the xlwt Workbook object.
    """
    workbook = xlwt.Workbook(encoding="utf-8")
    worksheet = workbook.add_sheet('院校基本信息', cell_overwrite_ok=True)
    worksheet2 = workbook.add_sheet('硕士专业', cell_overwrite_ok=True)
    worksheet3 = workbook.add_sheet('博士专业', cell_overwrite_ok=True)
    labellist = ['序号','院校名称','所在地','院校隶属','研究生院','自划线院校','院校简介','周边环境','院校领导','院校官网地址','院系设置','重点实验室','重点学科','可授予学位','学费']
    for j, label in enumerate(labellist):
        worksheet.write(0, j, label)
    # the two majors sheets share one header layout
    labellist2 = ['序号','院校序号','院校名称','学科大类','专业名称','专业代码']
    for j, label in enumerate(labellist2):
        worksheet2.write(0, j, label)
        worksheet3.write(0, j, label)
    workbook.save('考研信息数据爬取结果.xls')
    return workbook
def save_as_doc(url, dataList):
    """Download the article at *url* and archive it as a .doc under the school's folder.

    dataList[1] is the school name; files land in ``doc文件下载/<school>/<title>.doc``
    and existing files are not re-downloaded.

    Fixes: the original hard-coded ``\\\\`` separators (breaking on non-Windows)
    and used exists+mkdir pairs; ``os.path.join`` + ``os.makedirs(exist_ok=True)``
    is portable and race-free.
    """
    school_dir = os.path.join('doc文件下载', dataList[1])
    os.makedirs(school_dir, exist_ok=True)
    html = url_get(url)
    page = html.xpath('//div[@class="container"]')
    title = page[0].xpath('./h2/text()')
    content = page[0].xpath('./div/p')
    doc_path = os.path.join(school_dir, title[0] + ".doc")
    if not os.path.exists(doc_path):
        document = Document()
        document.add_heading(title[0], 0)
        for paragraph in content:
            # collapse all whitespace, then indent like the original output
            word = re.sub(r'\s+', "", paragraph.xpath('string()'))
            document.add_paragraph("    " + word)
        try:
            document.save(doc_path)
        except OSError:
            print("有个文件没有保存成功，请手动前往现场查看问题")

def savedata(worksheet, dataList):
    """Write dataList into the worksheet row whose index is dataList[0],
    one value per column starting at column 0."""
    row = dataList[0]
    for col, value in enumerate(dataList):
        worksheet.write(row, col, value)
    
def getCondition(url, dataList):
    """Scrape the school-introduction page at *url*.

    Appends three whitespace-stripped text fields to dataList — the school
    introduction, the surrounding environment, and the school leadership —
    then asks getOffice to append the official website address.
    """
    container = url_get(url).xpath('//div[@class="container"]')[0]
    # div[2] = leadership, div[4] = introduction, div[6] = environment
    leaders, intro, environment = [
        re.sub(r'\s+', "", container.xpath('string(./div[%d])' % n))
        for n in (2, 4, 6)
    ]
    dataList.append(intro)
    dataList.append(environment)
    dataList.append(leaders)
    getOffice(dataList)  # look up the official site address via Baidu

def getSetting(url, dataList):
    """Scrape the departments/facilities page at *url*.

    Appends four whitespace-stripped text fields to dataList, in order:
    departments (div[2]), key laboratories (div[4]), key disciplines
    (div[6]) and grantable degrees (div[8]).

    Fixes: the original shadowed the builtin ``str`` with a dead local and
    copy-pasted the same extract/strip/append sequence four times.
    """
    container = url_get(url).xpath('//div[@class="container"]')[0]
    for div_index in (2, 4, 6, 8):
        text = container.xpath('string(./div[%d])' % div_index)
        dataList.append(re.sub(r'\s+', "", text))

def savedata2(workSheet, dataList):
    """Append dataList as the next row of the master's-majors sheet,
    advancing the module-level row counter g_ssCounter."""
    global g_ssCounter
    row = g_ssCounter
    for col, value in enumerate(dataList):
        workSheet.write(row, col, value)
    g_ssCounter = row + 1
def savedata3(workSheet, dataList):
    """Append dataList as the next row of the doctoral-majors sheet,
    advancing the module-level row counter g_bsCounter."""
    global g_bsCounter
    row = g_bsCounter
    for col, value in enumerate(dataList):
        workSheet.write(row, col, value)
    g_bsCounter = row + 1
def _parse_major_tab(tab):
    """Parse one majors tab element into a list of (category, name, code) tuples.

    Each group under div[2] lists majors as "<name>(<code>)" strings; some
    schools wrap the text in an <a> instead of the <li> itself, which is
    detected by the first entry stripping to an empty string.
    """
    categories = tab.xpath('./div[1]//div/a/text()')
    rows = []
    for i, group in enumerate(tab.xpath('./div[2]/div')):
        names = group.xpath('./ul/li/text()')
        # fall back to the <a> text when the <li> text is blank/missing
        if not names or re.sub(r'\s+', "", names[0]) == "":
            names = group.xpath('./ul/li/a/text()')
        for raw in names:
            entry = re.sub(r'\s+', "", raw)
            # entry presumably ends with "(123456)": last 6 chars inside the
            # parentheses are the major code, the rest is the major name
            code = entry[-7:-1]
            name = entry[0:-8]
            rows.append((categories[i], name, code))
    return rows

def getStu(url, dataList, efile):
    """Scrape the majors page at *url* for one school.

    Writes master's majors (first tab, if any) via savedata2 and doctoral
    majors (second tab, if any) via savedata3 into the matching sheets of
    the workbook *efile*. dataList supplies the school serial and name.

    Fixes: the two tab branches were near-identical copy-paste; the shared
    parsing now lives in _parse_major_tab, which also guards against an
    empty <li> text list (the original raised IndexError there).
    """
    html = url_get(url)
    tabs = html.xpath('//div[@class="tab-container zyk-zyfb-tab yxk-tab"]')
    if len(tabs) != 0:
        sheet = efile.get_sheet('硕士专业')
        for category, name, code in _parse_major_tab(tabs[0]):
            savedata2(sheet, [g_ssCounter, dataList[0], dataList[1], category, name, code])
    if len(tabs) > 1:
        sheet = efile.get_sheet('博士专业')
        for category, name, code in _parse_major_tab(tabs[1]):
            savedata3(sheet, [g_bsCounter, dataList[0], dataList[1], category, name, code])
# def getJoin(url,dataList):
#     html = url_get(url)
#     content = html.xpath('//div[@class="container"]')
    
def getFee(url, dataList):
    """Scrape the tuition page at *url* and append its whitespace-stripped
    text content to dataList."""
    content_node = url_get(url).xpath('//div[@class="yxk-content"]')[0]
    raw_text = content_node.xpath('string()')
    dataList.append(re.sub(r'\s+', "", raw_text))

def getOffice(dataList):
    """Search Baidu for the school name (dataList[1]) and append what the
    result page shows as its website URL; appends a failure message when
    no candidate link is found."""
    query_url = "https://www.baidu.com/s?ie=utf-8&f=8&rsv_bp=1&tn=monline_3_dg&wd=" + dataList[1]
    hits = url_get(query_url).xpath('//a[@class="c-showurl c-color-gray"]/text()')
    office = ("https://" + hits[0]) if len(hits) != 0 else "获取信息失败"
    dataList.append(office)

def _save_links_as_docs(hrefs, dataList):
    """Download every relative chsi link in *hrefs* and archive each as a
    .doc file via save_as_doc (dataList[1] names the target folder)."""
    for href in hrefs:
        save_as_doc('https://yz.chsi.com.cn' + href, dataList)

def getdata(url, dataList, efile):
    """Scrape one school's hub page at *url*.

    Appends the introduction/department/fee fields to dataList, fills the
    majors sheets of the workbook *efile*, and downloads the school's notice
    articles as .doc files. dataList must already start with
    [serial, school name, ...].

    Fixes: removed the dead aName/zName/iName loops and the redundant
    ``curlc = ...[0]`` assignments that crashed on empty link lists; the
    four identical download loops now share _save_links_as_docs.
    """
    html = url_get(url)
    schoolLink = html.xpath('//ul[@class="yxk-link-list clearfix"]')
    base = 'https://yz.chsi.com.cn'
    # introduction (li[1]), departments (li[2]), majors (li[3])
    getCondition(base + schoolLink[0].xpath('./li[1]/a/@href')[0], dataList)
    getSetting(base + schoolLink[0].xpath('./li[2]/a/@href')[0], dataList)
    getStu(base + schoolLink[0].xpath('./li[3]/a/@href')[0], dataList, efile)
    # tuition: 3rd link of the "other links" box
    linkPath = html.xpath('//div[@class="yxk-other-link"]')
    getFee(base + linkPath[0].xpath('./a[3]/@href')[0], dataList)
    # adjustment-policy articles. NOTE(review): the '//' xpath searches the
    # whole document, not just acp[2] — kept byte-identical to preserve the
    # original behavior; confirm against the live page before narrowing.
    acp = schoolLink[0].xpath('//div[@class="yxk-column-right"]')
    _save_links_as_docs(acp[2].xpath('//div[@class="yxk-overflow"]/a/@href'), dataList)
    # scholarships: 4th link of the "other links" box
    _save_links_as_docs(linkPath[0].xpath('./a[4]/@href'), dataList)
    # research facilities: 2nd link
    _save_links_as_docs(linkPath[0].xpath('./a[2]/@href'), dataList)
    # admission brochures (left[2]) and online-application notices (left[1]);
    # same document-wide '//' xpath caveat as above
    left = schoolLink[0].xpath('//div[@class="yxk-column-left"]')
    _save_links_as_docs(left[2].xpath('//div[@class="yxk-overflow"]/a/@href'), dataList)
    _save_links_as_docs(left[1].xpath('//div[@class="yxk-overflow"]/a/@href'), dataList)
def askurl(Number, url, efile, localtime):
    """Scrape one listing page of schools at *url*.

    Number is the 1-based serial of the next school; the updated serial is
    returned. For every table row: collect name, location, affiliation and
    the two yes/no flags, delegate the detail scraping to getdata, then
    append the row to the basics sheet and save the workbook (named with
    *localtime*) so progress survives a crash.

    Fixes: removed the unused ``text2`` round-trip; the original bound
    ``nameList`` only inside the first loop, raising NameError when no table
    matched and silently keeping only the last table's rows — all tables'
    rows are now collected.
    """
    html = url_get(url)
    rows = []
    for table in html.xpath('//div[@class="yxk-table"]'):
        rows.extend(table.xpath('./table/tbody/tr'))
    for row in rows:
        dataList = [Number]
        Number += 1
        name = re.sub(r'\s+', "", row.xpath('./td[1]/a/text()')[0])
        surlc = 'https://yz.chsi.com.cn' + row.xpath('./td[1]/a/@href')[0]
        dataList.append(name)
        dataList.append(row.xpath('./td[2]/text()')[0])  # location
        dataList.append(row.xpath('./td[3]/text()')[0])  # affiliation
        # the <i> marker element exists only when the school has the attribute
        dataList.append('是' if len(row.xpath('./td[4]/i/text()')) != 0 else '否')  # graduate school
        dataList.append('是' if len(row.xpath('./td[5]/i/text()')) != 0 else '否')  # self-set score line
        print("正在获取第" + str(Number - 1) + "所学校的数据....")
        getdata(surlc, dataList, efile)
        print("第" + str(Number - 1) + "所学校(" + dataList[1] + ")的数据获取成功\n")
        savedata(efile.get_sheet('院校基本信息'), dataList)
        # save after every school so an interrupted crawl keeps its data
        efile.save(localtime + " 考研信息数据爬取结果.xls")
    return Number
def main():
    """Entry point: create the result workbook and crawl the Sichuan listing
    (two pages of 20 schools each), timestamping the saved file name."""
    workbook = newFile()
    serial = 1
    stamp = time.strftime("%Y-%m-%d %H %M %S", time.localtime())
    listing_url = "https://yz.chsi.com.cn/sch/search.do?ssdm=51&start="  # Sichuan
    # url = "https://yz.chsi.com.cn/sch/search.do?ssdm=&yxls=" # nationwide (43 pages)
    for page in range(2):
        serial = askurl(serial, listing_url + str(page * 20), workbook, stamp)
        
if __name__ =="__main__":
    main()

