
import requests
import xlwt


from bs4 import BeautifulSoup


#获取页面函数，以post方式请求
# Fetch a page with an HTTP POST request.
def getHtmlTextByPost(url, data):
    """POST *data* to *url* and return the decoded page text.

    Returns an empty string on any request failure (connection error,
    timeout, or non-2xx HTTP status).
    """
    try:
        # Browser-like User-Agent so the server does not reject the scraper.
        h = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.140 Safari/537.36"}
        r = requests.post(url, data, headers=h)
        r.raise_for_status()
        # Prefer the encoding detected from the body over the header default.
        r.encoding = r.apparent_encoding
        return r.text
    except requests.RequestException:
        # Narrowed from a bare `except:` — only swallow request-level errors.
        return ""

#以get方式请求
# Fetch a page with an HTTP GET request.
def getHtmlTextByGet(url):
    """GET *url* and return the decoded page text.

    Returns an empty string on any request failure (connection error,
    timeout, or non-2xx HTTP status).
    """
    try:
        # Browser-like User-Agent so the server does not reject the scraper.
        h = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.140 Safari/537.36"}
        r = requests.get(url, headers=h)
        r.raise_for_status()
        # Prefer the encoding detected from the body over the header default.
        r.encoding = r.apparent_encoding
        return r.text
    except requests.RequestException:
        # Narrowed from a bare `except:` — only swallow request-level errors.
        return ""
#解析页面函数
# Decide whether a detail page is relevant.
def parsePage(page):
    """Return True if the page body mentions "前端" (front-end), else False.

    On a parse failure the original printed a message and implicitly
    returned None; this version returns False explicitly so callers
    always get a bool (backward-compatible: None was falsy too).
    """
    try:
        soup = BeautifulSoup(page, "html.parser")
        # Membership test replaces the equivalent `find(...) >= 0` idiom.
        return "前端" in soup.body.text
    except Exception:
        print("解析页面数据时出错")
        return False
#保存为Excel
# Save the collected rows to an .xls workbook.
def saveAsExcel(name, list):
    """Write *list* (rows of 4-element records) to ``<name>.xls``.

    *name* is used both as the sheet name and the file name.
    NOTE: the parameter name shadows the builtin ``list``; kept as-is
    for backward compatibility with keyword callers.
    """
    wb = xlwt.Workbook(encoding='utf-8')
    sheet = wb.add_sheet(name, cell_overwrite_ok=True)
    # The original wrote some cells a second time with the identical value
    # (harmless only because cell_overwrite_ok=True); one write per cell
    # is equivalent and clear.
    for i, row in enumerate(list):
        for j in range(4):
            sheet.write(i, j, row[j])
    wb.save(name + ".xls")
#main
# Entry point: scrape CQU job-fair listings, keep those whose detail page
# mentions "前端", and export the matches to Excel.
def main():
    """Crawl 5 pages of job-fair listings and save front-end matches.

    For each listing row the detail page is fetched; rows whose detail
    page contains "前端" are collected as [name, date, address, url] and
    written to "重大春招前端.xls".
    """
    start_url = "http://www.job.cqu.edu.cn/jyxt/zczphxxlistlogin.do"
    target_url = "http://www.job.cqu.edu.cn/jyxt/"
    # Header row first; data rows are appended below.
    article_list = [["企业名称", "时间", "地点", "链接"]]
    for i in range(5):
        # Back-end pagination parameters; only the current page varies,
        # so set it at construction instead of mutating afterwards.
        postdata = {
            "zphlx": 1,
            "pages.pageSize": 30,
            "pages.currentPage": i + 1,
            "pages.maxPage": 134,
        }
        html = getHtmlTextByPost(start_url, postdata)
        soup = BeautifulSoup(html, "html.parser")
        print("解析链接")
        for article in soup.findAll("tr", {'class': 'con'}):
            # The detail-page path is embedded in the first cell's markup;
            # the [9:38] slice extracts it — TODO confirm against live HTML.
            url = target_url + str(article.contents[1].contents[1])[9:38]
            html = getHtmlTextByGet(url)
            if parsePage(html):
                print(article.contents[1].text)
                name = article.contents[1].text
                address = article.contents[3].text
                date = article.contents[7].text
                article_list.append([name, date, address, url])
    saveAsExcel("重大春招前端", article_list)


main()
