#!/usr/bin/env python
# coding: utf-8

# In[ ]:


from requests_html import HTMLSession
import requests_html
import pandas as pd
import urllib.parse

# One HTTP session shared by every section below.
session = HTMLSession()

# 1. School news (学校要闻)
# Probe index1.htm, index2.htm, ... until the server stops answering 200;
# the last successful page number is the count of numbered archive pages
# (previously this count was probed and then discarded in favor of a
# hard-coded constant, which goes stale as the site grows).
last_page = 0
for page in range(1, 100):
    xxyw = session.get('https://www.nfu.edu.cn/xxyw/index' + str(page) + '.htm')
    if xxyw.status_code != 200:
        print(page)
        break
    last_page = page

# Full page list: the bare index.htm plus every numbered page the probe found.
xxyw_url_group = ['https://www.nfu.edu.cn/xxyw/index' + str(i) + '.htm'
                  for i in range(1, last_page + 1)]
xxyw_url_group.insert(0, 'https://www.nfu.edu.cn/xxyw/index.htm')

import os
os.makedirs('html_out/xxyw', exist_ok=True)

# Save each listing page to disk.  mode="w" (not "a"): re-running the script
# must overwrite stale copies rather than append a second HTML document,
# which would make every article appear twice when the files are parsed below.
for url in xxyw_url_group:
    r = session.get(url)
    path = urllib.parse.urlparse(url).path
    with open('html_out/' + path, encoding="utf8", mode="w") as fp:
        fp.write(r.html.html)

# Base URL pieces (scheme, host, '/xxyw') used to absolutize relative links.
nfu_urlparse = urllib.parse.urlparse(xxyw.url)

# XPaths for the article list rendered on every page of this section.
dict_xpath = {
    '链接_xpath': '//div[@class="news_title"]/a/@href',
    '标题_xpath': '//div[@class="news_title"]/a/@title',
    '日期_xpath': '//font[@class="right-more"]/text()'
}

def pages_content_url(parsed):
    """Return absolute article URLs for every link found in *parsed*.

    Relative hrefs from the listing page are joined onto the section's
    scheme/netloc/first-path-segment taken from the module-level
    ``nfu_urlparse``.
    """
    section = '/' + nfu_urlparse.path.split('/')[1] + '/'
    return [
        urllib.parse.urlunparse(
            [nfu_urlparse.scheme, nfu_urlparse.netloc,
             section + detail_url, '', '', ''])
        for detail_url in parsed.xpath(dict_xpath['链接_xpath'])
    ]

list_df = []

files = os.listdir('html_out/xxyw/')
print(files)

# Parse every saved page into a DataFrame: title / absolute link / date.
for html in files:
    with open('html_out/xxyw/' + html, encoding='utf8', mode='r') as fp:
        parsed = requests_html.soup_parse(fp.read())
    df = pd.DataFrame({
        "标题": parsed.xpath(dict_xpath['标题_xpath']),
        "链接": pages_content_url(parsed),
        "日期": parsed.xpath(dict_xpath['日期_xpath']),
    })
    list_df.append(df)

# Concatenate all pages, newest first.  drop=True so the stale per-page
# index is not written to Excel as a meaningless extra column.
df_all = pd.concat(list_df).sort_values(by='日期', ascending=False).reset_index(drop=True)
display(df_all)

os.makedirs('data_out', exist_ok=True)
# mode='w' creates the workbook; the later sections append their own sheets.
with pd.ExcelWriter('data_out/nfu_官网.xlsx', mode='w', engine="openpyxl") as writer:
    df_all.to_excel(writer, sheet_name='学校要闻')
        
# 2. Campus news (校园动态)
# Probe numbered pages until a non-200 response; the last success is the
# real page count (previously probed and then ignored for a hard-coded 84).
last_page = 0
for page in range(1, 100):
    xydt = session.get('https://www.nfu.edu.cn/xydt/index' + str(page) + '.htm')
    if xydt.status_code != 200:
        print(page)
        break
    last_page = page

xydt_url_group = ['https://www.nfu.edu.cn/xydt/index' + str(i) + '.htm'
                  for i in range(1, last_page + 1)]
xydt_url_group.insert(0, 'https://www.nfu.edu.cn/xydt/index.htm')

import os
os.makedirs('html_out/xydt', exist_ok=True)

# mode="w" so a re-run overwrites the cached page instead of appending a
# duplicate HTML document (which would double every parsed article).
for url in xydt_url_group:
    r = session.get(url)
    path = urllib.parse.urlparse(url).path
    with open('html_out/' + path, encoding="utf8", mode="w") as fp:
        fp.write(r.html.html)

# Base URL pieces (scheme, host, '/xydt') used to absolutize relative links.
nfu_urlparse = urllib.parse.urlparse(xydt.url)

# XPaths for the article list rendered on every page of this section.
dict_xpath = {
    '链接_xpath': '//div[@class="news_title"]/a/@href',
    '标题_xpath': '//div[@class="news_title"]/a/@title',
    '日期_xpath': '//font[@class="right-more"]/text()'
}

def pages_content_url(parsed):
    """Return absolute article URLs for every link found in *parsed*."""
    section = '/' + nfu_urlparse.path.split('/')[1] + '/'
    return [
        urllib.parse.urlunparse(
            [nfu_urlparse.scheme, nfu_urlparse.netloc,
             section + detail_url, '', '', ''])
        for detail_url in parsed.xpath(dict_xpath['链接_xpath'])
    ]

list_df = []

files = os.listdir('html_out/xydt/')
print(files)

# Parse every saved page into a DataFrame: title / absolute link / date.
for html in files:
    with open('html_out/xydt/' + html, encoding='utf8', mode='r') as fp:
        parsed = requests_html.soup_parse(fp.read())
    df = pd.DataFrame({
        "标题": parsed.xpath(dict_xpath['标题_xpath']),
        "链接": pages_content_url(parsed),
        "日期": parsed.xpath(dict_xpath['日期_xpath']),
    })
    list_df.append(df)

# Newest first; drop=True keeps the stray per-page index out of the sheet.
df_all = pd.concat(list_df).sort_values(by='日期', ascending=False).reset_index(drop=True)
display(df_all)

# mode='a': append this section's sheet to the workbook created by section 1.
with pd.ExcelWriter('data_out/nfu_官网.xlsx', mode='a', engine="openpyxl") as writer:
    df_all.to_excel(writer, sheet_name='校园动态')
        
# 3. Notices and announcements (通知公告)
# Probe numbered pages until a non-200 response; the last success is the
# real page count (previously probed and then ignored for a hard-coded 35).
last_page = 0
for page in range(1, 100):
    tzgg = session.get('https://www.nfu.edu.cn/tzgg/index' + str(page) + '.htm')
    if tzgg.status_code != 200:
        print(page)
        break
    last_page = page

tzgg_url_group = ['https://www.nfu.edu.cn/tzgg/index' + str(i) + '.htm'
                  for i in range(1, last_page + 1)]
tzgg_url_group.insert(0, 'https://www.nfu.edu.cn/tzgg/index.htm')

import os
os.makedirs('html_out/tzgg', exist_ok=True)

# mode="w" so a re-run overwrites the cached page instead of appending a
# duplicate HTML document (which would double every parsed article).
for url in tzgg_url_group:
    r = session.get(url)
    path = urllib.parse.urlparse(url).path
    with open('html_out/' + path, encoding="utf8", mode="w") as fp:
        fp.write(r.html.html)

# Base URL pieces (scheme, host, '/tzgg') used to absolutize relative links.
nfu_urlparse = urllib.parse.urlparse(tzgg.url)

# XPaths for the article list rendered on every page of this section.
dict_xpath = {
    '链接_xpath': '//div[@class="news_title"]/a/@href',
    '标题_xpath': '//div[@class="news_title"]/a/@title',
    '日期_xpath': '//font[@class="right-more"]/text()'
}

def pages_content_url(parsed):
    """Return absolute article URLs for every link found in *parsed*."""
    section = '/' + nfu_urlparse.path.split('/')[1] + '/'
    return [
        urllib.parse.urlunparse(
            [nfu_urlparse.scheme, nfu_urlparse.netloc,
             section + detail_url, '', '', ''])
        for detail_url in parsed.xpath(dict_xpath['链接_xpath'])
    ]

list_df = []

files = os.listdir('html_out/tzgg/')
print(files)

# Parse every saved page into a DataFrame: title / absolute link / date.
for html in files:
    with open('html_out/tzgg/' + html, encoding='utf8', mode='r') as fp:
        parsed = requests_html.soup_parse(fp.read())
    df = pd.DataFrame({
        "标题": parsed.xpath(dict_xpath['标题_xpath']),
        "链接": pages_content_url(parsed),
        "日期": parsed.xpath(dict_xpath['日期_xpath']),
    })
    list_df.append(df)

# Newest first; drop=True keeps the stray per-page index out of the sheet.
df_all = pd.concat(list_df).sort_values(by='日期', ascending=False).reset_index(drop=True)
display(df_all)

# mode='a': append this section's sheet to the workbook created by section 1.
with pd.ExcelWriter('data_out/nfu_官网.xlsx', mode='a', engine="openpyxl") as writer:
    df_all.to_excel(writer, sheet_name='通知公告')
        
# 4. Tenders and bids (招投标)
# Probe numbered pages until a non-200 response; the last success is the
# real page count (previously probed and then ignored for a hard-coded 22).
last_page = 0
for page in range(1, 100):
    ztb = session.get('https://www.nfu.edu.cn/ztb/index' + str(page) + '.htm')
    if ztb.status_code != 200:
        print(page)
        break
    last_page = page

ztb_url_group = ['https://www.nfu.edu.cn/ztb/index' + str(i) + '.htm'
                 for i in range(1, last_page + 1)]
ztb_url_group.insert(0, 'https://www.nfu.edu.cn/ztb/index.htm')

import os
os.makedirs('html_out/ztb', exist_ok=True)

# mode="w" so a re-run overwrites the cached page instead of appending a
# duplicate HTML document (which would double every parsed article).
for url in ztb_url_group:
    r = session.get(url)
    path = urllib.parse.urlparse(url).path
    with open('html_out/' + path, encoding="utf8", mode="w") as fp:
        fp.write(r.html.html)

# Base URL pieces (scheme, host, '/ztb') used to absolutize relative links.
nfu_urlparse = urllib.parse.urlparse(ztb.url)

# XPaths for the article list rendered on every page of this section.
dict_xpath = {
    '链接_xpath': '//div[@class="news_title"]/a/@href',
    '标题_xpath': '//div[@class="news_title"]/a/@title',
    '日期_xpath': '//font[@class="right-more"]/text()'
}

def pages_content_url(parsed):
    """Return absolute article URLs for every link found in *parsed*."""
    section = '/' + nfu_urlparse.path.split('/')[1] + '/'
    return [
        urllib.parse.urlunparse(
            [nfu_urlparse.scheme, nfu_urlparse.netloc,
             section + detail_url, '', '', ''])
        for detail_url in parsed.xpath(dict_xpath['链接_xpath'])
    ]

list_df = []

files = os.listdir('html_out/ztb/')
print(files)

# Parse every saved page into a DataFrame: title / absolute link / date.
for html in files:
    with open('html_out/ztb/' + html, encoding='utf8', mode='r') as fp:
        parsed = requests_html.soup_parse(fp.read())
    df = pd.DataFrame({
        "标题": parsed.xpath(dict_xpath['标题_xpath']),
        "链接": pages_content_url(parsed),
        "日期": parsed.xpath(dict_xpath['日期_xpath']),
    })
    list_df.append(df)

# Newest first; drop=True keeps the stray per-page index out of the sheet.
df_all = pd.concat(list_df).sort_values(by='日期', ascending=False).reset_index(drop=True)
display(df_all)

# mode='a': append this section's sheet to the workbook created by section 1.
with pd.ExcelWriter('data_out/nfu_官网.xlsx', mode='a', engine="openpyxl") as writer:
    df_all.to_excel(writer, sheet_name='招投标')
        
# 5. Higher-education news (高教动态; section path 'gjdt')
# Probe numbered pages until a non-200 response; the last success is the
# real page count (previously probed and then ignored for a hard-coded 26).
last_page = 0
for page in range(1, 100):
    gjdt = session.get('https://www.nfu.edu.cn/gjdt/index' + str(page) + '.htm')
    if gjdt.status_code != 200:
        print(page)
        break
    last_page = page

gjdt_url_group = ['https://www.nfu.edu.cn/gjdt/index' + str(i) + '.htm'
                  for i in range(1, last_page + 1)]
gjdt_url_group.insert(0, 'https://www.nfu.edu.cn/gjdt/index.htm')

import os
os.makedirs('html_out/gjdt', exist_ok=True)

# mode="w" so a re-run overwrites the cached page instead of appending a
# duplicate HTML document (which would double every parsed article).
for url in gjdt_url_group:
    r = session.get(url)
    path = urllib.parse.urlparse(url).path
    with open('html_out/' + path, encoding="utf8", mode="w") as fp:
        fp.write(r.html.html)

# Base URL pieces (scheme, host, '/gjdt') used to absolutize relative links.
nfu_urlparse = urllib.parse.urlparse(gjdt.url)

# XPaths for the article list rendered on every page of this section.
dict_xpath = {
    '链接_xpath': '//div[@class="news_title"]/a/@href',
    '标题_xpath': '//div[@class="news_title"]/a/@title',
    '日期_xpath': '//font[@class="right-more"]/text()'
}

def pages_content_url(parsed):
    """Return absolute article URLs for every link found in *parsed*."""
    section = '/' + nfu_urlparse.path.split('/')[1] + '/'
    return [
        urllib.parse.urlunparse(
            [nfu_urlparse.scheme, nfu_urlparse.netloc,
             section + detail_url, '', '', ''])
        for detail_url in parsed.xpath(dict_xpath['链接_xpath'])
    ]

list_df = []

files = os.listdir('html_out/gjdt/')
print(files)

# Parse every saved page into a DataFrame: title / absolute link / date.
for html in files:
    with open('html_out/gjdt/' + html, encoding='utf8', mode='r') as fp:
        parsed = requests_html.soup_parse(fp.read())
    df = pd.DataFrame({
        "标题": parsed.xpath(dict_xpath['标题_xpath']),
        "链接": pages_content_url(parsed),
        "日期": parsed.xpath(dict_xpath['日期_xpath']),
    })
    list_df.append(df)

# Newest first; drop=True keeps the stray per-page index out of the sheet.
df_all = pd.concat(list_df).sort_values(by='日期', ascending=False).reset_index(drop=True)
display(df_all)

# mode='a': append this section's sheet to the workbook created by section 1.
with pd.ExcelWriter('data_out/nfu_官网.xlsx', mode='a', engine="openpyxl") as writer:
    df_all.to_excel(writer, sheet_name='高教动态')
