#!/usr/bin/env python
# coding: utf-8

# # HTML解析-南方学院新闻 

# In[1]:


from requests_html import HTMLSession
import requests_html
import pandas as pd
import urllib.parse


# In[13]:


# A1  nfu.edu.cn — open one HTTP session and fetch each section's
# index page from the university site.
session = HTMLSession()
NFU_BASE = "https://www.nfu.edu.cn"
r = session.get(NFU_BASE + "/mtnf/index.htm")       # media coverage (媒体南方)
r_xxyw = session.get(NFU_BASE + "/xxyw/index.htm")  # school news
r_xydt = session.get(NFU_BASE + "/xydt/index.htm")  # campus news
r_tzgg = session.get(NFU_BASE + "/tzgg/index.htm")  # notices
r_ztb = session.get(NFU_BASE + "/ztb/index.htm")    # tenders
r_gjdt = session.get(NFU_BASE + "/gjdt/index.htm")  # higher-education news


# ## 学校要闻

# In[14]:


# Re-fetch the "school news" listing so this cell can be re-run on its own.
r_xxyw = session.get("https://www.nfu.edu.cn/xxyw/index.htm")
# Archive the raw HTML.  The original wrote every section to the same
# "html_out/_nfu_文学与传媒学院.html" file, so each section overwrote the
# previous section's archive; use a section-specific filename instead.
with open("html_out/nfu_xxyw.html", encoding="utf8", mode="w") as xxyw:
    xxyw.write(r_xxyw.html.html)
# Read the archive back for parsing.
with open("html_out/nfu_xxyw.html", encoding="utf8", mode="r") as xxyw:
    html_xxyw = xxyw.read()


# In[15]:


# Parse the saved HTML.  NOTE(review): the original called
# requests_html.soup_parse(), which is not part of the released
# requests-html API; requests_html.HTML() is the documented parser and
# answers the same .xpath() queries (attribute XPaths return strings).
parsed_xxyw = requests_html.HTML(html=html_xxyw)
parsed_xxyw


# In[16]:


# Split the page URL into components (kept for compatibility with any
# later cell that inspects it).
base_url = r_xxyw.url
xxyw_urlparse = urllib.parse.urlparse(base_url)
xxyw_urlparse


# In[17]:


# Build absolute article links.  urljoin() resolves each relative href
# against the index-page URL and, unlike the original manual path
# splicing with urlunparse, also handles already-absolute hrefs and
# "../" segments correctly.
list_URL2 = [
    urllib.parse.urljoin(base_url, detail_url)
    for detail_url in parsed_xxyw.xpath('//div[@class="news_title"]/a/@href')
]
list_URL2


# In[18]:


# Collect title / link / date into a DataFrame, one row per article.
# B-D-1 pd.DataFrame construction, as taught in the pandas course.
df2 = pd.DataFrame({
    "标题": parsed_xxyw.xpath('//div[@class="news_title"]/a/@title'),
    "链结": list_URL2,
    "日期": parsed_xxyw.xpath('//font[@class="right-more"]/text()'),
})
df2


# In[46]:





# ## 校园动态

# In[19]:


# Fetch the "campus news" listing.  The original issued this identical
# GET twice in consecutive cells; once is enough.
r_xydt = session.get("https://www.nfu.edu.cn/xydt/index.htm")


# In[20]:


# Archive the raw HTML under a section-specific name so it does not
# overwrite the other sections' archives (the original reused one file).
with open("html_out/nfu_xydt.html", encoding="utf8", mode="w") as xydt:
    xydt.write(r_xydt.html.html)
# Read the archive back for parsing.
with open("html_out/nfu_xydt.html", encoding="utf8", mode="r") as xydt:
    html_xydt = xydt.read()


# In[21]:


# Parse the saved HTML.  NOTE(review): the original called
# requests_html.soup_parse(), which is not part of the released
# requests-html API; requests_html.HTML() is the documented parser.
parsed_xydt = requests_html.HTML(html=html_xydt)
parsed_xydt


# In[22]:


# Split the page URL into components (kept for compatibility).
base_url = r_xydt.url
xydt_urlparse = urllib.parse.urlparse(base_url)
xydt_urlparse


# In[23]:


# Build absolute article links with urljoin() instead of the original
# manual path splicing — also correct for absolute or "../" hrefs.
list_URL3 = [
    urllib.parse.urljoin(base_url, detail_url)
    for detail_url in parsed_xydt.xpath('//div[@class="news_title"]/a/@href')
]
list_URL3


# In[25]:


# Collect title / link / date into a DataFrame, one row per article.
# B-D-1 pd.DataFrame construction, as taught in the pandas course.
df_xydt = pd.DataFrame({
    "标题": parsed_xydt.xpath('//div[@class="news_title"]/a/@title'),
    "链结": list_URL3,
    "日期": parsed_xydt.xpath('//font[@class="right-more"]/text()'),
})
df_xydt


# In[48]:





# ## 通知公告

# In[26]:


# Fetch the "notices" listing.
r_tzgg = session.get("https://www.nfu.edu.cn/tzgg/index.htm")


# In[27]:


# Archive the raw HTML under a section-specific name so it does not
# overwrite the other sections' archives (the original reused one file).
with open("html_out/nfu_tzgg.html", encoding="utf8", mode="w") as tzgg:
    tzgg.write(r_tzgg.html.html)
# Read the archive back for parsing.
with open("html_out/nfu_tzgg.html", encoding="utf8", mode="r") as tzgg:
    html_tzgg = tzgg.read()


# In[28]:


# Parse the saved HTML.  NOTE(review): the original called
# requests_html.soup_parse(), which is not part of the released
# requests-html API; requests_html.HTML() is the documented parser.
parsed_tzgg = requests_html.HTML(html=html_tzgg)
parsed_tzgg


# In[29]:


# Split the page URL into components (kept for compatibility).
base_url = r_tzgg.url
tzgg_urlparse = urllib.parse.urlparse(base_url)
tzgg_urlparse


# In[30]:


# Build absolute article links with urljoin() instead of the original
# manual path splicing — also correct for absolute or "../" hrefs.
list_URL4 = [
    urllib.parse.urljoin(base_url, detail_url)
    for detail_url in parsed_tzgg.xpath('//div[@class="news_title"]/a/@href')
]
list_URL4


# In[31]:


# Collect title / link / date into a DataFrame, one row per article.
# B-D-1 pd.DataFrame construction, as taught in the pandas course.
df_tzgg = pd.DataFrame({
    "标题": parsed_tzgg.xpath('//div[@class="news_title"]/a/@title'),
    "链结": list_URL4,
    "日期": parsed_tzgg.xpath('//font[@class="right-more"]/text()'),
})
df_tzgg


# ## 招投标

# In[ ]:


# Fetch the "tenders" listing.
r_ztb = session.get("https://www.nfu.edu.cn/ztb/index.htm")


# In[32]:


# Archive the raw HTML under a section-specific name so it does not
# overwrite the other sections' archives (the original reused one file).
with open("html_out/nfu_ztb.html", encoding="utf8", mode="w") as ztb:
    ztb.write(r_ztb.html.html)
# Read the archive back for parsing.
with open("html_out/nfu_ztb.html", encoding="utf8", mode="r") as ztb:
    html_ztb = ztb.read()


# In[35]:


# Parse the saved HTML.  NOTE(review): the original called
# requests_html.soup_parse(), which is not part of the released
# requests-html API; requests_html.HTML() is the documented parser.
parsed_ztb = requests_html.HTML(html=html_ztb)
parsed_ztb


# In[36]:


# Split the page URL into components (kept for compatibility).
base_url = r_ztb.url
ztb_urlparse = urllib.parse.urlparse(base_url)
ztb_urlparse


# In[37]:


# Build absolute article links with urljoin() instead of the original
# manual path splicing — also correct for absolute or "../" hrefs.
list_URL5 = [
    urllib.parse.urljoin(base_url, detail_url)
    for detail_url in parsed_ztb.xpath('//div[@class="news_title"]/a/@href')
]
list_URL5


# In[38]:


# Collect title / link / date into a DataFrame, one row per article.
# B-D-1 pd.DataFrame construction, as taught in the pandas course.
df_ztb = pd.DataFrame({
    "标题": parsed_ztb.xpath('//div[@class="news_title"]/a/@title'),
    "链结": list_URL5,
    "日期": parsed_ztb.xpath('//font[@class="right-more"]/text()'),
})
df_ztb


# ## 高教动态

# In[39]:


# Fetch the "higher-education news" listing.
r_gjdt = session.get("https://www.nfu.edu.cn/gjdt/index.htm")


# In[40]:


# Archive the raw HTML under a section-specific name so it does not
# overwrite the other sections' archives (the original reused one file).
with open("html_out/nfu_gjdt.html", encoding="utf8", mode="w") as gjdt:
    gjdt.write(r_gjdt.html.html)
# Read the archive back for parsing.
with open("html_out/nfu_gjdt.html", encoding="utf8", mode="r") as gjdt:
    html_gjdt = gjdt.read()


# In[41]:


# Parse the saved HTML.  NOTE(review): the original called
# requests_html.soup_parse(), which is not part of the released
# requests-html API; requests_html.HTML() is the documented parser.
parsed_gjdt = requests_html.HTML(html=html_gjdt)
parsed_gjdt


# In[42]:


# Split the page URL into components (kept for compatibility).
base_url = r_gjdt.url
gjdt_urlparse = urllib.parse.urlparse(base_url)
gjdt_urlparse


# In[43]:


# Build absolute article links with urljoin() instead of the original
# manual path splicing — also correct for absolute or "../" hrefs.
list_URL6 = [
    urllib.parse.urljoin(base_url, detail_url)
    for detail_url in parsed_gjdt.xpath('//div[@class="news_title"]/a/@href')
]
list_URL6


# In[44]:


# Collect title / link / date into a DataFrame, one row per article.
# B-D-1 pd.DataFrame construction, as taught in the pandas course.
df_gjdt = pd.DataFrame({
    "标题": parsed_gjdt.xpath('//div[@class="news_title"]/a/@title'),
    "链结": list_URL6,
    "日期": parsed_gjdt.xpath('//font[@class="right-more"]/text()'),
})
df_gjdt


# In[52]:


# Export all sections to one Excel workbook, one sheet per section.
#
# BUG FIX: the original wrote the undefined name `df` — the media-coverage
# page was fetched into `r` (cell A1) but never parsed, so `df.to_excel`
# raised NameError.  Build `df` here the same way as the other sections,
# straight from the response's own HTML parser.
df = pd.DataFrame({
    "标题": r.html.xpath('//div[@class="news_title"]/a/@title'),
    "链结": [
        urllib.parse.urljoin(r.url, href)
        for href in r.html.xpath('//div[@class="news_title"]/a/@href')
    ],
    "日期": r.html.xpath('//font[@class="right-more"]/text()'),
})
# pd.ExcelWriter.save() was removed in pandas 2.0; using the writer as a
# context manager closes (and saves) the workbook on exit.
with pd.ExcelWriter("data_out/nfu_文学与传媒学院.xlsx") as writer:
    df.to_excel(writer, sheet_name="媒体报道")
    df_xydt.to_excel(writer, sheet_name="校园动态")
    df2.to_excel(writer, sheet_name="学校要闻")
    df_tzgg.to_excel(writer, sheet_name="通知公告")
    df_ztb.to_excel(writer, sheet_name="招投标")
    df_gjdt.to_excel(writer, sheet_name="高教动态")


# In[ ]:




