import time
import urllib.request as req
from urllib.parse import urljoin

from bs4 import BeautifulSoup


# Entry point: first page of the university news listing.
url = 'http://www.gxstnu.edu.cn/xxxw.htm'


# First page: http://www.gxstnu.edu.cn/xxxw.htm
# Next page:  http://www.gxstnu.edu.cn/xxxw/301.htm
#                             <a href="300.htm">下页</a>
# 302 pages in total; the page number decreases by 1 each page.

#
# # Fetch the links
def get_url(url):
    """Fetch one listing page and extract its news-article links.

    Parameters
    ----------
    url : str
        Absolute URL of a listing page.

    Returns
    -------
    tuple[list[str], str | None]
        (absolute article URLs found on this page,
         absolute URL of the next listing page, or None on the last page).

    Note: the original returned a bare ``None`` when no "next" link
    existed, which made the caller's tuple-unpacking raise and silently
    drop the final page's articles; returning ``(links, None)`` fixes that.
    """
    # Close the HTTP response deterministically.
    with req.urlopen(url) as response:
        soup = BeautifulSoup(response, 'lxml')
    # Be polite to the server between requests.
    time.sleep(0.5)
    box = soup.find(name='div', attrs={"class": "list_box"})
    page_news_link = []
    for li in box.find_all(name='li'):
        a = li.find(name='a')
        # Resolve the (possibly relative) href against the page URL.
        # urljoin comes from urllib.parse -- the documented home of the
        # function (req.urljoin only worked via an internal re-export).
        page_news_link.append(urljoin(url, a['href']))
    # Locate the "next page" anchor; it is absent on the last page.
    next_url = None
    try:
        span = soup.find(name='span', attrs={'class': 'p_pages'})
        next_page = span.find(name='span', attrs={'class': 'p_next'})
        next_url = urljoin(url, next_page.find(name='a')['href'])
    except (AttributeError, KeyError, TypeError):
        pass  # no next page: last listing page reached
    return page_news_link, next_url


# 获取新闻内容
def get_content(url):
    """Fetch a news article page and return its title and body text.

    The view counter on the page is rendered by JavaScript and cannot
    be obtained by static scraping.

    Parameters
    ----------
    url : str
        Absolute URL of a news article page.

    Returns
    -------
    tuple[str, str]
        (article title, concatenated paragraph text).
    """
    # Close the HTTP response deterministically.
    with req.urlopen(url) as response:
        soup = BeautifulSoup(response, 'lxml', from_encoding='utf-8')
    # Be polite to the server between requests.
    time.sleep(0.5)
    content_div = soup.find(name='div', attrs={'class': 'c-content'})
    title = content_div.find('h1').text
    news_div = content_div.find('div', {'class': 'v_news_content'})
    # Strip non-breaking spaces and stray high-bit characters in one
    # C-level pass. The original chained .replace() with '\0xa1' and
    # '\0x80' -- those are NUL + literal text, not the intended
    # '\xa1'/'\x80' escapes, so they never matched anything.
    junk = {0xa0: None, 0xa1: None, 0x80: None}
    paragraphs = [p.text.translate(junk) for p in news_div.find_all('p') if p.text]
    return title, ''.join(paragraphs)


def get_txt(url):
    """Crawl listing pages starting at *url*, appending every article as
    a 'title|content' line to news.txt.

    Follows "next page" links until the last page or an error. Iterates
    instead of recursing: ~302 pages would otherwise push one stack
    frame per page for no benefit.

    Parameters
    ----------
    url : str
        Listing page to start from.
    """
    out_path = r'D:\PythonProject\爬虫\someSpiders\news.txt'
    while url:
        try:
            result = get_url(url)
            # Older get_url returned a bare None on the last page;
            # handle both shapes so nothing is lost.
            if result is None:
                return
            page_news_link, next_url = result
            # One open per page instead of one per article; `with`
            # guarantees the handle is closed even on error.
            with open(out_path, 'a', encoding='utf-8') as f:
                for link in page_news_link:
                    title, content = get_content(link)
                    f.write(title + '|' + content + '\n')
                    print(title)
        except Exception as e:
            # Best-effort crawl: report the failure and stop cleanly.
            print(e)
            return
        url = next_url


if __name__ == '__main__':
    get_txt(url)
    # Dump the collected data for a quick visual check.
    # `with` closes the file even if printing raises; iterating the
    # handle streams lines instead of materializing them all at once.
    with open(r'D:\PythonProject\爬虫\someSpiders\news.txt', 'r', encoding='utf-8') as f:
        for line in f:
            print(line)
