import requests
import re
import os
import time
from bs4 import BeautifulSoup
from datetime import datetime





# Create the output folders and files for a crawl run
def mkdir(newList):
    """Persist crawled news items to disk.

    Each item in newList is a dict with keys "title", "date", "author",
    "source", "content".  Layout written:
        ./news_<midnight-timestamp>/<title>/content.html

    The title is used verbatim as a directory name, so callers must have
    stripped path-hostile characters beforehand.
    """
    # Timestamp of today's midnight — groups one crawl run per calendar day.
    now = datetime.now().strftime("%Y-%m-%d")
    dt = int(time.mktime(time.strptime(now, "%Y-%m-%d")))
    fapath = "./news" + "_" + str(dt)
    # exist_ok replaces the racy exists()-then-makedirs() check.
    os.makedirs(fapath, exist_ok=True)

    for new in newList:
        path = fapath + "/" + new["title"]
        os.makedirs(path, exist_ok=True)
        # "with" guarantees the handle is flushed and closed; the original
        # opened the file and never called close().
        with open(path + "/content.html", "w", encoding='utf-8') as f:
            f.write("标题：" + new["title"] + "<br />")
            f.write("日期：" + new["date"] + "<br />")
            f.write("作者：" + new["author"] + "<br />")
            f.write("来源：" + new["source"] + "<br />")
            f.write(new["content"])


# Filter list items by date
def timeFilter(li, dates):
    """Return True when li's <i> date lies strictly between dates[0] and dates[1].

    li    -- a parsed listing element whose <i> tag holds a %Y-%m-%d date
    dates -- two-element sequence of %Y-%m-%d strings: [start, end]
    """
    fmt = '%Y-%m-%d'
    item_date = datetime.strptime(li.find("i").text, fmt)
    start, end = (datetime.strptime(d, fmt) for d in dates)
    # Strictly exclusive bounds, matching the original comparison.
    return start < item_date < end



'''
Visit each second-level (article) URL and filter by keywords.

Each matching article becomes:
new = {
    "title": li["title"],
    "date": li["date"],
    "author": author,
    "source": source,
    "content": content,
}

and the collected list [{}, {}, ...] is handed to mkdir() for saving.
'''
def openSecondUrl(lis, keys):
    """Fetch each article page in lis, keep articles whose source field
    contains every keyword, and save the matches via mkdir().

    lis  -- list of {"title", "date", "url"} dicts; url is site-relative
    keys -- keyword strings that must all appear in the article's source
    """
    newsList = []

    for li in lis:
        url = urlRoot + li["url"]
        response = requests.get(url)
        response.encoding = 'utf-8'
        soup = BeautifulSoup(response.text, 'html.parser')

        try:
            info = soup.findAll("div", {"class": "art-tit wow fadeInUp animated"})[0].find("i").text

            # Source ("资料来源"): last whitespace-separated token after the label.
            source = re.search(r"资料来源： \S+", info).group(0).split(" ")[-1]

            # Author ("作者"): text after the full-width colon.
            author = re.search(r"作者：\S+", info).group(0).split("：")[-1]

            if all(key in source for key in keys):
                # Rewrite every inline image src to an absolute URL so the
                # saved HTML still renders (single query — the original ran
                # the same findAll twice).
                for img in soup.findAll("img", {"class": "img_vsb_content"}):
                    img["src"] = urlRoot + img["src"]

                # Grab the article body only after the src rewrite.
                contents = soup.find_all("div", {"class": "v_news_content"})[0]

                newsList.append({
                    "title": li["title"],
                    "date": li["date"],
                    "author": author,
                    "source": source,
                    "content": str(contents),
                })

        # Pages missing the expected markup are skipped (best-effort
        # crawling), but unlike the original bare except this no longer
        # hides KeyboardInterrupt/SystemExit or programming errors.
        except (AttributeError, IndexError, KeyError, TypeError):
            continue

    if len(newsList) > 0:
        mkdir(newsList)


'''
Crawl every news item on the catalogue pages and filter by date range.
Each page yields [{title, date, url}, {...}] which is passed downstream.
'''
def openFirstUrl(lis):
    """Walk the catalogue-page URLs in lis, prompt the user for keywords
    and a date range, and pass the date-filtered items to openSecondUrl().

    lis -- list of catalogue-page URLs, one per listing page
    """
    # Space-separated search keywords, e.g. "科技 大学"
    keys = input("input keys:").split()

    # Start and end date, e.g. "2020-01-01 2020-12-31"
    dates = input("input dates:").split()

    for i, pageUrl in enumerate(lis):
        print("进入目录第", i, "页...")
        # BUGFIX: the original read the global urlList here (and clobbered
        # the parameter `lis` below), so it only worked when called with
        # that exact global. Use the parameter instead.
        response = requests.get(pageUrl)
        response.encoding = 'utf-8'
        soup = BeautifulSoup(response.text, 'html.parser')

        # All news <li> elements on this page, narrowed to the date range.
        items = soup.findAll('li', {'class': 'wow fadeInUp animated'})
        items = [li for li in items if timeFilter(li, dates)]

        # -> [{title, date, url}, ...]; "|" is replaced in titles because
        #    they later become directory names.
        items = [{
            "title": li.find("a").text.replace("|", " "),
            "date": li.find("i").text,
            "url": li.find("a").get("href"),
        } for li in items]

        # Descend into the article pages only when something survived.
        if items:
            openSecondUrl(items, keys)



# Site root; prefixed to the relative hrefs found in listing pages.
urlRoot = "http://www.usth.edu.cn/"
# Catalogue pages; starts with the newest page, the rest are appended in __main__.
urlList = ["http://www.usth.edu.cn/kdyw.htm"]

if __name__ == "__main__":

    # Read the first catalogue page to discover the total page count.
    first = requests.get(urlList[0])
    first.encoding = 'utf-8'
    pager = BeautifulSoup(first.text, 'html.parser')
    # The last "p_no" pagination span holds the highest page number.
    lastPage = int(pager.findAll("span", {"class": "p_no"})[-1].text)

    # Paginated URLs count DOWN from lastPage-1 to 1 (newest first).
    urlList.extend(
        "http://www.usth.edu.cn/kdyw/" + str(n) + ".htm"
        for n in range(lastPage - 1, 0, -1)
    )

    openFirstUrl(urlList)

    print("爬取完成")
    
