'''
Scrape the "photo story" (image gallery) news listing from jiemian.com.
'''
import time
from Apr13.pcUtils import *
s = Scrawler()
url = "https://www.jiemian.com/lists/44.html"
# Image selector path copied from browser dev tools:
# load-list > li:nth-child(1) > div.card-list__img.news-img > a > img
titles = s.getTexts(url, selector='h3')
picUrl = s.getImgUrl(url, selector='div.card-list__img.news-img a img')
for index, img_url in enumerate(picUrl):
    # Renamed loop variable: the original reused `url`, shadowing the
    # list-page URL defined above.
    s.getImgs(img_url, imgName="PhotoNews-" + str(index) + ".jpg")
    # Throttle between requests. Originally this sleep was outside the
    # loop (ran once at the end); moved inside so each download is rate
    # limited, matching the per-iteration sleep in the commented-out
    # pagination code below.
    time.sleep(0.1)

'''
for page in range(1,11):
    url = "https://www.jiemian.com/tags/2066/{}.html".format(page)
    #1.获取该页面上每一条新闻的标题
    #选择器路径    #onlyTotopFlag > div > div > div > div.columns.columns-left > div.tag-list-news > div:nth-child(1) > div.news-right > div.news-header > h3 > a
    titles = s.getTexts(url,selector="h3 a")
    print(titles,len(titles))
    #1.1获取每一条新闻的链接
    newsurls = s.getUrl(url,selector="h3 a")
    print(newsurls)
    #1.2根据URL获取新闻详情
    #body > div.article-wrapper-box > div:nth-child(5) > div.content > div > div.content-container > div.main-container > div.article-view > div.article-main > div.article-content > p:nth-child(2)
    for index,newsurl in enumerate(newsurls):
        contents = s.getTexts(url,selector="div.article-view div.article-main div.article-content p")
        print("这条新闻的内容就是这样。")
        with open("第{}页".format(page)+"第{}条.txt".format(index),"w+") as f:
            f.write(str(contents))

    #2.获取新闻简介
    #选择器路径    #onlyTotopFlag > div > div > div > div.columns.columns-left > div.tag-list-news > div:nth-child(1) > div.news-right > div.news-main > p
    intro = s.getTexts(url,selector="div.news-main p")
    print(intro)

    #3.获取发布单位
    #选择器路径    #onlyTotopFlag > div > div > div > div.columns.columns-left > div.tag-list-news > div:nth-child(1) > div.news-right > div.news-footer > p > span.author > a
    org = s.getTexts(url,selector="p span.author a")
    print(org)

    #4.获取发布时间
    #选择器路径    #onlyTotopFlag > div > div > div > div.columns.columns-left > div.tag-list-news > div:nth-child(1) > div.news-right > div.news-footer > p > span.date
    Time = s.getTexts(url,selector="div.news-footer p span.date")
    print(Time)
    with open("result.txt","w+") as f:
        f.write(r"这是第{}页的新闻：\n".format(page)+str(titles))
    print("第{}页爬取完毕".format(page))
    time.sleep(0.1)
    
'''

