import urllib.request 
from urllib import error
from html.parser import HTMLParser
import time

# ======*********************===== #
date = None # global date: URL path fragment of the form "/YYYY-MM/DD/", set by getList()
# ======*********************===== #

# #################################################读文章模块##############################################################
def wordParse(url,date):
    """Download one article page, extract title/author/body and save it.

    url  : full URL of the article page.
    date : path fragment of the form "/YYYY-MM/DD/" (e.g. "/2020-01/23/").

    Writes two files: one per-article text file named
    "<date>_<title>_<author>.txt", and appends a 100-character preview
    line to the shared preview file.
    """
    Tit = None  # article title
    Ant = None  # author / byline
    Con = None  # article body
    # BUG FIX: the original bound this string to the name `time`, shadowing
    # the `time` module, so `time.sleep(3)` in the retry path crashed with
    # AttributeError. Renamed to day_str.
    day_str = date[1:11].replace("/", "-")  # "/2020-01/23/" -> "2020-01-23"
    try:
        s = urllib.request.urlopen(url, timeout = 10)
    except error.HTTPError as e:
        print("遇到错误！休息 3 秒再试...")
        print(e.reason)
        time.sleep(3)  # works now that the module is no longer shadowed
        s = urllib.request.urlopen(url, timeout = 10)

    Text = s.read().decode()
    # Title: text of the first <h1>...</h1> (find() + 4 skips "<h1>";
    # same arithmetic as the original, including the -1 + 4 miss case).
    a = Text.find("<h1>") + 4
    b = Text.find("</h1>")
    Tit = Text[a:b].replace("<br>", "_")
    print(Tit)
    # Author: text of the first <h4>...</h4>, wrapped in CJK quotes.
    a = Text.find("<h4>") + 4
    b = Text.find("</h4>")
    Ant = '“' + Text[a:b] + '”'
    print(Ant)
    # Body: everything between the enpcontent comment markers.
    a = Text.find("<!--enpcontent-->") + len("<!--enpcontent-->")
    b = Text.find("<!--/enpcontent-->")
    Con = Text[a:b]
    Con = Con.replace("<P>", "")
    Con = Con.replace("</P>", "\n")
    print(day_str)
    # Save the full article, then append the preview line.
    with open(day_str+"_"+Tit+"_"+Ant+".txt", "w+") as f:
        f.write("人民日报 "+day_str+"\n"+Tit+"\n\t"+Ant+"\n"+Con)
    with open("有关疫情文章题目加一百字预览.txt","a+") as f:
        f.write(day_str+"《"+Tit+"》"+Ant+"："+Con[:100].replace("&nbsp", " ").replace("　","").replace("\n"," ").replace("\r"," ") +"\n")
# End of def wordParse(url,date):
# ############################################从每版的目录页抓相关文章#########################################################
# Shared state for the hmsr parser below.
state = None      # parser mode: "riqi_" (date banner), "word!" (headline link), or None
tempLink = None   # href of the most recently seen <a> tag
KeyPageList = [] #! output: article links whose link text matched a keyword
beforeWork = None # URL prefix prepended to relative article links

class hmsr(HTMLParser): # extracts article links from one page ("版") of the paper
    """Scan a page listing: whenever a headline link's text contains one
    of the epidemic keywords, append beforeWork + href to KeyPageList.

    State machine over module globals: handle_starttag arms `state`
    (and remembers the latest <a> href in `tempLink`); handle_data
    consumes and clears it.
    """

    def handle_starttag(self, tag, atters):
        global state
        global tempLink
        if tag == "a":
            # Remember the newest link target; handle_data decides relevance.
            tempLink = atters[0][1]
        elif tag == "div" and len(atters) != 0:
            name0, value0 = atters[0]
            if name0 == "id" and value0 == "riqi_":
                state = "riqi_"   # the page's date banner follows
            elif name0 == "style" and value0 == "display:inline":
                state = "word!"   # an article headline link follows

    def handle_data(self, data):
        # =========================KEYS============================= #
        Keys = ["疫", "患", "急", "医", "肺", "口罩", "雷神山", "火神山","钟南山", "武汉", "病毒", "卫健委", "联防联控"]
        # =========================KEYS============================= #
        global state
        global tempLink
        global KeyPageList
        global beforeWork

        if state is None:
            return
        if state == "riqi_":
            print("现在处理的是日期")
            print(data)
        elif state == "word!":
            print(data + "\n\t链接为： "+ tempLink)
            if any(data.find(key) != -1 for key in Keys):
                print("符合条件！")
                KeyPageList.append(beforeWork+tempLink) # queue this article for download
            else:
                print("无关键字！")
        state = None  # consume the armed state either way

savedURL = []  # NOTE(review): never referenced in the visible portion of this file — candidate for removal


def getWork(url):
    """Fetch one page listing and run the hmsr parser over it.

    The parser appends any keyword-matching article links it finds to
    the module-level KeyPageList. On an HTTPError the request is
    retried once after a 3-second pause.
    """
    try:
        response = urllib.request.urlopen(url, timeout=10)
    except error.HTTPError as err:
        print("遇到错误！休息 3 秒再试...")
        print(err.reason)
        time.sleep(3)
        response = urllib.request.urlopen(url, timeout=10)

    page_html = response.read().decode()
    print(page_html[:100])

    parser = hmsr()
    parser.feed(page_html)
# ##################################################遍历每一版#############################################################
# Shared state for getList() and the chkAllPage parser below.
chkState = None  # "OPEN" after a candidate <a class=... href=...> tag; None otherwise
pageL = []       # output: URLs of every page ("版") discovered, across all days
i = 0            # count of entries appended to pageL so far
temLink = None   # candidate href of a possible next-page link
isNext = True    # False while a candidate link is pending confirmation

pagebase = "http://paper.people.com.cn/rmrb/html"
file = "nbs.D110000renmrb_01.htm"  # front page of each day's issue (NOTE: shadows builtin `file` name convention)

class chkAllPage(HTMLParser):
    """Find the "下一版" (next page) link on a page listing.

    handle_starttag records any <a> with a class + href attribute pair
    as a candidate; handle_data confirms it only when the link text is
    exactly "下一版 ", then appends the absolute URL to pageL.
    """

    def handle_starttag(self, tag, atters):
        global chkState
        global pageL
        global i
        global temLink
        global isNext
        if tag != 'a' or len(atters) != 2:
            return
        (name0, _), (name1, value1) = atters
        if name0 == "class" and name1 == "href":
            print("有一个候选！")
            temLink = value1   # candidate only — might be the previous-page link
            chkState = "OPEN"
            isNext = False

    def handle_data(self, data):
        global chkState
        global pageL
        global i
        global temLink
        global isNext
        global date

        if chkState != "OPEN":
            return
        if data == "下一版 ":
            print(i)
            pageL.append(pagebase + date + temLink)  # confirmed next page
            print("有效的下一页！ " + pageL[i])
            i = i + 1
            temLink = None
            chkState = None
            isNext = True
# NOTE(review): dead code — this handle_endtag variant was disabled by
# wrapping it in a module-level string literal; the block below has no
# runtime effect and is kept only as a historical note.
'''                
    def handle_endtag(self, tag):
        if(tag == "html" ):
            if(temLink == None):
                print("这一页没有有效的下一页！")
                chkState = "End"
            else:
                print("TEMLINK: "+temLink)

'''
def getList(inDate):
    """Crawl every page ("版") of the paper for the day *inDate*.

    inDate: path fragment "/YYYY-MM/DD/". Starts from the front page and
    follows the "下一版" links discovered by chkAllPage until none remain,
    appending each page URL to the module-level list pageL.
    NOTE: pageL and the counter i accumulate across calls — they are
    deliberately not reset here.
    """
    global pageL
    global i
    global file
    # date = "/2020-03/06/"  (example of the expected inDate format)
    global pagebase
    global date
    date = inDate
    i = i + 1
    print(date)
    pageL.append(pagebase + date + file)
    page = pagebase + date + file
    cap  = chkAllPage()# cap: instance of the next-page checker/parser
    # isNext / chkState are toggled by cap.feed() via module globals.
    while isNext:
        if(chkState == "End"):
            break
        
        print("即将访问",page)
        try:
            s = urllib.request.urlopen(page, timeout = 10)
        except error.HTTPError as e:
            # retry once after a short pause
            print("遇到错误！休息 3 秒再试...")
            print(e.reason)
            time.sleep(3)
            s = urllib.request.urlopen(page, timeout = 10)
            
        if(s.getcode() != 200):
            break
        cap.feed(s.read().decode())  # may append the next page's URL and bump i
        print("休息一下")
        time.sleep(0.1)
        page = pageL[i-1]  # most recently appended URL (i was incremented by cap)
    print("抓取了",i,"版")
    print(pageL)

# ########################################################日期生成器#####################################################################
Day = [2020, 2, 25]  # crawl start date as [year, month, day]; main() advances past it before fetching
dayMax = None  # NOTE(review): unused here — addDate() uses its own local dayMax
def addDate(dd):
    """Advance the [year, month, day] list *dd* by one day, in place.

    Returns dd for convenience (the list is mutated).

    Fixes two defects of the original:
    - February was hard-coded to 29 days (only correct for 2020); the
      standard leap-year rule is applied now (identical for 2020).
    - BUG FIX: after December the year was incremented but the month was
      never reset to 1, so dd[1] stayed at 13 forever.
    """
    # Days per month for a non-leap year, indexed by month-1.
    month_days = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    dayMax = month_days[dd[1] - 1]
    if dd[1] == 2 and (dd[0] % 4 == 0 and (dd[0] % 100 != 0 or dd[0] % 400 == 0)):
        dayMax = 29  # leap-year February

    dd[2] = dd[2] + 1

    if dd[2] > dayMax:
        dd[2] = 1
        dd[1] = dd[1] + 1
    if dd[1] > 12:
        dd[1] = 1  # month rollover was missing in the original
        dd[0] = dd[0] + 1
    return dd

def toStr(dd):
    """Render a [year, month, day] list as the "/YYYY-MM/DD/" URL fragment."""
    year, month, day = dd
    # Zero-pad month and day to two digits, as the site's URLs require.
    return "/{}-{:02d}/{:02d}/".format(year, month, day)


# ##########################################################主函数###################################################################


def main():
    """Crawl People's Daily pages day by day, then download every
    epidemic-related article found.

    Walks the calendar from the day after `Day` (2020-02-25) through
    2020-03-06 inclusive: the date is advanced *before* getList runs,
    so 02/25 itself is skipped and 03/06 is crawled.
    """
    # De-duplicated: the original declared `global pageL` and `global i` twice.
    global chkState
    global pageL
    global i
    global temLink
    global isNext
    global file
    global pagebase
    global date
    global Day
    global KeyPageList  # output: links to keyword-matching articles
    global beforeWork   # URL prefix used when opening an article link

    while Day != [2020,3,6]:
        # Reset the per-day parser state. pageL and i intentionally
        # accumulate across days (getList indexes pageL with i).
        chkState = None
        temLink = None
        isNext = True
        date = None
        pagebase = "http://paper.people.com.cn/rmrb/html"
        file = "nbs.D110000renmrb_01.htm"

        Day = addDate(Day)
        getList(toStr(Day))

    # Scan every collected page listing for keyword-matching articles.
    for url in pageL:
        beforeWork = url[:url.rfind('/')+1]
        getWork(url)  # hmsr appends matches to KeyPageList
        time.sleep(0.1)

    # Download and save each matching article; the slice around "20"
    # recovers the "/YYYY-MM/DD/" fragment from the URL.
    for url in KeyPageList:
        wordParse(url,url[url.find("20")-1:url.find("20")+11])
        time.sleep(0.1)

        
# Guard the entry point so importing this module does not start the crawl.
if __name__ == "__main__":
    main()
