
# -*- coding:utf-8 -*-
import requests as rq
import re
header = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.105 Safari/537.36'
}
MAIN_URL = "https://www.chinanews.com/"
def getRegion():
    """Fetch the chinanews.com homepage and return the regional sub-site URLs.

    Side effects: saves the (whitespace-stripped) homepage HTML to
    ``homepage.txt`` in the current directory.

    Returns:
        list[str]: URLs like ``http://www.xx.chinanews.com/`` (2- or 3-letter
        region codes) found on the homepage; empty if the fetch failed.
    """
    text = ""
    try:
        resp = rq.get(MAIN_URL, headers=header, timeout=5)
        resp.encoding = 'utf-8'
        # Strip newlines and spaces so the regexes below can match across
        # what were originally multiple lines.
        text = resp.text.replace('\n', '').replace('\r', '').replace(' ', '')
    except rq.RequestException:
        # Network failure is tolerated: we fall through with empty text.
        print("Error to connect " + MAIN_URL)
    with open("homepage.txt", 'w', encoding='utf-8') as homepage:
        homepage.write(text)
    # Dots escaped (the original r'www.[a-z][a-z][a-z]?.chinanews.com'
    # let '.' match any character); [a-z]{2,3} == [a-z][a-z][a-z]?.
    pattern = re.compile(r'http://www\.[a-z]{2,3}\.chinanews\.com/')
    # findall on a valid compiled pattern cannot raise; no try needed.
    return pattern.findall(text)

def getLink(region_list):
    """Collect news-article URLs from each regional homepage.

    Args:
        region_list: iterable of base URLs (each ending with ``/``), e.g.
            the result of :func:`getRegion`.

    Returns:
        set[str]: deduplicated article URLs. Relative ``/news...`` hrefs are
        resolved against the region they were found on.
    """
    news = set()
    # Hoisted out of the loop; '.' before "html" escaped (the original
    # unescaped dot matched any character).
    pattern = re.compile(r'href="/news.*?\.html"')
    for region in region_list:
        try:
            resp = rq.get(region, headers=header, timeout=5)
        except rq.RequestException:
            # Original fell through here and crashed with NameError on the
            # unbound response; skip this region instead.
            print("Error to connect " + region)
            continue
        resp.encoding = 'utf-8'
        page = resp.text.replace('\n', '').replace('\r', '').replace(' ', '')
        for href in pattern.findall(page):
            # Discard matches that swallowed markup (non-greedy match can
            # still cross tags once whitespace is stripped).
            if '<' in href or '>' in href:
                continue
            if "www" in href:
                # Already an absolute-looking link: just strip the wrapper.
                news.add(href.replace('href=', '').replace('"', ''))
            else:
                # Relative path: prepend the region base URL.
                news.add(region + href.replace('href="/', '').replace('"', ''))
    return news

def getNews():
    """Fetch every link in ``news_links.txt`` and save its Chinese text.

    For the N-th (deduplicated) link, the extracted CJK character runs are
    written to ``data/N.txt``. Pages are decoded as GBK, matching the
    encoding used by the article pages. Requires ``news_links.txt`` (one URL
    per line) and an existing ``data/`` directory.
    """
    # Deduplicate links; strip the trailing newline while reading.
    with open("news_links.txt", 'r', encoding='utf-8') as link_file:
        link_set = {line.rstrip('\n') for line in link_file}
    # Runs of CJK Unified Ideographs; compiled once, not per page.
    han_pattern = re.compile(r'[\u4e00-\u9fa5]+')
    for count, link in enumerate(link_set, start=1):
        print("------ ", count, " ------")
        print(link)
        temp_text = ""
        try:
            resp = rq.get(link, headers=header, timeout=5)
            resp.encoding = 'gbk'
            temp_text = resp.text.replace('\n', '').replace('\r', '').replace(' ', '')
        except rq.RequestException:
            # Best-effort: a failed fetch produces an empty output file.
            print("Error to connect " + link)
        # Context manager guarantees the file is closed even if a write fails.
        with open("data/" + str(count) + ".txt", 'w', encoding='utf-8') as out:
            for run in han_pattern.findall(temp_text):
                out.write(run)
if __name__ == "__main__":
    # region_list = getRegion()
    # region_list.append(MAIN_URL)
    # urls = set()
    # urls = getLink(region_list)
    # link_file = open("news_links.txt",'a',encoding='utf-8')
    # for i in urls:
    #     link_file.write(i+'\n')
    # link_file.close()
    # print(len(urls))
    # print(urls)
    getNews()   # 在运行完毕上述代码之后，运行该语句
    