import datetime
import json
import os
import time

import requests
from bs4 import BeautifulSoup as bSoup

# Character encoding the target site serves its pages in
codeFormat = 'gb2312'
# Daily listing-page URLs that failed to load
error_urls = []
# Individual article URLs that failed to load
error_content_urls = []

def get_soup(url: str):
    """Fetch *url* and return a BeautifulSoup of the page, or None on failure.

    Returns None for any network error, timeout, or non-200 status so that
    callers can record the URL and continue the crawl.
    """
    try:
        # Timeout keeps the crawler from hanging forever on a dead host;
        # network failures are folded into the existing None-on-failure contract.
        resp = requests.get(url, timeout=10)
    except requests.RequestException:
        return None

    if resp.status_code != 200:
        return None

    # The site serves gb2312-encoded pages; set it before reading .text
    resp.encoding = codeFormat
    return bSoup(resp.text, "lxml")
    
#获取新闻内容，处理过程与 get_daily_news 类似
def get_content(url: str) -> str:
    soup = get_soup(url)
    
    if soup == None:
        error_content_urls.append(url)
        return ''
        
    soup = soup.find("div", id='content1').find_all('p')
    contents = [str(x.string) for x in soup]
    
    return '\n'.join(contents)
    
#获取新闻并打包成dict list
def get_daily_news(url:str)->list:
    #获取beautifulsoup对象
    soup = get_soup(url)
    
    #返回值为None则出现错误
    if soup == None:
        error_urls.append(url)
        return None
     
    #解析网页获取所有a标签
    dsoup = soup.find('div','title_list_box').find_all('a')
    
    #解析a标签获取具体新闻链接及标题
    urls = [x.get('href') for x in dsoup]
    titles = [x.string for x in dsoup]
    #根据链接获取具体新闻
    contents = [get_content(url) for url in urls]
    #辅助函数，打包dict
    to_dict = lambda i,x,y,z:{"id": i,"url": x,"title": y,"content": z}
    #打包news
    news = [to_dict(i,urls[i],titles[i],contents[i]) for i in range(len(urls))]

    return news
    
# Append the failed URLs of this run to the error files, skipping duplicates.
def _append_unique(path: str, urls: list):
    """Append each url in *urls* to *path* unless it is already recorded."""
    try:
        with open(path, 'r') as f:
            # splitlines() strips newlines, so a last line written without a
            # trailing '\n' still matches.
            existing = set(f.read().splitlines())
    except FileNotFoundError:
        # First run: nothing recorded yet.
        existing = set()

    with open(path, 'a') as f:
        for url in urls:
            if url not in existing:
                f.write(url + '\n')
            else:
                print('exist')


def write_error_to_file(urls=None, content_urls=None):
    """Persist failed URLs to ./errors/, avoiding duplicate entries.

    Args default to the module-level error_urls / error_content_urls lists,
    preserving the original no-argument call.
    """
    # Create the directory up front so the first run does not crash.
    os.makedirs('./errors', exist_ok=True)
    if urls is None:
        urls = error_urls
    if content_urls is None:
        content_urls = error_content_urls

    _append_unique('./errors/error_urls.txt', urls)
    _append_unique('./errors/error_content_urls.txt', content_urls)


if __name__ == "__main__":
    #爬取时间段
    sdate = datetime.datetime(2010,5,6)
    edate = datetime.datetime(2010,6,16)
    while sdate<edate:
        #根据日期生成网址
        url = 'http://news.cntv.cn/program/xwlb/' + sdate.__format__('%Y%m%d') + '.shtml'
        #获取新闻
        news = get_daily_news(url)
        #打包成dict
        json_dict = {'date':sdate.__format__('%Y%m%d'),'news':news}
        #转换成json字符串
        json_str = json.dumps(json_dict,ensure_ascii=False)
        #根据日期生成保存文件地址
        tar_file = './news/' + sdate.__format__('%Y%m%d') + '.json'
        #写入文件
        with open(tar_file,'w',encoding='utf-8', errors='ignore') as f:
                f.write(json_str)
        sdate += datetime.timedelta(days=1)
    
    #将报错的url写入文件
    write_error_to_file()
