# Requires the third-party packages aiohttp and aiofiles (pip install aiohttp aiofiles);
# asyncio is part of the standard library.
import aiohttp
import asyncio
import aiofiles
from bs4 import BeautifulSoup as bSoup
import datetime
import json
import time

# Shared aiohttp.ClientSession; created inside get_news() and None until then.
session = None
# Daily-index URLs that failed to download or decode, tagged with the error kind.
error_urls = []
# Article URLs that failed to download or decode, tagged with the error kind.
error_content_urls = []

# Candidate character encodings, tried in order when decoding a response body.
# Add further encodings here if decode errors occur for other pages.
code_format_list = ['gb2312','utf-8','gbk']

# Only the following three functions need to be adapted for a different site.
# content_parsing: parses a fetched article page.
# Returns the article text.
#######################################################################################################################
# Adjust the tag/attribute lookups below to match the target page's HTML.
def content_parsing(shtml: str) -> str:
    """Extract the article text from a news page.

    Looks for the <div id="about_txt"> container and joins the text of its
    <p> tags with newlines.

    :param shtml: decoded HTML of one article page
    :return: article text, or '' when the container is absent
    """
    container = bSoup(shtml, 'lxml').find('div', id='about_txt')

    # `is None`, not `== None`: identity check is the correct idiom and
    # avoids any __eq__ surprises.
    if container is None:
        return ''

    return '\n'.join(p.text for p in container.find_all('p'))

# Build the daily index URL for a given date.
def create_url(day: datetime.datetime) -> str:
    """Return the daily news-index URL for *day* (date encoded as YYYYMMDD)."""
    # The path pattern (and the %Y%m%d piece) may need adjusting for other sites.
    return f'http://tv.cctv.com/lm/xwlb/day/{day:%Y%m%d}.shtml'
    
# Adapt this function for different index-page HTML layouts.
# Parses the index HTML into the concrete article links and their titles.
def urls_parsing(shtml: str):
    """Extract the article links and titles from a daily index page.

    Only anchors that actually carry an href are considered: the original
    code called ``x.get('href').replace(...)``, which raises AttributeError
    on an <a> tag without an href.  Filtering in find_all also keeps the
    two returned lists index-aligned.

    :param shtml: decoded HTML of the daily index page
    :return: (urls, titles) — parallel lists of article URLs and titles
    """
    anchors = bSoup(shtml, 'lxml').find_all('a', href=True)

    # The legacy news.cntv.cn host is rewritten to the current tv.cctv.com.
    urls = [a['href'].replace('news.cntv.cn', 'tv.cctv.com') for a in anchors]
    titles = [str(a.text) for a in anchors]

    return urls, titles
#######################################################################################################################


# bytes to string
# Decode a raw response body by trying each configured encoding in turn.
def b2s(bstr: bytes, encodings=None) -> str:
    """Decode *bstr* by trying each candidate encoding in order.

    :param bstr: raw response body
    :param encodings: iterable of codec names to try; defaults to the
        module-level ``code_format_list`` (backward compatible)
    :return: the first successful decode, or '' if every codec fails
    """
    if encodings is None:
        encodings = code_format_list
    for enc in encodings:
        try:
            return bstr.decode(enc)
        # Narrow except: the original bare `except` also swallowed
        # KeyboardInterrupt/SystemExit.  LookupError covers a typo'd
        # codec name added to the encoding list.
        except (UnicodeDecodeError, LookupError):
            pass
    return ''


# Download and parse the content of one specific article.
async def get_content(url: str, sem) -> str:
    """Download one article page and return its parsed text.

    Failures are recorded in ``error_content_urls`` and '' is returned, so
    the caller's contents list stays homogeneous (the original returned
    None on a link error but '' on a decode error).

    :param url: article URL
    :param sem: asyncio.Semaphore limiting concurrent requests
    :return: article text, or '' on any failure
    """
    async with sem:
        async with session.get(url) as resp:

            if resp.status != 200:
                error_content_urls.append('[link_error]' + url)
                return ''  # was a bare `return` (None) — inconsistent with the other paths

            # Raw bytes first; encoding is sniffed by b2s, not by aiohttp.
            raw = await resp.read()
            shtml = b2s(raw)

            if shtml == '':
                error_content_urls.append('[code_error]' + url)
                return ''

            content = content_parsing(shtml)
            print(url)

            return content
            
# Fetch one day's worth of news and dump it to a JSON file.
async def get_daily_news(day: datetime.datetime, sem):
    """Fetch one day's index page, download every linked article, and write
    the result to ``./news/YYYYMMDD.json``.

    Index-page failures are recorded in ``error_urls``; per-article failures
    are handled inside get_content().

    :param day: the date to scrape
    :param sem: asyncio.Semaphore limiting concurrent requests
    """
    async with sem:
        url = create_url(day)
        tar_file = './news/' + day.__format__('%Y%m%d') + '.json'
        async with session.get(url) as resp:
            if resp.status != 200:
                error_urls.append('[link_error]' + url)     # link error
                return

            # Fetch the page body asynchronously, then decode it.
            shtml = b2s(await resp.read())

            # Treat an undecodable body as an error.
            if shtml == '':
                error_urls.append('[code_error]' + url)     # encoding error
                return

            urls, titles = urls_parsing(shtml)

            # gather() preserves input order and — unlike asyncio.wait —
            # accepts an empty sequence (an index page with no links used
            # to raise ValueError here).
            contents = await asyncio.gather(*(get_content(u, sem) for u in urls))

            news = [{"id": i, "url": u, "title": t, "content": c}
                    for i, (u, t, c) in enumerate(zip(urls, titles, contents))]

            json_dict = {'date': day.__format__('%Y%m%d'), 'news': news}
            json_str = json.dumps(json_dict, ensure_ascii=False)
            # Write the day's JSON without blocking the event loop.
            async with aiofiles.open(tar_file, 'w', encoding='utf-8', errors='ignore') as f:
                await f.write(json_str)
            print(url)
        

# Pause lengths (seconds) applied between batches of daily tasks, cycled in order.
sleep_time = [5,8,13,16,21,24,29,32,37,40]
# Task-creation entry point: scrapes every day in the given date range.
async def get_news(sdate, edate, sem):
    """Scrape every day in [sdate, edate), 16 daily tasks per batch, with a
    polite pause between batches, then flush the error logs to disk.

    :param sdate: first date to scrape (inclusive)
    :param edate: last date bound (exclusive)
    :param sem: asyncio.Semaphore limiting concurrent requests
    """
    global session
    async with sem:
        # Initialise the shared session; closed in the finally block even
        # if a batch raises.
        session = aiohttp.ClientSession()
        try:
            tasks = []
            i = 0
            while sdate < edate:
                # Cap the number of simultaneously scheduled daily tasks.
                if len(tasks) == 16:
                    await asyncio.wait(tasks)
                    tasks = []
                    # asyncio.sleep keeps the event loop responsive;
                    # time.sleep here would block the whole loop.
                    await asyncio.sleep(sleep_time[i])
                    i = (i + 1) % len(sleep_time)
                    continue
                # Schedule one day's scrape and advance the date.
                tasks.append(asyncio.create_task(get_daily_news(sdate, sem)))
                sdate += datetime.timedelta(days=1)

            await asyncio.sleep(3 + i)
            # Guard: asyncio.wait raises ValueError on an empty task list,
            # which happened whenever the date span was a multiple of 16.
            if tasks:
                await asyncio.wait(tasks)
        finally:
            # Always close the session.
            await session.close()
    # Persist any collected error URLs.
    write_error_to_file()
    
    
def _append_unique(path: str, items) -> None:
    """Append each item in *items* to *path* as its own line, skipping lines
    the file already contains; a missing file counts as empty."""
    # The original opened with 'r' unconditionally, crashing with
    # FileNotFoundError on the very first run.
    try:
        with open(path, 'r') as f:
            existing = set(f.readlines())   # set: O(1) membership tests
    except FileNotFoundError:
        existing = set()

    with open(path, 'a') as f:
        for item in items:
            line = item + '\n'
            if line not in existing:
                f.write(line)
            else:
                print('exist')


def write_error_to_file():
    """Persist the accumulated error URLs under ./errors/, de-duplicated
    against whatever the files already contain."""
    _append_unique('./errors/error_urls.txt', error_urls)
    _append_unique('./errors/error_content_urls.txt', error_content_urls)

if __name__ == '__main__':

    async def _main() -> None:
        """Script entry point: scrape 2016-02-03 .. 2016-12-30 inclusive."""
        # Create the semaphore inside the running loop: on Python < 3.10
        # asyncio primitives bind to the loop that exists at construction
        # time, and asyncio.run() starts a fresh loop — creating it at
        # module level could attach it to the wrong loop.
        sem = asyncio.Semaphore(512)
        await get_news(datetime.datetime(2016, 2, 3),
                       datetime.datetime(2016, 12, 31),
                       sem)

    asyncio.run(_main())
    
    
    

