# Import the spider module for every supported site
from spiders import Myagric,TongHua,SinaFuture,Mysteel,TouTiao
from spiders import Yunken,ChinaGrain,EastMoney,Hexun,CnGold,QHRB
from spiders import Jinrongjie,AskCi,SMM,ChinaCotton,NiuMoney
from spiders import Gov,Baidu,LongZhong,JiaoLian,GongXinBu

import os
import traceback
from threading import Thread
from time import sleep

from common import articleCol
# URLs that have already been crawled. Spiders receive this list and use it
# to skip pages they have fetched before.
BeCrawledUrlList = []

# Article count before this run; compared against the final count at the end
# of the script to report how many new articles were added.
# NOTE: `len(x)` is the idiomatic spelling of `x.__len__()` — same behavior.
count_0 = len(articleCol)

# Optionally pre-seed the de-dup list from the stored articles (disabled):
# for item in articleCol.find({}, {'url': 1}):
#     BeCrawledUrlList.append(item.get('url', ''))


# Entry-point function for each site's spider. Every callable is invoked as
# f(articleCol, BeCrawledUrlList) by the loop below, in list order.
# Commented-out entries are sites that are currently disabled.
FuncList=[
    # Baidu.getBaiduZixunArticleList,
    Myagric.getMyagricArticle,
    SinaFuture.getSinaArticleList,
    Mysteel.getMysteelArticleList,
    Yunken.getYunkenArticleList,
    ChinaGrain.getChinaGrainArticleList,
    EastMoney.getEastMoneyArticleList,
    Hexun.getHexunArticleList,

    CnGold.getJinTouArticleLs,
    QHRB.getQHRBArticleList,
    Jinrongjie.getJinrongjieArticleList,
    # AskCi.getAskCiArticleList,
    # SMM.getSMMArticleList,
    TongHua.getTongHuaArtList,
    ChinaCotton.getChinaCottonArticleList,
    NiuMoney.getNiuMoneyArticleList,
    Gov.getGovArticleList,
    # LongZhong.getLongZhongArticleList,
    JiaoLian.getJiaoLianArticleList,
    GongXinBu.getGXBArticleList
]

# Run every spider. Each entry point stores what it finds via `articleCol`
# and records visited URLs in `BeCrawledUrlList`. One broken site must not
# abort the whole run, so every call is wrapped individually.
pool = []  # kept for the (currently disabled) threaded mode below
for f in FuncList:
    # -- threaded mode (disabled) --
    # t = Thread(target=f, args=(articleCol, BeCrawledUrlList))
    # t.start()
    # pool.append(t)

    # -- sequential mode --
    try:
        f(articleCol, BeCrawledUrlList)
    except Exception as e:
        # Log which spider failed, then continue with the next one.
        # print_exc() keeps the full traceback (the old `print(e)` lost it).
        print(f.__name__, '异常')
        traceback.print_exc()

# No-op while the threaded mode is disabled: `pool` stays empty.
for t in pool:
    t.join()

    # try:
    #     pass
    # except Exception as e:
    #     print('有错误')
    #     print(e)
    #     os.system('pause')

##各自的文件存储完成后，汇总

print('总共 %d  条'%articleCol.__len__())

#发送邮件
print('本次更新 %d 条'%(articleCol.__len__()-count_0))
