import requests,json,os,time
from datetime import datetime
from lxml import etree
from tinydb import where
from common import UID,HandleTmpList,parseContentToName,ProductToGroup,sess
SPIERNAME='牛钱网'
    
def parseNiuMoneyArtContent(url):
    """Fetch one niumoney.com article page and return its plain-text body.

    Args:
        url: absolute URL of the article page.

    Returns:
        Text content of the ``section#wrap`` element with surrounding
        whitespace stripped, or '' when the response body is empty or
        the element is missing.
    """
    r = sess.get(url)
    # Site serves UTF-8 but may not declare it; force the decode.
    r.encoding = 'utf8'
    selector = etree.HTML(r.text)
    # Bug fix: lxml returns None for an empty/whitespace-only document;
    # without this guard, .cssselect() below raises AttributeError.
    if selector is None:
        print('页面内容为空', url)
        return ''
    ele_ls = selector.cssselect("section#wrap")
    if ele_ls:
        # XPath string(.) concatenates every descendant text node.
        content = ele_ls[0].xpath("string(.)").strip()
    else:
        print('页面内容为空', url)
        content = ''
    return content

def getNiuMoneyArticleList(articleCol, BeCrawledUrlList):
    """Crawl the latest niumoney.com article lists and store new articles.

    Queries the site's JSONP article API once per category tag, stops a
    category as soon as an already-stored URL is seen (listings are
    newest-first), fetches each new article's body, classifies it by
    futures product/group, and hands the batch to HandleTmpList.

    Args:
        articleCol: article collection (TinyDB-style) used for
            de-duplication and final storage.
        BeCrawledUrlList: legacy list of crawled URLs; currently unused
            (de-duplication is done against articleCol instead).
    """
    url = 'https://www.niumoney.com//'
    # Former per-category list pages, kept for reference:
    # https://www.niumoney.com/news/article_61_1#53  energy & chemicals
    # https://www.niumoney.com/news/article_61_1#54  non-ferrous metals
    # https://www.niumoney.com/news/article_61_1#55  ferrous metals
    # https://www.niumoney.com/news/article_61_1#75  research reports
    params = {
        'app': 'article',
        'action': 'fetchArticles',
        # JSONP callback name; the server wraps the JSON payload in it.
        'callback': 'jQuery1830754660537088399_' + str(int(time.time() * 1000)),
        'tagId': '53',           # overwritten per category below
        'page': '1',
        'pageSize': '10',
        '_': '1591412826433',    # cache-buster, refreshed per request below
    }

    temp_article_ls = []
    # 52 agricultural, 53 energy & chemicals, 54 non-ferrous metals,
    # 55 ferrous metals, 75 research reports
    for tagId in (52, 53, 54, 55, 75):
        params['tagId'] = tagId
        params['_'] = str(int(time.time() * 1000))
        r = sess.get(url, params=params)
        # Strip the JSONP wrapper: keep everything between the first '{'
        # and the last '}'.
        start = r.text.find('{')
        end = r.text.rfind('}')
        if start == -1 or end == -1:
            # Bug fix: a brace-less (empty/error) response previously
            # produced a garbage slice and a JSONDecodeError that
            # aborted the whole crawl; skip just this category instead.
            print('Invalid response for tagId', tagId)
            continue
        dc = json.loads(r.text[start:end + 1])
        # Bug fix: tolerate a malformed payload instead of raising
        # KeyError and losing the remaining categories.
        eleList = dc.get('data', {}).get('data') or []
        for ele in eleList:
            articleUrl = ele['url']
            # Listings are newest-first: the first already-stored URL
            # means the rest of this category is old too.
            if len(articleCol.search(where('articleUrl') == articleUrl)):
                break
            title = ele['title']
            publicTime = ele['add_time']
            temp_dict = {'tags': ['niumoney'], 'score': 0, 'uid': UID()}
            temp_dict['title'] = title.strip()
            temp_dict['articleFrom'] = 'niumoney.com'
            temp_dict['url'] = articleUrl.strip()
            # Bug fix: str() guards against add_time arriving as a
            # number (no-op when it is already a string).
            temp_dict['publicTime'] = str(publicTime).strip()
            # Full article body.
            content = parseNiuMoneyArtContent(articleUrl)
            temp_dict['content'] = content
            # Classify the article's futures product and sector group.
            n = parseContentToName(title + content)
            if n:
                print(SPIERNAME, '   ', title, '     ', n)
                temp_dict['product_name'] = n
                temp_dict['group'] = ProductToGroup[n]
            else:
                print("………………………………未找到品种名称，可能异常")
                temp_dict['product_name'] = ''
                temp_dict['group'] = ''

            temp_article_ls.append(temp_dict)

    # NOTE: must run after the loop — mind the indentation.
    HandleTmpList(temp_article_ls, articleCol, SPIERNAME)
    
   

