
import requests,re
from datetime import datetime,timedelta
from lxml import etree
from tinydb import where
from common import UID,HandleTmpList,parseContentToName,ProductToGroup,sess
SPIDERNAME='百度资讯'

def handleTime(time_str):
    """Convert a Baidu news timestamp fragment to 'YYYY-MM-DD HH:MM:SS'.

    time_str is a list of whitespace-split tokens that follow the source
    name in the result snippet, e.g. ['8小时前'] (relative time) or
    ['2021年5月1日', '12:30'] (absolute date + clock time).

    Returns the formatted time string; unrecognized shapes fall back to
    the current time.
    """
    now = datetime.now()
    if len(time_str) == 1:
        # Relative time such as 'N分钟前' (minutes ago) / 'N小时前' (hours ago).
        s = time_str[0]
        if '分钟' in s:
            # Bug fix: use \d+ (was \d) so multi-digit counts like
            # '15分钟前' parse as 15 rather than 5.
            m = int(re.search(r'(\d+)分钟', s).group(1))
            delta = timedelta(minutes=m)
        elif '小时' in s:
            h = int(re.search(r'(\d+)小时', s).group(1))
            delta = timedelta(hours=h)
        else:
            # Unknown relative form (e.g. '昨天'): treat as "now".
            delta = timedelta(hours=0)
        s = (now - delta).strftime('%Y-%m-%d %H:%M:%S')

    elif len(time_str) == 2:
        # Absolute form: '2021年5月1日 12:30' -> '2021-5-1 12:30:00'.
        s = ' '.join(time_str)
        s = s.replace('年', '-').replace('月', '-').replace('日', '') + ':00'

    else:
        s = now.strftime('%Y-%m-%d %H:%M:%S')

    return s

def getBaiduZixunArticleList(articleCol, BeCrawledUrlList):
    """Crawl Baidu news search results for each (product, keyword) pair
    and persist newly seen articles via HandleTmpList.

    articleCol: tinydb table, used both for duplicate lookups (by url and
        by title) and as the destination collection.
    BeCrawledUrlList: accepted for interface compatibility with the other
        spiders; not used in this function.
    """
    # product name -> fundamental-data keywords combined into the search query.
    # These are runtime query strings — keep them exactly as-is.
    ProductNameTuple = {
        '苹果': ['库存', '种植面积', '消费量', '种植利润'],
        '红枣': ['库存', '种植面积', '消费量', '种植利润'],
        '原油': ['库存', '进口量', '消费量'],
        '燃油': ['库存', '消费量'],
        '液化石油气': ['库存', '消费量'],
        '沥青': ['库存', '消费量'],
        '棉花': ['库存', '消费量', '种植面积'],
        '玉米': ['库存', '消费量', '种植面积'],
        '大豆': ['库存', '消费量', '种植面积', '进口量', '加工费'],
        '豆粕': ['库存', '消费量', '加工费', '开工率'],
        '橡胶': ['库存', '消费量', '开割面积', '进口量', '开工率'],
        '棕榈油': ['库存', '消费量', '种植面积', '进口量'],
        '豆油': ['库存', '消费量', '加工费'],
        '菜油': ['库存', '消费量', '加工费'],
        '螺纹钢': ['库存', '消费量', '加工费', '开工率'],
        '焦炭': ['库存', '消费量', '加工费', '开工率'],
        '铁矿石': ['库存', '消费量', '发运量'],
        '动力煤': ['库存', '消费量', '发运量'],
        '焦煤': ['库存', '消费量', '发运量'],
        'PTA': ['库存', '消费量', '开工率'],
        '甲醇': ['库存', '消费量', '开工率'],
        '乙二醇': ['库存', '消费量', '开工率'],
        '苯乙烯': ['库存', '消费量', '开工率'],
        '白糖': ['库存', '消费量', '种植面积', '加工费', '进口量'],
        '鸡蛋': ['库存', '消费量', '存栏量', '养殖利润'],
        '镍': ['库存', '消费量'],
        '锌': ['库存', '消费量'],
        '铜': ['库存', '消费量'],
        '铅': ['库存', '消费量'],
        '铝': ['库存', '消费量'],
        # '黄金':['库存',],
        '白银': ['库存',],
        '玻璃': ['库存', '消费量', '加工费', '开工率'],
        '纯碱': ['库存', '消费量', '加工费', '开工率'],
        '纸浆': ['库存', '消费量', '进口量', '开工率'],
    }

    for product in ProductNameTuple:

        for keyword in ProductNameTuple[product]:
            temp_article_ls = []
            param = {
                'tn': 'news',
                'rtt': 4,      # sort results by time (newest first)
                'bsst': 1,
                'cl': 2,
                'wd': '%s %s' % (product, keyword),
                'medium': 0,
            }
            r = sess.get('https://www.baidu.com/s', params=param)
            selector = etree.HTML(r.text)
            for ele in selector.cssselect('div.result'):

                href = ele.xpath('./h3/a/@href')[0].strip()
                # Results are newest-first: once we hit an already-stored
                # url, the rest of the page was crawled before — stop.
                if len(articleCol.search(where('url') == href)): break
                temp_ls = ele.xpath('./h3/a')
                title = temp_ls[0].xpath('string(.)').strip()
                # Identical title almost always means the same article
                # republished elsewhere — also stop here.
                if len(articleCol.search(where('title') == title)): break
                temp_ls = ele.xpath('./div')
                time_str = temp_ls[0].cssselect('p.c-author')[0].xpath('string(.)').strip()
                content = temp_ls[0].xpath('string(.)').strip()

                if product == '苹果':
                    # Skip Apple-the-company news when crawling apple
                    # (the fruit) futures.
                    black_name_ls = ['手机','iphone','app','ios','科技','华为','美国','库克','苹果公司','芯片','电脑',]
                    s = title.lower() + content.lower()
                    if any(w in s for w in black_name_ls):
                        continue

                try:
                    # Tokens after the source name form the timestamp.
                    publicTime = handleTime(time_str.split()[1:])
                except Exception:
                    # Bug fix: the original bare except left publicTime
                    # undefined and crashed with NameError below. Log the
                    # odd timestamp and fall back to "now".
                    print(time_str)
                    publicTime = datetime.now().strftime('%Y-%m-%d %H:%M:%S')

                temp_dict = {'tags': ['baidu'], 'score': 0, 'uid': UID()}
                temp_dict['title'] = title.strip()
                temp_dict['articleFrom'] = '百度资讯'
                temp_dict['url'] = href.strip()
                temp_dict['publicTime'] = publicTime.strip()
                temp_dict['content'] = content
                temp_dict['product_name'] = product
                temp_dict['group'] = ProductToGroup[product]
                print(title, product)
                temp_article_ls.append(temp_dict)

            # Must run once per (product, keyword) — keep outside the
            # per-result loop.
            HandleTmpList(temp_article_ls, articleCol, SPIDERNAME)