#coding=utf-8
#!/usr/bin/python2.7.5
import sys
import urllib
import Scrapy
import urllib2
import requests
import json
import time
import logging
import os


# Log file locations (absolute paths on the deployment server).
logFilePath = '/opt/spider/zhaobiaowang/log.log' # absolute path on the server
biddingIdSendLogPath = '/opt/spider/zhaobiaowang/bidding.log' # absolute path on the server
# WeChat receivers: display names understood by the wx gateway (weixinUrl below).
weixinReciver = ['亿阳-四川-颜露','亿阳-四川-曾昭国','亿阳-四川-销售-姚瑞','亿阳-四川-蒲素','亿阳-四川-张华','亿阳-四川-崔勇','亿阳-四川-徐巍','亿阳-四川-赵子银','亿阳-四川-销售-杨安东','亿阳-四川-李良国']
#weixinReciver = []
# Comma-separated recipient list passed as-is to the mail service.
toMail = 'yanlu@boco.com.cn,liliangguo@boco.com.cn,1299269184@qq.com,yanchanggang2015@boco.com.cn,pusu@boco.com.cn,cuiyong@boco.com.cn,zengzhaoguo@boco.com.cn,yaorui@boco.com.cn,zhanghua@boco.com.cn,yangandong@boco.com.cn,zhaoziyin@boco.com.cn,xuwei@boco.com.cn'
#toMail = '1299269184@qq.com'

# Append-mode file logging for the whole script run.
logging.basicConfig(level=logging.INFO,
                format='[%(asctime)s %(filename)s line:%(lineno)d %(levelname)s] %(message)s',
                datefmt='%Y-%m-%d %H:%M:%S',
                filename=logFilePath,
                filemode='a')

# Python 2 only: force utf-8 as the default codec so implicit str/unicode
# mixing of the Chinese text below does not raise UnicodeDecodeError.
reload(sys)
sys.setdefaultencoding('utf-8')

# WeChat push gateway endpoint and the notification message template
# (filled with: date, title, matched keyword, detail-page URL).
weixinUrl = "http://139.199.82.220:5000/wx"
msgtemplet = '您好！\n中国移动采购与招标网%s发布新的招标公告:\n%s\n关键字:%s\n详细内容，已经发送邮件到您邮箱，请注意查收\n网页地址:%s'


# Mail-sending service endpoint.
mailUrl = 'http://139.199.82.220:8380/mail'
# A bidding is forwarded only when its title or body contains one of these.
keyWord = ['网络优化', '网优', '电子运维', '局数据', '智能巡检', '维护保障', '大数据', '分析',
           '维保', '管理系统', '系统集成', '虚拟化', '资源池', '云计算', '综合资源', '综资', '资源管理',
           '设备维保', '维保维护', '系统维护', '应急保障', '应急管理', '应急调度', '工程建设', '物流管理']

# Biddings whose body contains one of these words are suppressed.
filterWord = ['电梯', '土建']

def match(bidding):
    """Return the first configured keyword found in the bidding, else None.

    The body is checked before the title; only biddings that match a
    keyword are eligible for sending.
    """
    content = bidding['content']
    title = bidding['title']
    for kw in keyWord:
        if kw in content or kw in title:
            logging.info(title + ' match keyword:' + kw)
            return kw
    return None

def filter(bidding):
    """Return True when the bidding body contains any blacklisted word.

    Matching biddings are suppressed and never sent.
    NOTE: this name shadows the ``filter`` builtin inside this module.
    """
    body = bidding['content']
    return any(word in body for word in filterWord)


def doMain():
    """Scan today's biddings and notify about new, relevant ones.

    For each bidding id published today: fetch the detail, and if it
    matches a keyword, contains no filter word, and has not been sent
    before, record it and push WeChat + mail notifications.
    """
    for biddingId in Scrapy.getTodayIds():
        bidding = Scrapy.getDetail(biddingId)
        if bidding is None:  # detail fetch failed; skip this id
            continue
        key = match(bidding)
        # Send only once per bidding id (alreadySend consults the log file).
        if key and (not filter(bidding)) and (not alreadySend(biddingId)):
            logSendBidding(biddingId)
            # Prepend the matched keyword so the senders can show it.
            bidding = dict({'keyword': key}, **bidding)
            logging.info("sendding msg for bidding, id=" + biddingId)
            sendWeixin(bidding)
            sendMail(bidding)


def sendWeixin(bidding):
    """Push a WeChat notification about *bidding* to every configured receiver."""
    # The message does not depend on the receiver, so build (and echo)
    # it once instead of once per user as the original did.
    wxmsg = msgtemplet % (time.strftime("%Y-%m-%d"), bidding['title'],
                          bidding['keyword'], Scrapy.contentUrl + bidding['id'])
    print(wxmsg)
    for user in weixinReciver:
        param = {'wxmsg': wxmsg, 'wxuser': user}
        requests.post(weixinUrl, param)

def sendMail(bidding):
    """Email the bidding to the configured recipients, attaching its file if any.

    Posts title/content to the mail service; when the bidding carries an
    attachment, it is downloaded first and uploaded alongside the form data.
    """
    param = {'toEmail': toMail, 'subject': bidding['title'], 'content': bidding['content']}

    if bidding['attachmentExist']:
        # Download the attachment to a local temp file (Python 2 urllib).
        localPath = urllib.urlretrieve(bidding['url'])[0]
        # 'with' guarantees the handle is closed even if the POST raises;
        # the original leaked the open file object.
        with open(localPath, 'rb') as attachmentFile:
            files = {'file': (bidding['fileName'], attachmentFile)}
            requests.post(mailUrl, param, files=files)
    else:
        requests.post(mailUrl, param)

def logSendBidding(biddingId):
    """Append a "<date> <biddingId>" record to the sent-record log file."""
    today = time.strftime('%Y-%m-%d', time.localtime(time.time()))
    record = today + " " + biddingId + "\n"
    with open(biddingIdSendLogPath, 'a') as logFile:
        logFile.write(record)

def alreadySend(biddingId):
    """Return True if *biddingId* already appears in the sent-record log.

    Each log line is either a bare id (legacy format) or "<date> <id>".
    A missing log file means nothing has been sent yet.
    """
    if not os.path.exists(biddingIdSendLogPath):
        return False
    with open(biddingIdSendLogPath, 'r') as f:
        # Iterate the file lazily instead of readlines(). The original
        # also had a debug print here indented with a tab (a Python 3
        # IndentationError and fragile in Python 2); removed.
        for line in f:
            line = line.strip()
            if line.find(" ") == -1:
                if biddingId == line:
                    return True
            else:
                if biddingId == line.split(" ")[1]:
                    return True
    return False


if __name__ == '__main__':
    # Script entry point: run one crawl-and-notify pass, bracketed in the log.
    logging.info('==========Scrapy start!=========')
    doMain()
    logging.info('===========Scrapy end!==========')