#!/usr/bin/env python
# -*- coding:utf-8 -*-
import sys
sys.path.append('..')
from i_util.pybeanstalk import PyBeanstalk
from bdp.i_crawler.i_downloader.ttypes import DownLoadRsp
import pymongo
import traceback
import logging
import time
from i_util.tools import str_obj
from thrift.protocol.TBinaryProtocol import TBinaryProtocol
from thrift.transport.TTransport import TMemoryBuffer
# reload() resets any handlers that an earlier import of logging (or a
# library calling basicConfig first) may have installed; basicConfig is a
# no-op once the root logger has handlers, so without the reload the
# file handler below might never be attached.
reload(logging)
# Append all DEBUG+ records to ./wenshu.log with source location info.
logging.basicConfig(level=logging.DEBUG,
                format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',
                datefmt='%a, %d %b %Y %H:%M:%S',
                filename='./wenshu.log',
                filemode='a')
class WebPage():
    def __init__(self):
        self.beanstalk = PyBeanstalk("cs1", 11300)
        self.output_tube = 'online_download_rsp'
        self.mongo_client = pymongo.MongoClient('172.16.215.2', 40042)

    def req_to_string(self, req):
        str_req = ""
        try:
            tMemory_b = TMemoryBuffer()
            tBinaryProtocol_b = TBinaryProtocol(tMemory_b)
            req.write(tBinaryProtocol_b)
            str_req = tMemory_b.getvalue() 
        except:
            self.logger.error('crawled_failt\terror:%s' % (traceback.format_exc()))
            #print traceback.format_exc()
        return str_req
    def process_docment(self, ori_obj):
        try:
            src_url = ori_obj['url']
            obj = DownLoadRsp()
            obj.url = src_url
            obj.elapsed = 50
            obj.content_type = 'html/text'
            obj.status = 0
            obj.http_code = 200
            obj.download_time = int(time.time())
            obj.content = str_obj(ori_obj['content'])
            obj.page_size = len(obj.content)
            str_req = self.req_to_string(obj)
            print src_url
            if len(obj.content) >100:
                print src_url, len(obj.content)
                self.beanstalk.put(self.output_tube, str_req)
        except Exception as e:
            print e
            logging.info(traceback.format_exc())
    
    def get_data_from_mongo(self, domain, url_format):
        self.mongo_client.crawl_merge_webpage.authenticate('work', 'haizhi')
        db = self.mongo_client.crawl_merge_webpage[domain]
        from datetime import date, timedelta
        today = date.today()
        #query = {"url":{'$regex' : r'^http://www.bjcourt.gov.cn/ktgg/ktggDetailInfo.htm'}, "download_time":{"$lte":str(today+timedelta(days=1)), "$gte":str(today - timedelta(days=215))} }
        #query = {"url":{'$regex' : r'^http://www.bjcourt.gov.cn/ktgg/ktggDetailInfo.htm'}}
        query = {"url":{'$regex' : url_format}}
        print query
        cnt,put_cnt = 0, 0
        for item in db.find(query):
            try:
                cnt += 1
                if cnt % 500 == 0:
                    logging.info('from mongo\t%d' %cnt)
                self.process_docment(item)
                put_cnt += 1
                if put_cnt % 50 == 0:
                    logging.info('put_cnt\t%d' %put_cnt)
                    time.sleep(1)
            except Exception as e:
                logging.info(traceback.format_exc())
if __name__ == "__main__":
    # Usage: <script> <domain-collection> <url-regex>
    # Guard against missing arguments instead of crashing with IndexError.
    if len(sys.argv) < 3:
        sys.stderr.write('usage: %s <domain> <url_regex>\n' % sys.argv[0])
        sys.exit(1)
    domain = sys.argv[1]
    url_format = sys.argv[2]
    flyer = WebPage()
    flyer.get_data_from_mongo(domain, url_format)

