#!/usr/bin/env python
import signal, time, sys, multiprocessing, traceback, os, threading
sys.path.append('..')
from i_util.tools import str_obj, unicode_obj
from thrift.protocol.TBinaryProtocol import TBinaryProtocol as TBinaryServerProtocol
from thrift.transport.TTransport import TMemoryBuffer
from bdp.i_crawler.i_downloader.ttypes import DownLoadRsp
from bdp.i_crawler.i_downloader.ttypes import CrawlStatus
from handler import DownloadHandler as Downloader
from i_util.pybeanstalk import PyBeanstalk
from i_util.thread_pool import ThreadPool
from bdp.i_crawler.i_downloader.ttypes import RetStatus
import time
import json
# Global request counter shared across DownloadHandler.download calls.
# NOTE(review): incremented without synchronization — racy under threads; confirm intent.
cnt=0
# Wall-clock time of the first download request (set when cnt == 0); presumably
# kept for throughput measurement — no visible reader in this file.
starta=0
class DownloadHandler(object):
    """Crawl-result handler: runs downloads via the injected downloader and
    pushes thrift-serialized DownLoadRsp structs onto a beanstalk output tube.

    An instance may be shared by worker threads; beanstalk writes issued from
    download_task/beanstal_put are serialized with self.lock.
    """

    def __init__(self, downloader, conf):
        """
        downloader: object exposing download(req) -> DownLoadRsp.
        conf: dict-like with 'log' (a logger) and 'beanstalk_conf'
              ({'host', 'port', 'input_tube', 'output_tube'}).
        """
        self.log = conf.get('log')
        self.log.info('DownloadHandler load start')
        self.downloader, self.conf = downloader, conf
        beanstalk_conf = conf.get('beanstalk_conf')
        self.beanstalk = PyBeanstalk(beanstalk_conf['host'], beanstalk_conf['port'])
        self.input_tube = beanstalk_conf['input_tube']
        self.output_tube = beanstalk_conf['output_tube']
        self.log.info('DownloadHandler load finish')
        # Guards concurrent beanstalk.put calls from multiple threads.
        self.lock = threading.Lock()

    def to_string(self, page_info):
        """Serialize a thrift struct (DownLoadRsp) via the binary protocol.

        Returns the serialized byte string, or None if serialization fails.
        """
        str_page_info = None
        try:
            mem_buf = TMemoryBuffer()
            proto = TBinaryServerProtocol(mem_buf)
            page_info.write(proto)
            str_page_info = mem_buf.getvalue()
        except EOFError:
            # Fixed py2-only "except EOFError, e:" syntax; bound name was unused.
            self.log.warning("cann't write DownLoadRsp to string")
        return str_page_info

    def prepare_download_rsp(self, url, content_type="html/text"):
        """Build a DownLoadRsp skeleton marked as a successful download."""
        obj = DownLoadRsp()
        obj.url = url
        obj.elapsed = 50  # fixed placeholder elapsed value -- TODO confirm unit
        obj.content_type = content_type
        obj.status = 0
        obj.http_code = 200
        obj.download_time = int(time.time())
        return obj

    def beanstal_put(self, company, url, info):
        """Serialize one crawl result and put it on the output tube.

        NOTE: the (misspelled) method name is kept for backward compatibility
        with existing callers.
        """
        res = self.prepare_download_rsp(url)
        res.content = str_obj(info)
        res.page_size = len(info)
        res.data_extends = json.dumps({"company": company})
        base_page_info = self.to_string(res)
        # 'with' guarantees release even if put() raises; the original
        # acquire()/release() pair leaked the lock on error.
        with self.lock:
            self.beanstalk.put(self.output_tube, base_page_info)

    def download_task(self, req):
        """Parse req.info ({'code', 'data', ...} JSON payload) and push
        base-info / change-info records to beanstalk.

        Returns a RetStatus: status 1 on success, 0 + errormessage on failure.
        """
        ret = RetStatus()
        try:
            crawl_data = req.info
            code = int(crawl_data.get('code', ''))
            data = json.loads(crawl_data.get('data', ''))
            if code == 0:
                for record in data:
                    company = record.get('company', '')
                    baseinfo = record.get('baseinfo', '')
                    baseinfo_url = record.get('baseinfo_url', '')
                    changeinfo = record.get('changeinfo', '')
                    changeinfo_url = record.get('changeinfo_url', '')
                    self.beanstal_put(company=company, url=baseinfo_url, info=baseinfo)
                    if changeinfo_url != '':
                        self.beanstal_put(company=company, url=changeinfo_url, info=changeinfo)
            ret.status = 1
            ret.errormessage = 'success'
        except Exception as e:
            # str(e) instead of e.message: .message is deprecated in py2.6+
            # and removed in py3.
            self.log.error(str(e))
            ret.status = 0
            ret.errormessage = str(e)
        return ret

    def download(self, req):
        """Download one request with timing logs; never raises.

        Returns the downloader's DownLoadRsp, or a CRAWL_FAILT response when
        the url is missing or the downloader raises.
        """
        global cnt
        global starta
        if cnt == 0:
            starta = time.time()  # wall-clock start of the first request
        # (removed dead "if cnt == 1000: pass" branch)
        if req.url is None:
            self.log.error('has no url')
            return DownLoadRsp(status=CrawlStatus.CRAWL_FAILT, )
        self.log.info("start_crawl\turl::%s\tmethod:%s\tdownload_type:%s" % (req.url, req.method, req.download_type))
        start = time.time()
        try:
            rsp = self.downloader.download(req)
            cnt += 1  # NOTE(review): unsynchronized global counter — racy under threads
        except Exception:
            self.log.error('url:' + req.url + '\terror_msg:' + str(traceback.format_exc()))
            rsp = DownLoadRsp(status=CrawlStatus.CRAWL_FAILT, )
        finally:
            content_len = -1
            if rsp.content:
                content_len = len(rsp.content)
            self.log.info('finish_crawl\tuse_time:' + str(time.time() - start) + '\tlens:' + str(content_len) + '\tstatus:' + str(rsp.status) + '\turl:' + str(req.url))
        return rsp

    def commit_task(self, req):
        """Download req, then push the serialized response to the output tube.

        Put errors are logged and swallowed (best-effort delivery).
        """
        self.log.info('start_crawl\turl:' + req.url + '\tmethod:' + req.method + '\tdownload_type:' + req.download_type)
        start = time.time()
        try:
            rsp = self.downloader.download(req)
        except Exception:
            # Narrowed from a bare "except:" so SystemExit/KeyboardInterrupt
            # still propagate.
            self.log.error('url:' + req.url + '\terror_msg:' + str(traceback.format_exc()))
            rsp = DownLoadRsp(status=CrawlStatus.CRAWL_FAILT, )
        finally:
            use_time = int((time.time() - start) * 1000.0)
            self.log.info('finish_crawl\turl:%s\tstatus:%s\thttpcode:%s\tuse_time:%s' % (req.url, rsp.status, rsp.http_code, use_time))
        str_page_info = self.to_string(rsp)
        try:
            # NOTE(review): this put is not guarded by self.lock, unlike
            # beanstal_put — confirm whether PyBeanstalk.put is thread-safe.
            self.beanstalk.put(self.output_tube, str_page_info)
            self.log.info('beanstalk\turl:%s\ttube:%s' % (req.url, self.output_tube))
        except Exception:
            self.log.info('beanstalk put error url:' + req.url + '\tmethod:' + req.method + '\tdownload_type:' + req.download_type)

    def commit_download_task(self, req):
        """Run commit_task and report the outcome as a RetStatus."""
        ret = RetStatus()
        try:
            self.commit_task(req)
            ret.status = 1
        except Exception as e:
            ret.status = 0
            ret.errormessage = str(e)
            self.log.error('url:%s commit fail reason is %s' % (req.url, str(e)))
        return ret
