#!/usr/bin/env python
# -*- coding: utf-8 -*-
from i_downloader.thrift import Thrift
from thrift.transport import TSocket
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol
import signal, time, sys, multiprocessing, traceback, os, threading
sys.path.append('..')
import urllib
from thrift.transport import TSocket, TTransport
from thrift.protocol import TBinaryProtocol
from thrift.protocol.TBinaryProtocol import TBinaryProtocol as TBinaryServerProtocol
from thrift.transport.TTransport import TMemoryBuffer
from bdp.i_crawler.i_downloader import DownloadService
from bdp.i_crawler.i_downloader.ttypes import DownLoadReq
from bdp.i_crawler.i_downloader.ttypes import SessionCommit
from bdp.i_crawler.i_downloader.ttypes import Proxy
from i_util.pybeanstalk import PyBeanstalk
from i_downloader.conf import beanstalk_conf
import random
import logging
from lxml import etree
try:
    def to_string(page_info):
        """Serialize a thrift struct to its binary-protocol byte string.

        page_info: any generated thrift struct exposing write(protocol)
                   (here a DownLoadReq / DownLoadRsp).
        Returns the serialized bytes, or None if serialization fails.
        """
        str_page_info = None
        try:
            mem_buf = TMemoryBuffer()
            bin_proto = TBinaryServerProtocol(mem_buf)
            page_info.write(bin_proto)
            str_page_info = mem_buf.getvalue()
        except Exception:
            # Broadened from EOFError: thrift write failures surface as
            # several exception types, and this helper's contract is
            # best-effort (caller checks for None). Log the traceback
            # instead of discarding it.
            logging.exception("can't serialize thrift struct to string")
        return str_page_info
    # Open a buffered thrift connection to the local DownloadService daemon.
    transport = TSocket.TSocket('localhost', 12210)
    transport = TTransport.TBufferedTransport(transport)
    protocol = TBinaryProtocol.TBinaryProtocol(transport)
    client = DownloadService.Client(protocol)
    transport.open()


    # Query-string parameters for the Domino (Lotus Notes) view endpoint.
    # NOTE(review): RestrictToCategory carries a Chinese category path; it is
    # request payload, not a comment, so it must stay exactly as-is.
    data = {
        "ReadViewEntries": "",
        "PreFormat": "",
        "RestrictToCategory": "彩电策略与供应链管理中心/产品管理中心/规划组/原型新品需求通知单-1",
        "m_Seq": 0.4109518000441046,
        "Start": 600,
        "Count": 1000,
    }
    output_tube='test_new_req'  # beanstalk tube the serialized request is pushed to

    beanstalk=PyBeanstalk(beanstalk_conf['host'], beanstalk_conf['port'])

    # Full view URL plus browser-like headers (session cookie included) so the
    # downloader fetches the page as an authenticated interactive user would.
    base_url = 'http://kmoa.konka.com/lks/koa/lks_km_yx_pbyyzx.nsf/VD_ByClass?' + urllib.urlencode(data)
    header = {'Host': 'kmoa.konka.com',
              'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.11; rv:45.0) Gecko/20100101 Firefox/45.0',
              'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
              'Accept-Language': 'zh-CN,zh;q=0.8,en-US;q=0.5,en;q=0.3',
              'Accept-Encoding': 'gzip, deflate',
              'Referer': 'http://kmoa.konka.com/lks/koa/lks_km_yx_pbyyzx.nsf/VD_ByClass?ReadViewEntries&PreFormat&RestrictToCategory=%u5F69%u7535%u7B56%u7565%u4E0E%u4F9B%u5E94%u94FE%u7BA1%u7406%u4E2D%u5FC3/%u4EA7%u54C1%u7BA1%u7406%u90E8/ODM%u9879%u76EE/%u89C4%u5212%u7EC4-1&m_Seq=0.4109518000441046&Start=32&Count=2000',
              'Cookie': 'JSESSIONID=748ED2D70FBF77717858FEBA36B1E4CA; LtpaToken=AAECAzU3QkZGOTlFNTdDMDMxREVobHdrZmNz/GvVrJWMJDIOLkN2ZhpJJw6wsIs=',
              'Connection': 'keep-alive'
              }
    #?opendocument & m_Seq = 0.15046018986868992
    # Fetch the XML view listing via the downloader service.
    req = DownLoadReq(url=base_url,http_header=header)
    req.download_type = 'simple'
    res = client.download(req)
    list_kangjia=[]  # NOTE(review): never appended to below — appears unused
    xodc = etree.XML(res.content)
    # Each <viewentry> is one document in the Notes view; @unid identifies it.
    viewtry = xodc.xpath('//viewentries/viewentry')
    print len(viewtry)
    cnt=1
    for i in viewtry:
        cnt+=1
        uid = i.xpath('@unid')[0]
        # print uid
        # Only re-download and enqueue one hard-coded document of interest.
        if uid=='0C92C8F772F4405848257C0E0006A3C3':
            print cnt
            url = 'http://kmoa.konka.com/lks/koa/lks_km_yx_pbyyzx.nsf/VD_ByClass/{}'.format(uid)

            urla='http://kmoa.konka.com/lks/koa/lks_km_yx_pbyyzx.nsf/VD_ByClass/0C92C8F772F4405848257C0E0006A3C3'
            req = DownLoadReq(url=urla, http_header=header)
            req.download_type='simple'
            res=client.download(req)
            print res.content
            # NOTE(review): serializes the *request* (req), not the response
            # (res). Tube name 'test_new_req' suggests this is intentional
            # (pushing a download request for a worker) — confirm.
            str_page_info = to_string(req)

            beanstalk.put(output_tube, str_page_info)
        print cnt
















    # Commented-out scaffolding kept for reference: other DownLoadReq fields
    # that can be exercised against the downloader service.
    # req.download_type='simple'
    # #req.proxy=proxy
    # req.priority=0
    # req.time_out=30
    # req.http_header=http_header
    # req.retry_times=1
    # req.post_data=post_data
#    req.session_commit=session_commit


    #
    # res=client.download(req)
    # print res.url
#    print vars(res)
#    print res.status
#   print res.content
#    print res.url,                   # required field: the downloaded url
#    print res.redirect_url,          # required field: redirect url (original url when no redirect)
    # print res.src_type,              # optional field: url source
    # print res.status,                # required field: download status
#    print res.http_code,             # required field: http status code
#    print res.download_time,         # optional field: download time
    # print res.elapsed,               # optional field: download duration
    # print res.pages,                 # optional field: page download history
    # print res.content_type,          # optional field: downloaded content type
    # print res.content,               # optional field: page content
#    print res.page_size,             # optional field: page size
    # print res.scheduler,             # optional attribute: scheduler-related info
    # print res.parse_extends,         # optional attribute: parsing-related info
    # print res.data_extends,          # optional attribute: extended attributes
    # print res.info                   # optional attribute: extra info





    # Close the thrift transport now that all downloads are done.
    transport.close()
# iutl changed its log; i_config changed the mysql password; phantomjs changed its log
# Any thrift-level failure (connect, transport, RPC) lands here.
except Thrift.TException, tx:
    print '%s' % (tx.message)