# -*- coding: utf-8 -*-
__author__ = 'fengoupeng'

import sys
sys.path.append('..')

#import conf
from i_processor.model import JDProduct
from i_util.pymysql import PyMySQL
from i_util import tools
from i_util.i_crawler_services import ThriftScheduler, ThriftDownloader, ThriftExtractor, ThriftProcessor

# Thrift service endpoints under test.
# Removed the commented-out alternate hosts/ports (scheduler, processor,
# remote downloader) — dead code; recover them from VCS history if needed.
downloader = ThriftDownloader('127.0.0.1', 7040)
extractor = ThriftExtractor('127.0.0.1', 7060)

# Page to fetch and the task descriptor handed to the downloader service.
# Removed the commented-out keys ('url', alternate check_body patterns,
# crawl_item, item_download_type) — dead code cluttering the literal.
url = 'http://1-biao.com/'

item_task = {
    'task_id': '1',
    'doc_type': 'item',
    'seed_id': '48780',
    'page_num': '1',
    'priority': '',
    'site_id': '104',
    # 'phantom' asks the downloader to render via a headless browser
    # rather than a simple HTTP GET.
    'download_type': 'phantom',
    'url_format': 'http://www.bjztb.gov.cn/zbgg/2015',
    'check_size': '100',    # presumably a minimum body-size check — confirm
    'avg_interval': '1',
}


status_body = downloader.download(url, item_task)
print status_body.status
body = status_body.body
print 'body : ', len(body)
print body

from i_extractor.config_handle import ConfigHandler
from i_extractor import conf as ExtractConf
config_handler = ConfigHandler(ExtractConf)
config_handler.load_config_from_database()
config = config_handler.get_config_by_url(url)
if not config:
    print "%s no config" % url

from i_extractor.extractor import RealExtractor
from i_util.logs import LogHandler
extract_log = LogHandler("extractor")
extrartor = RealExtractor(body, url, config, extract_log, ExtractConf)
info = {'site_id':'104'}
results, status = extrartor.extract_content_datas(info)
print "==========================="
print status
for r in results:
    print r
print "==========================="
task['url'] = url
item_task['url'] = url
status_results = extractor.extract(body=body, info=item_task)
print status_results.status
for r in status_results.results:
    print r
#status, results = extrartor.extract(body=body, info=task)
