# -*- coding: utf-8 -*-
__author__ = 'fengoupeng'

import sys
sys.path.append('..')

#import conf
from i_processor.model import JDProduct
from i_util.pymysql import PyMySQL
from i_util import tools
from i_util.i_crawler_services import ThriftScheduler, ThriftDownloader, ThriftExtractor, ThriftProcessor

# scheduler = ThriftScheduler(conf.scheduler_host, conf.scheduler_port)
# downloader = ThriftDownloader(conf.downloader_host, conf.downloader_port)
# downloader = ThriftDownloader('10.174.8.73', 7040)
# extractor = ThriftExtractor(conf.extractor_host, conf.extractor_port)
# processor = ThriftProcessor(conf.processor_host, conf.processor_port)
#processor = ThriftProcessor('10.174.8.48', 7024)
# processor = ThriftProcessor('101.201.196.67', 7024)
# Thrift RPC clients for the four crawler services, all pointed at a local
# deployment (the commented-out lines above show the conf-driven / remote
# endpoints this debug script has also been run against).
scheduler = ThriftScheduler('127.0.0.1', 7021)    # task scheduling service
downloader = ThriftDownloader('127.0.0.1', 7040)  # page download service
processor = ThriftProcessor('127.0.0.1', 7024)    # item post-processing service
extractor = ThriftExtractor('127.0.0.1', 7060)    # content extraction service

# Kick off the pending tasks for site id 105 on the scheduler.
# NOTE(review): the `task` dict below uses site_id '101' (and 105 is an int
# while task fields are strings) — confirm the mismatch is intentional.
scheduler.start_one_site_tasks(105)

#index_task = {}
#index_task['url'] = 'http://list.jd.com/list.html?cat=737,794,1300'
#index_task['doc_type'] = 'index'
#index_task['page_num'] = str(2)
#index_task['site_id'] = str(1)
#index_task['seed_id'] = str(1)
#index_task['priority'] = str(-10)

# index_task = {}
# index_task['url'] = 'http://www.chinabidding.org.cn/BidInfoList_bt_0.html'
# index_task['doc_type'] = 'index'
# index_task['page_num'] = str(1)
# index_task['site_id'] = str(1)
# index_task['seed_id'] = str(48777)
# index_task['priority'] = str(-10)
# index_task['avg_interval'] = str(1.0)
# index_task['crawl_item'] = 'true'
# index_task['item_download_type'] = 'phantom'
#
# # task = {}
# # task['url'] = 'http://list.jd.com/list.html?cat=737,794,1300'
# # task['doc_type'] = 'index'
# # task['page_num'] = str(2)
# # task['site_id'] = str(1)
# # task['seed_id'] = str(1)
# # task['priority'] = str(-1)
# #
# # item_tasks = []
# # for i in range(1000):
# #     item_tasks.append(task)
# #
# # res = scheduler.schedule_tasks(index_task, item_tasks)
# # print res
# #
# # res = scheduler.schedule_tasks(index_task, item_tasks)
# # print res
#
#
# task = {}
# #task['url'] = 'http://www.chinabidding.org.cn/BidInfoList_bt_0.html'
# task['doc_type'] = 'item'
# task['page_num'] = str(1)
# task['site_id'] = str(1)
# task['seed_id'] = str(48777)
# task['priority'] = str(-1)
# task['avg_interval'] = str(1.0)
#
# item_tasks = []
# #for i in range(1000):
# for i in range(30):
#    item_tasks.append(task)
#
# res = scheduler.schedule_tasks(index_task, item_tasks)
# print res

# for i in range(1000):
#url = 'http://list.jd.com/list.html?cat=11729,11730,6910'
# Active seed: index/listing page of chinabidding.org.cn. The commented
# alternatives below are other sites this script has been pointed at.
url = 'http://www.chinabidding.org.cn/BidInfoList_bt_0.html'
# url = 'http://list.jiuxian.com/4-0-0-0-0-0-0-0-0-0-0-0.htm'
# url = 'http://list.suning.com/0-157133-0.html'
# url = 'http://list.yhd.com/c0-0-120334/'
# url = 'https://s.taobao.com/list?spm=a21bo.7724922.8407-line-2.3.rs4erE&q=%E7%9A%AE%E8%A1%A3&cat=16&style=grid&seller_type=taobao&scm=1007.12013.16568.100000000000000'
# url = 'https://s.taobao.com/list?spm=a21bo.7724922.8407-line-2.3.rs4erE&q=%E7%9A%AE%E8%A1%A3&cat=16&style=grid&seller_type=taobao&scm=1007.12013.16568.100000000000000&bcoffset=-1&s=60'
# url = 'http://list.gome.com.cn/cloud/asynSearch?callback=callback_product&module=product&from=category&page=2&paramJson=%7B+%22mobile%22+%3A+false+%2C+%22catId%22+%3A+%22cat15985613%22+%2C+%22catalog%22+%3A+%22coo8Store%22+%2C+%22siteId%22+%3A+%22coo8Site%22+%2C+%22shopId%22+%3A+%22%22+%2C+%22regionId%22+%3A+%2211011400%22+%2C+%22pageName%22+%3A+%22list%22+%2C+%22et%22+%3A+%22%22+%2C+%22XSearch%22+%3A+false+%2C+%22startDate%22+%3A+0+%2C+%22endDate%22+%3A+0+%2C+%22pageSize%22+%3A+48+%2C+%22state%22+%3A+4+%2C+%22weight%22+%3A+0+%2C+%22promoFlag%22+%3A+0+%2C+%22sale%22+%3A+0+%2C+%22instock%22+%3A+1+%2C+%22filterReqFacets%22+%3A++null++%2C+%22rewriteTag%22+%3A+false+%2C+%22market%22+%3A+10+%2C+%22priceTag%22+%3A+0+%2C+%22cacheTime%22+%3A+190+%2C+%22parseTime%22+%3A+1475%7D'
# url = 'http://www.youku.com/v_olist/c_97_a_大陆_s_1_d_1_g_军事.html'
# url = 'http://www.amazon.cn/s/ref=lp_658428051_nr_n_7/477-8442593-7787520?fst=as%3Aoff&rh=n%3A658390051%2Cn%3A%21658391051%2Cn%3A658428051%2Cn%3A659039051&bbn=658428051&ie=UTF8&qid=1448523554&rnid=658428051'
# url = 'http://www.ymatou.com/sports_outdoors/c1783_1792'
# Crawl task descriptor for the scheduler/downloader. All values are kept as
# strings, matching how every other task dict in this script is built.
task = {}
task['task_id'] = '1'
# task['url'] = url               # url deliberately left unset for this run
task['doc_type'] = 'index'
task['seed_id'] = '48777'
task['page_num'] = '1'
task['priority'] = ''
task['site_id'] = '101'
# task['download_type'] = 'simple'
task['download_type'] = 'phantom'  # render via phantom instead of simple fetch
task['crawl_item'] = 'true'
task['item_download_type'] = 'phantom'
# task['check_body'] = ''         # optional body-validation regexes omitted
# task['check_size'] = '1000'
task['avg_interval'] = '1'

# url = 'http://www.cpppc.org:8082/efmisweb/ppp/projectLivrary/getPPPList.do'
# #url = 'http://www.shuidixy.com/company/enterpriseNews?curpage=0&pagesize=3'
# item_task = {'task_id': '1',
# #'url': url,
# 'doc_type': 'item',
# 'seed_id': '48778',
# 'page_num': '1',
# 'priority': '',
# 'site_id': '102',
# # 'download_type': 'simple',
# 'download_type': 'phantom',
# # 'crawl_item': 'true',
# # 'item_download_type' : 'phantom',
# # 'check_body': '(</em>\s*?<i>[\d\.]+</i>\s*?</strong>)',
# # 'check_body': u'(全部商品分类)',
# # 'check_body': '<p class="price" id="product_\d+">([^<]{2,})</p>',
# # 'check_body': '(<div class="nSearchWarp">|callback_product)',
# # 'check_body': '',
# #'check_size': '1000'
# 'avg_interval': '1',
# 'method':'post',
# 'data':{'queryPage':'2'},
# }


# status_body = downloader.download(url, item_task)
# print status_body.status
# body = status_body.body
# print 'body : ', len(body)
# print status_body.body

# from i_extractor.config_handle import ConfigHandler
# from i_extractor import conf as ExtractConf
# config_handler = ConfigHandler(ExtractConf)
# config_handler.load_config_from_database()
# config = config_handler.get_config_by_url(url)
# from i_extractor.extractor import RealExtractor
# from i_util.logs import LogHandler
# extract_log = LogHandler("extractor")
# extrartor = RealExtractor(body, url, config, extract_log, ExtractConf)
# content_datas_result = extrartor.extract_content_datas(None)
# print content_datas_result
# # print 'result : ', result
# # print 'content_datas : ', len(content_datas)
# task['url'] = url
# item_task['url'] = url
# status_results = extractor.extract(body=body, info=item_task)
# #status, results = extrartor.extract(body=body, info=task)
# print status_results


# d = {'product_id': '1578914', 'title': '\xe7\xbe\x8e\xe7\x9a\x84(Midea)Q535B \xe9\xbb\x91\xe6\x99\xb6\xe7\x8e\xbb\xe7\x92\x83\xe9\x9d\xa2\xe6\x9d\xbf \xe5\xae\x89\xe5\x85\xa8\xe7\x81\xb6\xef\xbc\x88\xe5\xa4\xa9\xe7\x84\xb6\xe6\xb0\x94\xef\xbc\x89', 'comment_num': '249', 'detail_url': '//item.jd.com/1578914.html', 'cat': '\xe5\xae\xb6\xe7\x94\xa8\xe7\x94\xb5\xe5\x99\xa8>\xe5\xa4\xa7 \xe5\xae\xb6 \xe7\x94\xb5>\xe7\x83\x9f\xe6\x9c\xba/\xe7\x81\xb6\xe5\x85\xb7'}
# status = processor.process(action_type='insert', items=[], info={'task_id':'1', 'site_id':'1'})
# print status

# d = {'product_id': '1578914', 'title': '\xe7\xbe\x8e\xe7\x9a\x84(Midea)Q535B \xe9\xbb\x91\xe6\x99\xb6\xe7\x8e\xbb\xe7\x92\x83\xe9\x9d\xa2\xe6\x9d\xbf \xe5\xae\x89\xe5\x85\xa8\xe7\x81\xb6\xef\xbc\x88\xe5\xa4\xa9\xe7\x84\xb6\xe6\xb0\x94\xef\xbc\x89', 'comment_num': '249', 'cat': '737%2C794%2C1300', 'detail_url': '//item.jd.com/1578914.html', 'type': '\xe5\xae\xb6\xe7\x94\xa8\xe7\x94\xb5\xe5\x99\xa8,\xe5\xa4\xa7 \xe5\xae\xb6 \xe7\x94\xb5,\xe7\x83\x9f\xe6\x9c\xba/\xe7\x81\xb6\xe5\x85\xb7'}
# jd = JDProduct()
# jd.load_by_dict(d)
# where_dict, data_dict = jd.todict()
# db = PyMySQL(conf.mysql_host, conf.mysql_port, conf.database, conf.username, conf.password)
# # fields, values = db._change_insert_to_string({}, d)
# # print fields
# # print values
# db.upsert('jd_product', where_dict, data_dict)
