# -*- coding: UTF-8 -*-
import random
import re

import signal
from scrapy import signals
import logging
import scrapy
import traceback
from scrapy.selector import Selector
from scrapy.http import Request
from pangolin.common.date_common import DateCommon
from pangolin.spider_enum.companyCodeEnum import getCompanyCode
from pangolin.common.db_operateCommon import *
from pangolin.common.http_common import *
from pangolin.common.string_common import StringCommon
from pangolin.items import TicketDetailItem
from multiprocessing import Process
from selenium import webdriver
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from pangolin.settings import IS_SAVE_SERVER
from pangolin.settings import IS_SNAPSHOT
from pangolin.settings import RETRY_TIMES
from pangolin.middlewares import RotateUserAgentMiddleware
import sys, os
from PIL import Image
from datetime import *
import time
import json

# Python 2 only: ``sys.setdefaultencoding`` is deleted by site.py at startup,
# so ``reload(sys)`` is required to re-expose it.  Forcing the process-wide
# default encoding to UTF-8 lets the implicit str<->unicode conversions of the
# Chinese page content succeed without raising UnicodeDecodeError.
# NOTE(review): this is a well-known Python 2 hack; it does not exist on Python 3.
reload(sys)
sys.setdefaultencoding('UTF-8')


class PangolinSpider(scrapy.Spider):
    """Scrapy spider that crawls ticket (scenery) detail pages from ly.com (Tongcheng).

    For each product URL (either looked up from the ``cpa_care_product`` table or
    passed in directly when re-crawling a single missed product) it parses the
    product detail page, the per-commodity price table (falling back to an AJAX
    price-frame endpoint for domestic pages), optionally takes a cropped
    PhantomJS screenshot, and yields a ``TicketDetailItem``.

    Written for Python 2 (``response.body`` is treated as ``str``).
    """
    name = 'tc_ticket_by_url_spider'
    # NOTE(review): Scrapy expects ``allowed_domains`` (plural, a list); this
    # misspelled attribute is ignored, so no offsite filtering actually happens.
    allowed_domain = 'ly.com'  # domain the spider is meant to be restricted to

    def __init__(self, crawler, spider_name, city_code, company_code, task_id, task_date, timer_param,
                 timer_granularity, timer_id, url, lv_product_id, miss_product_flag):
        """Store the task parameters and wire up spider lifecycle signals.

        ``miss_product_flag == 'FALSE'`` means a normal batch crawl (product
        URLs come from the DB and stale rows for this batch/city are deleted
        first); ``'TRUE'`` means re-crawling one missed product via ``url``.
        """
        # Accept the constructor arguments and keep them as instance state.
        self.save_server = IS_SAVE_SERVER
        self.snapshot = IS_SNAPSHOT

        self.spider_name = spider_name
        self.city_code = city_code
        self.company_code = company_code
        self.task_id = task_id
        self.task_date = task_date
        self.timer_param = timer_param
        self.timer_granularity = timer_granularity
        self.timer_id = timer_id
        self.url = url
        self.lv_product_id = lv_product_id
        self.miss_product_flag = miss_product_flag
        # Before a batch crawl, delete existing product/commodity rows for the
        # same batch and city so the crawl starts from a clean slate.
        if miss_product_flag == 'FALSE':
            db_operate = DbOperateClass()
            db_operate.delete_pd_and_cd_data("ticket_pdetail_tc", "ticket_cdetail_tc", getCompanyCode(company_code),
                                             city_code, task_id, task_date)
        self.crawler = crawler
        # Bind the spider open/close signals so start and end of the crawl can
        # be observed (status bookkeeping in spider_opened / spider_closed).
        cs = crawler.signals
        cs.connect(self.spider_closed, signals.spider_closed)
        cs.connect(self.spider_opened, signals.spider_opened)

    def start_requests(self):
        """Build the initial requests: one per product URL to crawl.

        Outbound products (URL contains ``dujia/wanle``) go to
        ``outbound_parse``; everything else goes to ``domestic_parse``.
        """
        # Query the product URLs that need to be crawled.
        db_operate = DbOperateClass()
        urls = []
        if self.miss_product_flag == 'FALSE':
            # NOTE(review): SQL built by string concatenation — injection-prone
            # if city_code is ever attacker-controlled; prefer parameterized
            # queries (PEP 249 paramstyle).
            sql = "SELECT tc_url FROM cpa_care_product WHERE city_code = '" + self.city_code + "' AND business_code='TICKET'"
            try:
                (fc, count) = db_operate.select_by_sql(sql)
                for row in fc:
                    if row[0] is not None and row[0] != '':
                        url = row[0]
                        urls.append(url)
            except Exception as e:
                logging.error("Exception %s" % e)
                raise
        else:
            urls.append(self.url)
        for url in urls:
            # Per-URL retry counter used by the parse callbacks.
            # NOTE(review): self.count is shared spider state, so concurrent
            # responses for different URLs share one counter — TODO confirm
            # this is acceptable for the configured concurrency.
            self.count = 0
            if url.find('dujia/wanle') > -1:
                yield Request(url, dont_filter=True, callback=self.outbound_parse)
            else:
                yield Request(url, dont_filter=True, callback=self.domestic_parse)

    def outbound_parse(self, response):
        """Parse an outbound (dujia/wanle) product detail page into a TicketDetailItem.

        Retries up to RETRY_TIMES when the body contains 'IE=EmulateIE9'
        (presumably a compat/interstitial page rather than real content —
        TODO confirm).  On parse failure, logs via ``exception_handel``.
        """
        sels = Selector(response)
        response_url = response.url
        # Python 2: response.body is a str, so .find works directly.
        if response.body.find('IE=EmulateIE9') > -1:
            if self.count < RETRY_TIMES:
                self.count += 1
                yield Request(response_url, callback=self.outbound_parse, dont_filter=True)
            else:
                logging.info("Retry %s times.Gave up retry." % RETRY_TIMES)
        else:
            self.count = 0
            try:
                item = TicketDetailItem()
                item['type'] = "TICKET"
                item['timer_param'] = self.timer_param
                item['timer_granularity'] = self.timer_granularity
                item['business_type'] = "ticket"

                item['product_table'] = "ticket_pdetail_tc"
                item['data_type'] = "TicketDetail"
                item['detail_id'] = DateCommon.get_id_by_datetime()
                item['company_code'] = getCompanyCode(self.company_code)
                item['platform_code'] = "PC"  # platform: WAP or PC
                item['city_code'] = self.city_code
                scenery_name_tag = sels.xpath('//*[@class="ly_info"]/h1/text()')
                if scenery_name_tag:
                    scenery_name = StringCommon.remove_blank(scenery_name_tag.extract()[0])
                else:
                    scenery_name = ""
                item['scenery_name'] = scenery_name
                scenery_addr_tag = sels.xpath('//*[@class="address-cont"]/p/text()')
                if scenery_addr_tag:
                    scenery_addr = StringCommon.remove_blank(scenery_addr_tag.extract()[0])
                else:
                    scenery_addr = ""
                item['scenery_addr'] = scenery_addr
                # Product id is the path segment between 'wanle/' and '.html'.
                item['product_id'] = ''.join(response.url).split('wanle/')[1].split('.html')[0]
                item['detail_url'] = response.url  # detail page URL
                # Product details
                item['lowest_price'] = StringCommon.remove_blank(
                    sels.xpath('//*[@class="price"]/b/text()').extract()[0])  # lowest price
                item['score'] = ""
                # Count embedded images by counting "<img" in the raw markup.
                image_num = sels.xpath('//*[@id="lineInfo"]').extract()[0]
                item['image_num'] = image_num.count("<img")
                # Regex that strips any HTML tags.
                dr = re.compile(r'<[^>]+>', re.S)
                introduction_tag = StringCommon.remove_blank(sels.xpath('//*[@class="order_detail"]').extract()[0])
                # Keep only the text between the "product description" and
                # "booking restrictions" section headings, tags removed.
                item['introduction'] = dr.sub('', introduction_tag.split('<h3>产品说明</h3>')[1].split('<h3>预订限制</h3>')[0])
                # Cap the introduction at ~500 chars; truncate and mark with '***'.
                if len(item['introduction']) > 500:
                    item['introduction'] = item['introduction'][:450] + '***'
                item['score_tag'] = ""
                item['star_level'] = ""
                item['create_time'] = DateCommon.get_current_date()
                item['update_time'] = item['create_time']
                item['task_id'] = self.task_id
                item['task_date'] = self.task_date
                db_operate = DbOperateClass()
                # NOTE(review): SQL built by concatenating response.url — use a
                # parameterized query instead.
                sql = "SELECT product_id FROM cpa_care_product WHERE tc_url like '%" + response.url + "%'"
                data = db_operate.select_one_by_sql(sql)
                if data is not None:
                    lv_product_id = data[0]
                else:
                    lv_product_id = ""
                item['lv_product_id'] = lv_product_id
                if self.snapshot == 'TRUE':
                    # Take a cropped screenshot and record its path.
                    element_tag = "//*[@class='ly_hd clearfix']"
                    element_tag1 = "//*[@class='ly_order']"
                    compress_path = self.save_screenshot(response, item['product_id'], self.city_code, element_tag,
                                                         element_tag1, self.company_code, self.task_id, self.task_date)
                    item['snapshot_addr'] = compress_path
                else:
                    item['snapshot_addr'] = " "
                # Commodity (per-ticket-type) details: parallel lists, one entry
                # per bookable commodity row.
                item['commodity_table'] = "ticket_cdetail_tc"
                item['commodity_type'] = []  # commodity type/category
                item['commodity_id'] = []  # commodity id
                item['commodity_name'] = []  # commodity name
                item['commodity_url'] = []  # commodity URL
                item['sale_cond'] = []  # advance-booking time / sale conditions
                item['refund_cond'] = []  # refund/change conditions
                item['rack_rate'] = []  # listed (rack) price
                item['sale_price'] = []  # sale price
                item['preferential'] = []  # promotion / discount text
                item['price_list_id'] = ""
                sites_evens = sels.css('.book_bd .book_tr')
                if sites_evens:
                    for sites in sites_evens:
                        commodity_type = StringCommon.remove_blank(
                            sites.css('.book_label > em').xpath('text()').extract()[0])
                        child_site = sites.css('.book_ul .book_li')
                        # NOTE(review): the inner loop variable shadows the
                        # outer ``sites`` — works, but confusing.
                        for sites in child_site:
                            commodity_name = StringCommon.remove_blank(
                                sites.css('.txt').xpath('text()').extract()[0])
                            commodity_url = "https:" + sites.css('.bookbtn').xpath('@href').extract()[0]
                            commodity_id = commodity_url.split('productId=')[1].split('&')[0]
                            dr = re.compile(r'<[^>]+>', re.S)
                            sale_cond_tag = StringCommon.remove_blank(sites.css('.td2').extract()[0])
                            sale_cond = dr.sub('', sale_cond_tag)
                            refund_cond_tag = sites.css('.book_info > li:nth-child(2)').extract()
                            # Keep only the text after the refund-rule heading,
                            # if the row mentions refund/change rules at all.
                            refund_cond = dr.sub('', refund_cond_tag[0].split('退改规则')[1]).replace("】", "").replace("：",
                                                                                                                   "") if refund_cond_tag and str(
                                refund_cond_tag).find('退改') > -1 else ""
                            rack_rate = ""
                            sale_price = StringCommon.remove_blank(sites.css('.tcprice::text').extract()[0])
                            preferential_tag = sites.css('.td6 > .J_Tips')
                            if preferential_tag:
                                preferential = dr.sub('', preferential_tag.xpath('@data-content').extract()[0])
                            else:
                                preferential = ""
                            item['commodity_type'].append(commodity_type)  # commodity type/category
                            item['commodity_id'].append(commodity_id)  # commodity id
                            item['commodity_name'].append(commodity_name)  # commodity name
                            item['commodity_url'].append(commodity_url)  # commodity URL
                            item['sale_cond'].append(sale_cond)  # advance-booking time / sale conditions
                            item['refund_cond'].append(refund_cond)  # refund/change conditions
                            item['rack_rate'].append(rack_rate)  # listed (rack) price
                            item['sale_price'].append(sale_price)  # sale price
                            item['preferential'].append(preferential)  # promotion / discount text
                    yield item
                else:
                    logging.info("此产品当日无可售商品或已下架！%s" % DateCommon.get_current_date())

            except Exception as e:
                self.exception_handel(response, e)

    def domestic_parse(self, response):
        """Parse a domestic scenery-ticket detail page into a TicketDetailItem.

        Mirrors ``outbound_parse`` but with the domestic page's selectors.
        When the commodity table is absent from the HTML, falls back to the
        SceneryPriceFrame AJAX endpoint via ``parse_commodity_list``.
        """
        sels = Selector(response)
        response_url = response.url
        # Python 2: response.body is a str, so .find works directly.
        if response.body.find('IE=EmulateIE9') > -1:
            if self.count < RETRY_TIMES:
                self.count += 1
                yield Request(response_url, callback=self.domestic_parse, dont_filter=True)
            else:
                logging.info("Retry %s times.Gave up retry." % RETRY_TIMES)
        else:
            self.count = 0
            try:
                item = TicketDetailItem()
                item['type'] = "TICKET"
                item['timer_param'] = self.timer_param
                item['timer_granularity'] = self.timer_granularity
                item['business_type'] = "ticket"

                item['product_table'] = "ticket_pdetail_tc"
                item['data_type'] = "TicketDetail"
                item['detail_id'] = DateCommon.get_id_by_datetime()
                item['company_code'] = getCompanyCode(self.company_code)
                item['platform_code'] = "PC"  # platform: WAP or PC
                item['city_code'] = self.city_code

                # The scenery name can appear in one of three layouts; try each
                # selector in turn.
                scenery_name_tag = sels.xpath('//*[@class="s_name"]/text()')
                scenery_name_tag1 = sels.xpath('//*[@id="content"]/div[3]/div[2]/h3/text()')
                scenery_name_tag2 = sels.xpath('//*[@class="nav_box"]/text()')
                if scenery_name_tag:
                    scenery_name = StringCommon.remove_blank(scenery_name_tag.extract()[0])
                elif scenery_name_tag1:
                    scenery_name = StringCommon.remove_blank(scenery_name_tag1.extract()[0])
                elif scenery_name_tag2:
                    scenery_name = StringCommon.remove_blank(scenery_name_tag2.extract()[0]).split('门票及相关产品')[0]
                else:
                    scenery_name = ""
                item['scenery_name'] = scenery_name
                scenery_addr_tag = sels.xpath('//*[@id="content"]/div[3]/div[2]/p[1]/span/text()')
                scenery_addr_tag1 = sels.xpath('//*[@class="s_com"]/span/text()')
                if scenery_addr_tag:
                    scenery_addr = StringCommon.remove_blank(scenery_addr_tag.extract()[0])
                elif scenery_addr_tag1:
                    scenery_addr = StringCommon.remove_blank(scenery_addr_tag1.extract()[0])
                else:
                    scenery_addr = ""
                item['scenery_addr'] = scenery_addr
                # Product id is the segment between 'BookSceneryTicket_' and '.html'.
                item['product_id'] = ''.join(response.url).split('BookSceneryTicket_')[1].split('.html')[0]
                item['detail_url'] = response.url  # detail page URL
                # Product details
                item['lowest_price'] = StringCommon.remove_blank(
                    sels.xpath('//*[@class="s_p_t"]/span/b/text()').extract()[0])  # lowest price
                item['score'] = StringCommon.remove_blank(
                    sels.xpath('//*[@class="hpl_grade"]/span/text()').extract()[0])  # review score
                image_num = sels.xpath('//*[@class="b_i_m clearfix"]').extract()[0]
                item['image_num'] = image_num.count("<img")
                item['introduction'] = StringCommon.remove_blank(
                    sels.xpath('//*[@class="s_com open_time canhover"]/span/text()').extract()[0])  # usage notes

                # Review impression tags; '||' is injected as a separator before
                # all tags are stripped.
                score_tag = StringCommon.remove_blank(sels.xpath('//*[@class="impress_box"]').extract()[0])
                dr = re.compile(r'<[^>]+>', re.S)
                score_tag = dr.sub('', score_tag.replace("</a>", "||</a>"))
                item['score_tag'] = score_tag
                star_level_tag = response.css('h3.s_name > span')
                if star_level_tag:
                    star_level_tag = StringCommon.remove_blank(
                        sels.xpath('//*[@class="s_name"]/span').extract()[0])  # star rating markup
                else:
                    star_level_tag = ""  # no star rating
                # Star level is encoded by the number of 'A's (e.g. "5A").
                if star_level_tag.count("A") > 0:
                    item['star_level'] = str(star_level_tag.count("A")) + "A"
                else:
                    item['star_level'] = ""
                item['create_time'] = DateCommon.get_current_date()
                item['update_time'] = item['create_time']
                item['task_id'] = self.task_id
                item['task_date'] = self.task_date
                db_operate = DbOperateClass()
                # NOTE(review): SQL built by concatenating response.url — use a
                # parameterized query instead.
                sql = "SELECT product_id FROM cpa_care_product WHERE tc_url like '%" + response.url + "%'"
                data = db_operate.select_one_by_sql(sql)
                if data is not None:
                    lv_product_id = data[0]
                else:
                    lv_product_id = ""
                item['lv_product_id'] = lv_product_id
                if self.snapshot == 'TRUE':
                    # Take a cropped screenshot and record its path.
                    element_tag = "//*[@class='info_box_w clearfix']"
                    element_tag1 = "//*[@class='tics mt20']"
                    compress_path = self.save_screenshot(response, item['product_id'], self.city_code, element_tag,
                                                         element_tag1, self.company_code, self.task_id, self.task_date)
                    item['snapshot_addr'] = compress_path
                else:
                    item['snapshot_addr'] = " "
                # Commodity (per-ticket-type) details: parallel lists, one entry
                # per bookable commodity row.
                item['commodity_table'] = "ticket_cdetail_tc"
                item['commodity_type'] = []  # commodity type/category
                item['commodity_id'] = []  # commodity id
                item['commodity_name'] = []  # commodity name
                item['commodity_url'] = []  # commodity URL
                item['sale_cond'] = []  # advance-booking time / sale conditions
                item['refund_cond'] = []  # refund/change conditions
                item['rack_rate'] = []  # listed (rack) price
                item['sale_price'] = []  # sale price
                item['preferential'] = []  # promotion / discount text
                item['price_list_id'] = ""
                sites_evens = sels.css('.api_item .api_channel')
                if sites_evens:
                    for sites in sites_evens:
                        commodity_type = StringCommon.remove_blank(
                            sites.css('.api_item_name > b').xpath('text()').extract()[0])
                        child_site = sites.css('.api_item_content .api_line')
                        # NOTE(review): the inner loop variable shadows the
                        # outer ``sites`` — works, but confusing.
                        for sites in child_site:
                            commodity_name = StringCommon.remove_blank(
                                sites.css('.p-type > a > b').xpath('text()').extract()[0])
                            commodity_url = "http://www.ly.com" + sites.css('.f_bookbtn_w').xpath('@href').extract()[0]
                            # The commodity id is extracted differently per URL
                            # scheme (ticketId query param, zizhuyou path, or
                            # ProductDetail_*.html).
                            # NOTE(review): if none of the three branches match,
                            # commodity_id is unbound and the append below raises
                            # (caught by the outer except) — TODO confirm intended.
                            if commodity_url.find("ticketId") != -1:
                                commodity_id = commodity_url.split('ticketId=')[1]
                            elif commodity_url.find("zizhuyou") != -1:
                                commodity_url = "http://zby.ly.com" + sites.css('.f_bookbtn_w').xpath('@href').extract()[0]
                                commodity_id = commodity_url.split('/')[5].split('-')[0]
                            elif commodity_url.find("ProductDetail") != -1:
                                commodity_id = commodity_url.split('ProductDetail_')[1].split('.html')[0]
                            sale_cond_tag = StringCommon.remove_blank(sites.css('.W02_5').extract()[0])  # sale-condition cell markup
                            dr = re.compile(r'<[^>]+>', re.S)
                            sale_cond = dr.sub('', sale_cond_tag)
                            refund_cond_tag = StringCommon.remove_blank(sites.css('.W03').extract()[0])  # refund-condition cell markup
                            refund_cond = dr.sub('', refund_cond_tag)
                            rack_rate_tag = StringCommon.remove_blank(sites.css('.W07').extract()[0])
                            rack_rate = dr.sub('', rack_rate_tag)
                            sale_price = StringCommon.remove_blank(sites.css('.W04 span::text').extract()[0])
                            preferential_tag = sites.css('.W05 > span::text')
                            if preferential_tag:
                                preferential = StringCommon.remove_blank(sites.css('.W05 > span::text').extract()[0])
                            else:
                                preferential = ""
                            item['commodity_type'].append(commodity_type)  # commodity type/category
                            item['commodity_id'].append(commodity_id)  # commodity id
                            item['commodity_name'].append(commodity_name)  # commodity name
                            item['commodity_url'].append(commodity_url)  # commodity URL
                            item['sale_cond'].append(sale_cond)  # advance-booking time / sale conditions
                            item['refund_cond'].append(refund_cond)  # refund/change conditions
                            item['rack_rate'].append(rack_rate)  # listed (rack) price
                            item['sale_price'].append(sale_price)  # sale price
                            item['preferential'].append(preferential)  # promotion / discount text
                    yield item
                else:
                    # No commodity table in the HTML: fetch the price list from
                    # the AJAX price-frame endpoint instead, carrying the
                    # partially-filled item along in request meta.
                    relative_url = "http://www.ly.com/scenery/AjaxHelper/SceneryPriceFrame.aspx?action=GETNEWPRICEFRAMEFORLAST&ids=productId&isSimple=1&isShowAppThree=0&widthtype=1&isGrap=1&nobookid=&isyry=1&YpState=1&lon=null&lat=null&isforegin=null&iid=0.9255435850637839"
                    relative_url = relative_url.replace("productId", item['product_id'])
                    request = Request(relative_url, meta={'item': item}, callback=self.parse_commodity_list)
                    yield request

            except Exception as e:
                self.exception_handel(response, e)

    def parse_commodity_list(self, response):
        """Fill the commodity lists of the item from the AJAX price-frame response.

        The response body is HTML-wrapped JSON: tags are stripped first, then
        the remaining text is parsed as JSON.  Returns the completed item
        (possibly unchanged when the body contains no JSON object).
        """
        item = response.meta['item']
        dr = re.compile(r'<[^>]+>', re.S)
        json_data = dr.sub('', response.body)
        # Only attempt JSON parsing when a '{' is present at all.
        if json_data.find("{") > -1:
            sites = json.loads(json_data)
            item['scenery_name'] = sites['SceneryPrices'][0]['DestinationName']
            commodity_data = sites['SceneryPrices'][0]['ChannelPriceModelEntityList']
            if commodity_data is not None:
                for site in commodity_data:
                    commodity_type = site['ConsumersTypeName']
                    channel_price_list = site['ChannelPriceEntityList']
                    for channel_price in channel_price_list:
                        commodity_name = channel_price['TicketName']
                        commodity_url = channel_price['OrderUrl']
                        # Same three URL schemes as in domestic_parse, but with
                        # an explicit empty-id fallback here.
                        if commodity_url.find("ticketId") != -1:
                            commodity_url = "http://www.ly.com/" + channel_price['OrderUrl']
                            match = re.search(r'ticketId=\d+', commodity_url)
                            commodity_id = match.group(0).split('ticketId=')[1]
                        elif commodity_url.find("zizhuyou") != -1:
                            commodity_url = "http://zby.ly.com" + channel_price['OrderUrl']
                            commodity_id = commodity_url.split('/')[5].split('-')[0]
                        elif commodity_url.find("ProductDetail") != -1:
                            commodity_url = "http://www.ly.com/" + channel_price['OrderUrl']
                            commodity_id = commodity_url.split('ProductDetail_')[1].split('.html')[0]
                        else:
                            commodity_id = ""
                        sale_cond = channel_price['BookTime']
                        refund_cond = channel_price['RefundModifyRule']
                        rack_rate = channel_price['Amount']
                        sale_price = channel_price['AmountAdvice']
                        preferential = ""
                        preferential_list = channel_price['TicketTagEntityList']
                        if preferential_list is not None:
                            # Only the tag with this specific id carries the
                            # promotion description — TODO confirm the magic id.
                            for preferential_json in preferential_list:
                                preferential_id = preferential_json['Id']
                                if str(preferential_id) == '2100602':
                                    preferential = preferential_json['Description']
                        item['commodity_type'].append(commodity_type)  # commodity type/category
                        item['commodity_id'].append(commodity_id)  # commodity id
                        item['commodity_name'].append(commodity_name)  # commodity name
                        item['commodity_url'].append(commodity_url)  # commodity URL
                        item['sale_cond'].append(sale_cond)  # advance-booking time / sale conditions
                        item['refund_cond'].append(refund_cond)  # refund/change conditions
                        item['rack_rate'].append(rack_rate)  # listed (rack) price
                        item['sale_price'].append(sale_price)  # sale price
                        item['preferential'].append(preferential)  # promotion / discount text
        return item

    # Screenshot the page with PhantomJS and crop to the named elements.
    def save_screenshot(self, response, product_id, city_code, element_tag, element_tag1, company_code, task_id,
                        task_date):
        """Take a PhantomJS screenshot of the page, crop it to the region spanned
        by the two XPath-located elements, and save it under ./compressImage/.

        Returns the compressed image path, or "" when anything fails (the whole
        body is best-effort: failures are logged, never raised).
        """
        compress_path = ""
        try:
            file_name = getCompanyCode(
                company_code) + "__" + city_code + "__" + task_id + "__" + task_date + "__" + product_id + ".png"
            # Reuse the proxy the Scrapy request went through; skip images for speed.
            service_args = [
                '--proxy=' + response.meta['proxy'],
                '--load-images=no',
            ]
            dcap = dict(DesiredCapabilities.PHANTOMJS)
            # Fake the user agent: pick a random one from the rotation middleware's list.
            middleware = RotateUserAgentMiddleware()
            user_agent = random.choice(middleware.user_agent_list)
            dcap["phantomjs.page.settings.userAgent"] = user_agent
            dcap["phantomjs.page.settings.resourceTimeout"] = 5000  # per-resource timeout (ms)
            # Set the Referer header to the page being crawled.
            dcap["phantomjs.page.customHeaders.Referer"] = (response.url)
            # NOTE(review): if this constructor raises, ``driver`` is unbound and
            # the finally block's inner except logs a NameError instead.
            driver = webdriver.PhantomJS(desired_capabilities=dcap, service_args=service_args)  # production setup
            logging.info("%s PhantomJS is getUrl..." % datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
            driver.set_page_load_timeout(100)
            driver.set_script_timeout(100)

            driver.get(response.url)
            # Full-page screenshot first, written to ./screenshot/.
            path = os.getcwd() + "/screenshot/" + file_name
            logging.info("%s save_screenshot is starting..." % datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
            driver.save_screenshot(path)
            logging.info("%s save_screenshot is end..." % datetime.now().strftime('%Y-%m-%d %H:%M:%S'))

            # Compute the crop box from the two elements: width from the first,
            # height spanning both elements stacked vertically.
            element = driver.find_element_by_xpath(element_tag)
            element1 = driver.find_element_by_xpath(element_tag1)
            left = int(element.location['x'])
            top = int(element.location['y'])
            right = int(element.location['x'] + element.size['width'])
            bottom = int(element.location['y'] + element.size['height'] + element1.size['height'])
            # Crop the full screenshot to that box and delete the original file.
            compress_path = os.getcwd() + "/compressImage/" + file_name
            im = Image.open(path)
            im = im.crop((left, top, right, bottom))  # (left, top, right, bottom)
            im.save(compress_path)
            os.remove(path)
        except Exception as e:
            logging.error("截图失败 %s ,%s" % (response.url, e))
        finally:
            # Always try to kill the PhantomJS process; quit() alone can leave
            # the process running, hence the explicit SIGTERM first.
            try:
                driver.service.process.send_signal(signal.SIGTERM)
                driver.quit()
            except Exception as e:
                logging.error("driver.quit_failed %s" % e)
        return compress_path

    # NOTE(review): method name is a typo for "exception_handle"; renaming would
    # break the call sites above, so it is only flagged here.
    def exception_handel(self, response, e):
        """Log a parse failure, persist it to cpa_oper_log, and optionally
        forward it to the log-collection service."""
        logging.error("记录日志：标签变动 %s ,%s" % (response.url, e))
        logging.info("页面源码：%s ,%s" % (response.url, response.body))
        logging.error("Exception: %s" % traceback.format_exc().replace("\"", "'"))
        db_operate = DbOperateClass()
        # Keep only the first 500 characters of the error description; mark the
        # truncation with '***'.
        error_desc = traceback.format_exc().replace("\"", "'").replace(" ", "")
        if len(error_desc) > 500:
            error_desc = error_desc[:500] + '***'
        # Write the error into the operation-log table.
        # NOTE(review): SQL built via string interpolation — a traceback containing
        # quotes/backslashes can break the statement; use a parameterized query.
        sql = "insert into cpa_oper_log (log_type,company_code,busi_type,url,batch_num,timer_code,error_code,error_desc,task_id,task_date) values (\"%s\",\"%s\",\"%s\",\"%s\",\"%s\" ,\"%s\",\"%s\",\"%s\",\"%s\",\"%s\")" \
              % ("C", getCompanyCode(self.company_code), "门票", response.url, self.task_date, "timer_code",
                 "error_code", error_desc, str(self.task_id), str(self.task_date))
        db_operate.update_by_sql(sql)
        if self.save_server == 'TRUE':
            http_common = HttpCommon()
            operate_log = {'type': 'LOG',
                           'log_type': "C",
                           'company_code': getCompanyCode(self.company_code),
                           'busi_type': "门票",
                           'url': response.url,
                           'batch_num': self.task_date,
                           'timer_code': "timer_code",
                           'error_code': "error_code",
                           'error_desc': error_desc,
                           'task_id': str(self.task_id),
                           'task_date': str(self.task_date)}
            http_common.get_method(operate_log)

    @classmethod
    def from_crawler(cls, crawler, **kwargs):
        """Scrapy entry point: normalize the kwargs for the two crawl modes
        (batch vs. missed-product re-crawl) and construct the spider."""
        logging.info('inside_from_crawler: ')
        # Batch mode has no single url/product; re-crawl mode has no timer.
        if kwargs['miss_product_flag'] == 'FALSE':
            kwargs['url'] = ''
            kwargs['lv_product_id'] = ''
        else:
            kwargs['timer_granularity'] = ''
            kwargs['timer_id'] = ''

        return cls(
            crawler,
            spider_name=kwargs['spider_name'],
            city_code=kwargs['city_code'],
            company_code=kwargs['company_code'],
            task_id=kwargs['task_id'],
            task_date=kwargs['task_date'],
            timer_param=kwargs['timer_param'],
            timer_granularity=kwargs['timer_granularity'],
            timer_id=kwargs['timer_id'],
            url=kwargs['url'],
            lv_product_id=kwargs['lv_product_id'],
            miss_product_flag=kwargs['miss_product_flag']
        )

    def spider_opened(self, spider):
        """Signal handler: mark the timer as running (status 2) when the spider starts."""
        logging.info('Spider_opened_list: %s' % spider.name)
        # Set the timer status to 2 (started).
        db_operate = DbOperateClass()
        db_operate.update_status(2, "PYTHON", self.timer_id)

    def spider_closed(self, spider, reason):
        """Signal handler: record completion when the spider closes.

        Re-crawl mode marks the missed product done; batch mode sets the timer
        to status 5 (finished) only if it is still in status 2 (running).
        """
        logging.info('spider_closed_detail: %s ,reason:%s' % (spider.name, reason))
        db_operate = DbOperateClass()
        if self.miss_product_flag == 'TRUE':  # missed-product re-crawl
            db_operate.update_miss_status(1, self.url, self.task_id)
        else:
            # NOTE(review): SQL built by concatenating timer_id — prefer a
            # parameterized query.
            sql = "SELECT timer.timer_status FROM cpa_timer timer WHERE timer.timer_type='PYTHON' " \
                  "AND timer.timer_id = '" + self.timer_id + "'"
            (fc, count) = db_operate.select_by_sql(sql)
            spider.logger.info('SELECT_COUNT: %s ,SQL:%s' % (count, sql))
            timer_status = ""
            for row in fc:
                timer_status = row[0]
            logging.info("timer_status:%s" % timer_status)
            if timer_status == '2':
                db_operate.update_status(5, "PYTHON", self.timer_id)