# -*- coding: UTF-8 -*-
import scrapy
from scrapy.selector import Selector

from scrapy.http import Request
import traceback
from scrapy import signals
import os, sys
import subprocess
from pangolin.common.date_common import DateCommon
from pangolin.spider_enum.companyCodeEnum import getCompanyCode
from pangolin.common.db_operateCommon import *
from pangolin.common.string_common import StringCommon
from pangolin.items import TicketDetailItem
from pangolin.common.http_common import *
import logging
import re
from urlparse import urljoin
from pangolin.settings import IS_SAVE_SERVER
from pangolin.settings import IS_SNAPSHOT
from selenium import webdriver
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from PIL import Image
from datetime import *
import time

# Python 2 hack: re-import sys to restore setdefaultencoding() (hidden by
# site.py at startup) so implicit str<->unicode conversions of the Chinese
# page text default to UTF-8 instead of ASCII.
reload(sys)
sys.setdefaultencoding('UTF-8')


class PangolinSpider(scrapy.Spider):
    """Qunar mobile (touch.piao.qunar.com) ticket-detail spider.

    Start URLs are loaded from the ``cpa_care_product`` table for the given
    city.  Each detail page is parsed into a ``TicketDetailItem`` holding one
    scenic-spot record plus parallel lists of its ticket commodities; a
    PhantomJS screenshot of the page may be saved alongside.  Parse failures
    are written to ``cpa_oper_log`` and optionally posted to a log service.
    """
    name = 'qnr_ticket_by_url_m_spider'
    allowed_domain = 'qunar.com'  # domain the spider is meant to be limited to
    # NOTE(review): Scrapy's OffsiteMiddleware reads `allowed_domains`
    # (plural, a list); this singular attribute is ignored -- confirm intent.

    # start_urls = [
    #     'http://www.ly.com/scenery/scenerysearchlist_2_41__0__0__0_0_0.html']

    def __init__(self, crawler, spiderName, cityCode, companyCode, taskId, taskDate, timerParam, timerGranularity,
                 timerId):
        """Store task parameters, load start URLs from the DB, purge old data
        for this batch/city and connect spider lifecycle signals.

        :param crawler: the Scrapy crawler, used to bind open/close signals
        :param spiderName/cityCode/companyCode/taskId/taskDate: task identity
        :param timerParam/timerGranularity/timerId: scheduler (timer) context
        """
        self.save_server = IS_SAVE_SERVER
        self.snapshot = IS_SNAPSHOT
        # Keep constructor arguments on self for use across callbacks
        self.spiderName = spiderName
        self.cityCode = cityCode
        self.companyCode = companyCode
        self.taskId = taskId
        self.taskDate = taskDate
        self.timerParam = timerParam
        self.timerGranularity = timerGranularity
        self.timerId = timerId

        # Look up this city's care-product Qunar URLs and use them as the
        # spider's start pages.
        db_operate = DbOperateClass()
        # NOTE(review): SQL is built by string concatenation; if cityCode can
        # ever contain a quote this is injectable -- prefer parameterized
        # queries if select_by_sql supports them.
        sql = "SELECT qnr_url FROM cpa_care_product WHERE city_code = '" + self.cityCode + "' AND business_code='TICKET'"
        logging.info("SELECT_SQL:%s" % sql)
        (fc, count) = db_operate.select_by_sql(sql)
        first_page_url = []
        for row in fc:
            if row[0] is not None:
                url = row[0]
                first_page_url.append(url)
        self.start_urls = first_page_url
        self.base_urls = "http://touch.piao.qunar.com"
        # Before crawling, delete previously collected data for the same
        # batch (taskId/taskDate) and city so the run is idempotent.
        # db_operate.delete_pd_list_data("ticket_plist_qnr", getCompanyCode(companyCode), cityCode, taskId, taskDate)
        db_operate.delete_pd_and_cd_data("ticket_pdetail_qnr", "ticket_cdetail_qnr", getCompanyCode(companyCode),
                                         cityCode, taskId, taskDate)
        # Bind spider open/close signals so start/finish can be observed
        self.crawler = crawler
        cs = crawler.signals
        cs.connect(self.spider_closed, signals.spider_closed)
        cs.connect(self.spider_opened, signals.spider_opened)

    def parse(self, response):  # page-parse callback; Scrapy requires the name parse()
        """Parse one ticket-detail page into a ``TicketDetailItem``.

        Yields exactly one item per page (with empty commodity lists when the
        page has no ticket container).  On any extraction error, logs the
        failure into ``cpa_oper_log`` and optionally posts it to the remote
        log service instead of yielding an item.
        """
        lowest_price = ""
        sels = Selector(response)
        try:
            item = TicketDetailItem()
            item['type'] = "TICKET"
            item['timer_param'] = self.timerParam
            item['timer_granularity'] = self.timerGranularity
            item['business_type'] = "ticket"

            item['product_table'] = "ticket_pdetail_qnr"
            item['data_type'] = "TicketDetail"
            item['detail_id'] = DateCommon.get_id_by_datetime()
            item['company_code'] = getCompanyCode(self.companyCode)
            item['platform_code'] = "WAP"  # platform: WAP or PC
            item['city_code'] = self.cityCode
            # Scenic-spot name; "A" characters are stripped (presumably the
            # site renders the star rating as a run of "A" glyphs -- see
            # star_level below, which counts them first) along with the
            # literal suffix/brackets.
            item['scenery_name'] = StringCommon.remove_blank(
                sels.xpath('//*[@class="mp-headfeagure-title"]/text()').extract()[0]).replace("A", "").replace("景区",
                                                                                                               "").replace(
                "(", "").replace(")", "")  # scenic spot name
            item['scenery_addr'] = StringCommon.remove_blank(
                sels.xpath('//*[@class="mp-baseinfo-address-txt"]/text()').extract()[0])  # scenic spot address
            item['detail_url'] = response.url  # detail page URL
            item['product_id'] = item['detail_url'].split('detail_')[1].split('.html')[0]

            # Product details
            item['lowest_price'] = lowest_price
            item['score'] = sels.xpath('//*[@class="mp-comments-tagscore"]/text()').extract()[0]  # rating score
            item['image_num'] = ""
            # introduction_tag = StringCommon.remove_blank(
            #     sels.xpath('//*[@class="mp-baseinfo-opentime-inner"]').extract()[0])  # usage instructions
            # dr = re.compile(r'<[^>]+>', re.S)
            # introduction = dr.sub('', introduction_tag)
            item['introduction'] = ""

            # score_tag = StringCommon.remove_blank(
            #     sels.xpath('//*[@class="mp-baseinfo mp-border-bottom"]/span/i[1]/text()').extract()[0])  # rating tag / impression
            score_tag = StringCommon.remove_blank(
                sels.xpath('//*[@class="mp-iconfont mpf-starlevel-gain"]/text()').extract()[0])  # rating tag / impression
            dr = re.compile(r'<[^>]+>', re.S)
            score_tag = dr.sub('', score_tag)
            # "B" glyphs apparently render as hollow stars on the site --
            # replaced with the literal star character here (TODO confirm).
            item['score_tag'] = score_tag.replace("B", "☆")
            # Star level (e.g. "5A") derived from the count of "A" glyphs in
            # the raw scenery name (note: name was already stripped of "A"
            # above, so this count is always 0 -- NOTE(review): looks like
            # the count should be taken before the replace; verify).
            if item['scenery_name'].count("A") > 0:
                item['star_level'] = str(item['scenery_name'].count("A")) + "A"
            else:
                item['star_level'] = ""
            item['create_time'] = DateCommon.get_current_date()
            item['update_time'] = item['create_time']
            item['task_id'] = self.taskId
            item['task_date'] = self.taskDate
            db_operate = DbOperateClass()
            # Map this Qunar URL back to the internal (lv) product id.
            # NOTE(review): URL concatenated into LIKE clause -- same
            # injection concern as in __init__.
            sql = "SELECT product_id FROM cpa_care_product WHERE qnr_url like '%" + response.url + "%'"
            logging.info("SELECT_product_id_SQL:%s" % sql)
            data = db_operate.select_one_by_sql(sql)
            if data[0] is not None:
                lv_product_id = data[0]
            else:
                lv_product_id = ""
            item['lv_product_id'] = lv_product_id
            if self.snapshot == 'TRUE':
                # Take a screenshot of the page and record its saved path
                compress_path = self.save_screenshot(response, item['product_id'], self.cityCode,
                                                     self.companyCode, self.taskId, self.taskDate)
                item['snapshot_addr'] = compress_path
            else:
                item['snapshot_addr'] = " "
            # Commodity (SKU) details -- parallel lists, one entry per ticket
            item['commodity_table'] = "ticket_cdetail_qnr"
            item['commodity_type'] = []  # commodity type/category
            item['commodity_id'] = []  # commodity id
            item['commodity_name'] = []  # commodity name
            item['commodity_url'] = []  # commodity URL
            item['sale_cond'] = []  # advance-booking / sale conditions
            item['refund_cond'] = []  # refund/change conditions
            item['rack_rate'] = []  # rack (walk-up) price
            item['sale_price'] = []  # sale price
            item['preferential'] = []  # discount / promotion
            item['price_list_id'] = ""
            product_tag = sels.css('.mp-ticket-container')
            if product_tag:
                sites_evens = sels.css('.mp-ticket-container .mp-ticket-group')
                for i, sites in enumerate(sites_evens):
                    commodity_type = sites.css('.mp-ticket-type.mp-border-bottom').xpath('text()').extract()[0]
                    child_site = sites.css('.mp-ticket-group .mp-ticket-list')
                    for child_sites in child_site:
                        commodity_name = StringCommon.remove_blank(
                            child_sites.css('.mp-ticket-type-name').xpath('text()').extract()[0])
                        commodity_url_list = child_sites.css('.mp-ticket-link').xpath('@href').extract()
                        commodity_url_tag = sites.css('.mp-ticket-list > a')
                        # Prefer the per-ticket relative link (resolved
                        # against base_urls); fall back to the group-level
                        # anchor, else leave empty.
                        if len(commodity_url_list) > 0:
                            commodity_url = urljoin(self.base_urls, commodity_url_list[0])
                        elif commodity_url_tag:
                            commodity_url = sites.css('.mp-ticket-list > a').xpath('@href').extract()[0]
                        else:
                            commodity_url = ""
                        commodity_id = child_sites.css('.mp-ticket-list').xpath('@data-typeid').extract()[0]
                        sale_cond = ""
                        refund_cond = ""
                        rack_rate = ""
                        sale_price = StringCommon.remove_blank(
                            child_sites.css('.mp-ticket-type-price > em').xpath('text()').extract()[0])
                        preferential = ""

                        item['commodity_type'].append(commodity_type)  # commodity type/category
                        item['commodity_id'].append(commodity_id)  # commodity id
                        item['commodity_name'].append(commodity_name)  # commodity name
                        item['commodity_url'].append(commodity_url)  # commodity URL
                        item['sale_cond'].append(sale_cond)  # advance-booking / sale conditions
                        item['refund_cond'].append(refund_cond)  # refund/change conditions
                        item['rack_rate'].append(rack_rate)  # rack (walk-up) price
                        item['sale_price'].append(sale_price)  # sale price
                        item['preferential'].append(preferential)  # discount / promotion
                yield item
            else:
                yield item
        except Exception as e:
            # Any extraction failure (usually a page-layout change) is logged
            # to the DB and, optionally, the remote log service.
            db_operate = DbOperateClass()
            logging.info("记录日志：标签变动 %s ,%s" % (response.url, e))
            logging.info("页面源码：%s ,%s" % (response.url, e))
            logging.error("qnr_ticket_by_scenery_m_spider_Exception: %s" % traceback.format_exc().replace("\"", "'"))
            # Keep only the first 500 chars of the traceback; mark the
            # truncation with ***
            error_desc = traceback.format_exc().replace("\"", "'").replace(" ", "")
            if len(error_desc) > 500:
                error_desc = error_desc[:500] + '***'
            # Record the error in the operation-log table
            sql = "insert into cpa_oper_log (log_type,company_code,busi_type,url,batch_num,timer_code,error_code,error_desc,task_id,task_date) values (\"%s\",\"%s\",\"%s\",\"%s\",\"%s\" ,\"%s\",\"%s\",\"%s\",\"%s\",\"%s\")" \
                  % ("C", getCompanyCode(self.companyCode), "门票", response.url, self.taskDate, "timer_code",
                     "error_code", error_desc, str(self.taskId), str(self.taskDate))
            db_operate.update_by_sql(sql)
            if self.save_server == 'TRUE':
                http_common = HttpCommon()
                operate_log = {'type': 'LOG',
                               'log_type': "C",
                               'company_code': getCompanyCode(self.companyCode),
                               'busi_type': "门票",
                               'url': response.url,
                               'batch_num': self.taskDate,
                               'timer_code': "timer_code",
                               'error_code': "error_code",
                               'error_desc': error_desc,
                               'task_id': str(self.taskId),
                               'task_date': str(self.taskDate)}
                http_common.get_method(operate_log)

    # Element-based screenshot
    def save_screenshot(self, response, productId, cityCode, companyCode, taskId, taskDate):
        """Render the page in PhantomJS, crop it to the base-info + ticket
        elements, and save the cropped image.

        Returns the path of the cropped image, or "" if anything failed
        (failures are logged, never raised).  The uncropped screenshot is
        deleted after cropping.
        """
        compress_path = ""
        try:
            fileName = getCompanyCode(
                companyCode) + "__" + cityCode + "__" + taskId + "__" + taskDate + "__" + productId + ".png"
            # Reuse the proxy Scrapy fetched through; skip image loading
            service_args = [
                '--proxy=' + response.meta['proxy'],
                '--load-images=no', ]
            dcap = dict(DesiredCapabilities.PHANTOMJS)
            # # forge UA info
            # dcap["phantomjs.page.settings.userAgent"] = (request.headers['User-Agent'])
            dcap["phantomjs.page.settings.resourceTimeout"] = 5000  # per-resource timeout (ms)
            # Add Referer request header
            dcap["phantomjs.page.customHeaders.Referer"] = (response.url)
            driver = webdriver.PhantomJS(desired_capabilities=dcap, service_args=service_args)  # production invocation
            logging.info("%s PhantomJS is getUrl..." % datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
            driver.set_page_load_timeout(100)
            driver.set_script_timeout(100)
            driver.get(response.url)

            # Full-page screenshot
            path = os.getcwd() + "/screenshot/" + fileName
            logging.info("%s save_screenshot is starting..." % datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
            driver.save_screenshot(path)
            logging.info("%s save_screenshot is end..." % datetime.now().strftime('%Y-%m-%d %H:%M:%S'))

            # Compute the crop box from the base-info element's position,
            # extended downward by the ticket container's height
            element = driver.find_element_by_xpath("//*[@class='mp-baseinfo mp-border-bottom']")
            element1 = driver.find_element_by_xpath("//*[@class='mp-ticket-container']")
            left = int(element.location['x'])
            top = int(element.location['y'])
            right = int(element.location['x'] + element.size['width'])
            bottom = int(element.location['y'] + element.size['height'] + element1.size['height'])
            # Crop the full screenshot to the computed box and save it
            compress_path = os.getcwd() + "/compressImage/" + fileName
            im = Image.open(path)
            im = im.crop((left, top, right, bottom))  # (left, top, right, bottom)
            im.save(compress_path)
            os.remove(path)
        except Exception as e:
            logging.error("截图失败 %s ,%s" % (response.url, e))
        finally:
            # driver may be unbound if webdriver.PhantomJS itself failed;
            # quit is therefore wrapped in its own try/except
            try:
                driver.quit()
            except Exception as e:
                logging.error("driver.quit_failed %s" % e)
        return compress_path

    @classmethod
    def from_crawler(cls, crawler, **kwargs):
        """Scrapy factory hook: build the spider from command-line kwargs.

        NOTE(review): any missing expected kwarg raises KeyError at
        arg['...'] below -- confirm all eight are always supplied.
        """
        logging.info('inside_from_crawler: ')
        # Collect the expected keyword arguments
        args_key = {'spiderName': '', 'cityCode': '', 'companyCode': '', 'taskId': '', 'taskDate': '', 'timerParam': '',
                    'timerGranularity': '', 'timerId': ''}
        arg = {}
        for key in args_key:
            if key in kwargs:
                arg[key] = kwargs[key]
        return cls(
            crawler,
            spiderName=arg['spiderName'],
            cityCode=arg['cityCode'],
            companyCode=arg['companyCode'],
            taskId=arg['taskId'],
            taskDate=arg['taskDate'],
            timerParam=arg['timerParam'],
            timerGranularity=arg['timerGranularity'],
            timerId=arg['timerId'])

    def spider_opened(self, spider):
        """Signal handler: mark this timer as running (status 2) in the DB."""
        logging.info('Spider_opened_list: %s' % spider.name)
        # Mark the timer as started (status 2)
        db_operate = DbOperateClass()
        db_operate.update_status(2, "PYTHON", self.timerId)

    def spider_closed(self, spider, reason):
        """Signal handler for spider shutdown.

        Optionally notifies the remote task service, then -- if the timer is
        still in running state (2) -- marks it finished (status 5).
        """
        logging.info('spider_closed_list: %s ,reason:%s' % (spider.name, reason))
        if self.save_server == 'TRUE':
            time.sleep(2)
            http_common = HttpCommon()
            payload = {'type': 'TASK',
                       'company_code': getCompanyCode(self.companyCode),
                       'business_type': 'TICKET',
                       'task_id': self.taskId,
                       'timer_param': self.timerParam,
                       'timer_granularity': self.timerGranularity,
                       'city_code': self.cityCode}
            http_common.get_method(payload)
        db_operate = DbOperateClass()
        sql = "SELECT timer.timer_status FROM cpa_timer timer WHERE timer.timer_type='PYTHON' AND timer.timer_id = '" \
              + self.timerId + "'"
        (fc, count) = db_operate.select_by_sql(sql)
        spider.logger.info('SELECT_COUNT: %s ,SQL:%s' % (count, sql))
        timer_status = ""
        for row in fc:
            timer_status = row[0]
        logging.info("timer_status:%s" % timer_status)
        if timer_status == '2':
            db_operate.update_status(5, "PYTHON", self.timerId)
