# -*- coding: UTF-8 -*-
import re
import scrapy
from scrapy.selector import Selector
from pangolin.items import TicketPlistItem
from scrapy.http import Request
import traceback
from scrapy import signals
import os, sys
import subprocess
from pangolin.common.date_common import DateCommon
from pangolin.spider_enum.companyCodeEnum import getCompanyCode
from pangolin.common.db_operateCommon import *
from pangolin.common.string_common import StringCommon
from pangolin.items import TicketDetailItem
from pangolin.common.http_common import *
from pangolin.settings import RETRY_TIMES
import logging
import re
from urlparse import urljoin
from pangolin.settings import IS_SAVE_SERVER
from pangolin.settings import IS_SNAPSHOT
from selenium import webdriver
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from PIL import Image
from datetime import *
import time
import signal

# Python 2 hack: restore sys.setdefaultencoding (deleted by site.py at startup)
# via reload(), then force the interpreter-wide default encoding to UTF-8 so
# implicit str<->unicode conversions of the Chinese text handled by this spider
# do not raise UnicodeDecodeError.  This is Python-2-only and affects the whole
# process, not just this module.
reload(sys)
sys.setdefaultencoding('UTF-8')


class PangolinSpider(scrapy.Spider):
    """Scrapy spider for Tuniu (tuniu.com) scenic-spot ticket detail pages.

    For each start URL the spider extracts product-level fields (name,
    address, lowest price, score, ...) and one row per commodity (name,
    url, sale/refund conditions, rack rate, sale price, promotion) into a
    single ``TicketDetailItem``, optionally saving a PhantomJS screenshot
    of the price area.  Rows are keyed by the crawl batch
    (``task_id``/``task_date``) and written by downstream pipelines into
    the tables named in ``product_table`` / ``commodity_table``.
    Spider open/close signals update the scheduler row in ``cpa_timer``.
    """

    name = 'tn_ticket_list_by_url_spider'
    allowed_domain = 'tuniu.com'  # domain the spider is allowed to crawl

    def __init__(self, crawler, spider_name, city_code, company_code, task_id, task_date, timer_param, timer_granularity,
                 timer_id,url ,lv_product_id):
        """Store crawl parameters and build the start-URL list.

        When ``url`` is empty (normal mode) the URLs are loaded from the
        ``cpa_care_product`` table for this ``city_code``, and any data
        of the same batch/city is deleted first so the run is
        re-entrant.  When ``url`` is non-empty the spider runs in
        "missing product" re-capture mode and crawls only that URL.

        :param crawler: scrapy ``Crawler``; used to connect open/close signals.
        :param spider_name: logical spider name used in log messages.
        :param city_code: city the products belong to.
        :param company_code: company code, mapped via ``getCompanyCode()``.
        :param task_id: crawl-batch task id.
        :param task_date: crawl-batch task date.
        :param timer_param: scheduler parameter, copied onto each item.
        :param timer_granularity: scheduler granularity, copied onto each item.
        :param timer_id: ``cpa_timer`` row id used for status updates.
        :param url: single product url for re-capture mode ('' = normal mode).
        :param lv_product_id: product id paired with ``url`` in re-capture mode.
        """
        # Accept the input parameters and keep them as instance attributes.
        self.save_server = IS_SAVE_SERVER
        self.snapshot = IS_SNAPSHOT
        self.spider_name = spider_name
        self.city_code = city_code
        self.company_code = company_code
        self.task_id = task_id
        self.task_date = task_date
        self.timer_param = timer_param
        self.timer_granularity = timer_granularity
        self.timer_id = timer_id
        self.url = url
        self.lv_product_id = lv_product_id
        self.miss_product_flag = 'FALSE'
        first_page_url = []
        lv_product_ids = {}
        if url == "":
            db_operate = DbOperateClass()
            # NOTE(review): SQL built by string concatenation; city_code comes
            # from the scheduler, but a parameterized query would be safer.
            sql = "SELECT tn_url,product_id FROM cpa_care_product WHERE city_code = '" +city_code+"' AND business_code='TICKET'"
            logging.info("SELECT_SQL:%s" % sql)
            (fc, count) = db_operate.select_by_sql(sql)

            # Collect (url -> product id) pairs, skipping NULL/blank urls.
            for row in fc:
                if  row[0] is not None:
                    if row[0].strip() :
                        scenery_url = StringCommon.remove_blank(row[0])
                        lv_product_id = StringCommon.remove_blank(row[1])
                        lv_product_ids.setdefault(scenery_url,lv_product_id)
                        first_page_url.append(scenery_url)

            # Before crawling, delete data of the same batch and city so the
            # run can be repeated without duplicating rows.
            db_operate.delete_pd_and_cd_data("ticket_pdetail_tn", "ticket_cdetail_tn", getCompanyCode(company_code),
                city_code, task_id, task_date)
        else :
            # Re-capture mode: crawl only the single url that was missed.
            lv_product_ids.setdefault(url,lv_product_id)
            first_page_url.append(url)
            self.miss_product_flag = 'TRUE'

        self.start_urls = first_page_url
        self.lv_product_ids = lv_product_ids
        self.base_urls = "http://menpiao.tuniu.com/"
        self.product_table = "ticket_pdetail_tn"  # product table
        self.commodity_table = "ticket_cdetail_tn"  # commodity table
        self.count = 0  # retry counter (shared across all responses of this run)

        # Bind spider open/close signals so start and end of the crawl
        # can be observed and reflected in the scheduler tables.
        self.crawler = crawler
        cs = crawler.signals
        cs.connect(self.spider_closed, signals.spider_closed)
        cs.connect(self.spider_opened, signals.spider_opened)

    def parse(self, response):  # extract data into Items, mainly via XPath and CSS selectors
        """Parse one ticket detail page into a ``TicketDetailItem``.

        Product-level fields are read from the page header; each
        commodity ``<li>`` contributes one entry to the parallel lists
        (``commodity_id``, ``sale_price``, ...).  On any exception the
        error is logged to ``cpa_oper_log``; an ``IndexError`` (empty
        selector result, typical of Tuniu captcha pages) triggers up to
        ``RETRY_TIMES`` re-requests of the same URL.
        """
        sels = Selector(response)
        try:
            item = TicketDetailItem()
            item['type'] = "TICKET"
            item['timer_param'] = self.timer_param
            item['timer_granularity'] = self.timer_granularity
            item['business_type'] = "ticket"

            item['product_table'] = self.product_table
            item['data_type'] = "TicketDetail"
            item['detail_id'] = DateCommon.get_id_by_datetime()
            item['company_code'] = getCompanyCode(self.company_code)
            item['platform_code'] = "PC"  # platform: WAP or PC
            item['city_code'] = self.city_code
            item['scenery_name'] = StringCommon.remove_blank(
                sels.xpath('//*[@class="v2_ct_title"]/text()').extract()[0])  # scenic-spot name
            item['scenery_addr'] = StringCommon.remove_blank(
                sels.xpath('//*[@class="v2_detail_address v2_tp_text_com"]/span/text()').extract()[0])  # scenic-spot address
            item['detail_url'] = response.url  # detail-page url
            item['product_id'] = sels.xpath('//*[@id="poiId"]').xpath('@value').extract()[0]
            # Product details
            if sels.xpath('//*[@class="v2-money"]/text()'):
                lowest_price = StringCommon.remove_blank(
                   sels.xpath('//*[@class="v2-money"]/text()').extract()[0])  # lowest price
                # Keep only the leading digits of e.g. "¥120起".
                match = re.search(r'\d+', lowest_price)
                item['lowest_price'] = match.group(0)
            else:
                item['lowest_price'] = ""

            score = sels.xpath('//*[@class="v2_tp_sat"]/span/text()')
            if score:
                item['score'] = StringCommon.remove_blank(sels.xpath('//*[@class="v2_tp_sat"]/span/text()').extract()[0])  # score
            else:
                item['score'] = ""
            introduction = sels.xpath('//*[@class="tp_tips"]/span/text()')  # usage notes
            if introduction:
                # NOTE(review): the presence check uses .../span/text() but the
                # extraction uses .../text() -- confirm both target the same node.
                item['introduction'] = StringCommon.remove_blank(
                    sels.xpath('//*[@class="tp_tips"]/text()').extract()[0])  # usage instructions
            else:
                item['introduction'] = ""

            socre_tag = sels.xpath('//*[@class="tpc_1"]/text()')  # impression tag ("socre" is a pre-existing typo for "score")
            if socre_tag:
                socre_tag = StringCommon.remove_blank(sels.xpath('//*[@class="tpc_1"]/text()').extract()[0])
                item['score_tag'] = socre_tag
            else:
                item['score_tag'] = ""
            # Number of carousel images = number of <li> elements in the gallery.
            image_num = sels.xpath('//*[@class="es-carousel"]/ul').extract()[0]
            item['image_num'] = image_num.count("<li")
            item['star_level'] = ""  # star level (not available on this page)
            item['create_time'] = DateCommon.get_current_date()
            item['update_time'] = item['create_time']
            item['task_id'] = self.task_id
            item['task_date'] = self.task_date
            item['lv_product_id'] = self.lv_product_ids.get(response.url)
            if self.snapshot == 'TRUE':
                # Take and crop a screenshot; store its path on the item.
                compress_path = self.save_screenshot(response, item['product_id'], self.city_code,
                                                     self.company_code, self.task_id, self.task_date)
                item['snapshot_addr'] = compress_path
            else:
                item['snapshot_addr'] = " "
            # Commodity details: parallel lists, one entry per commodity row.
            item['commodity_table'] = self.commodity_table
            item['commodity_type'] = []  # commodity type or category label
            item['commodity_id'] = []  # commodity id
            item['commodity_name'] = []  # commodity name
            item['commodity_url'] = []  # commodity url
            item['sale_cond'] = []  # advance-booking time / sale conditions
            item['refund_cond'] = []  # refund/change conditions
            item['rack_rate'] = []  # rack rate (list price)
            item['sale_price'] = []  # sale price
            item['preferential'] = []  # promotion/discount
            item['price_list_id'] = ""
            product_tag = sels.css('.v2_line_box.line_box')
            if product_tag:
                sites_evens = sels.css('.content_10.content >li')
                commodity_type = "门票"
                for sites in sites_evens:
                    tag = sites.css('.ticket_type')  # category label row, e.g. "门票" (ticket) or "专项" (special)
                    if tag:
                        commodity_type_tag = StringCommon.remove_blank(
                            sites.css('.ticket_type').xpath('text()').extract()[0])  # commodity type
                        # A label here means this <li> is a category header row;
                        # remember it for the data rows that follow.
                        commodity_type = commodity_type_tag
                    else:
                        commodity_name = sites.css('.f_0053aa').xpath('text()')
                        if commodity_name:
                            commodity_name = StringCommon.remove_blank(sites.css('.f_0053aa').xpath('text()').extract()[0]) # commodity name
                        else:
                            commodity_name = ""

                        # Commodity url comes from the buy-form action; the id is
                        # embedded in its query string between productId= and &ticketId=.
                        commodity_url = StringCommon.remove_blank(sites.css('.v2_paytype>form').xpath('@action').extract()[0])
                        commodity_url = urljoin(self.base_urls, commodity_url)
                        commodity_id = commodity_url.split("productId=")[1].split("&ticketId=")[0]
                        sale_cond = StringCommon.remove_blank(sites.css('.l_time.v2_product_com').xpath('text()').extract()[0])

                        sale_tag = sites.css('.right_area >dl')  # region containing the refund conditions
                        if sale_tag:
                            # NOTE(review): if no <dd> text is extracted for any <dl>,
                            # refund_cond may be left unassigned here (NameError risk)
                            # -- confirm the page always yields at least one text node.
                            for sale in sale_tag:
                                if sale.css('.clearfix>dd'):
                                    for refund_tag in sale.css('.clearfix>dd').xpath('text()').extract():
                                        if refund_tag.find('如需退票') != -1 :
                                            refund_cond = refund_tag
                                            break
                                        if refund_tag.find('退改') != -1 :
                                            refund_cond = refund_tag
                                            break
                                        else:
                                            refund_cond = ""
                        else:
                            refund_cond = ""  # refund/change conditions
                        rack_rate = StringCommon.remove_blank(
                            sites.css('.l_g_price.g_price_color > strike').xpath('text()').extract()[0])  # rack rate
                        # Strip the currency symbol: keep only the digits.
                        mode = re.compile(r'\d+')
                        rack_rate = mode.findall(rack_rate)[0]

                        sale_price = StringCommon.remove_blank(
                            sites.css('.money').xpath('text()').extract()[0])  # Tuniu price
                        sale_price = mode.findall(sale_price)[0]

                        preferential_tag_saleOff = sites.css('.saleOffNew').xpath('text()')
                        preferential_tag_fanOff = sites.css('.fanOff >span').xpath('text()')
                        if preferential_tag_saleOff:  # "promotion" badge
                            preferential = StringCommon.remove_blank(
                                sites.css('.saleOffNew').xpath('text()').extract()[0])
                        elif preferential_tag_fanOff:  # "cashback" badge
                            preferential = StringCommon.remove_blank(
                                sites.css('.fanOff >span').xpath('text()').extract()[0])
                        else:
                            preferential = ""

                        item['commodity_type'].append(commodity_type)  # commodity type or category label
                        item['commodity_id'].append(commodity_id)  # commodity id
                        item['commodity_name'].append(commodity_name)  # commodity name
                        item['commodity_url'].append(commodity_url)  # commodity url
                        item['sale_cond'].append(sale_cond)  # advance-booking time / sale conditions
                        item['refund_cond'].append(refund_cond)  # refund/change conditions
                        item['rack_rate'].append(rack_rate)  # rack rate
                        item['sale_price'].append(sale_price)  # sale price
                        item['preferential'].append(preferential)  # promotion/discount
                        # Throttle between commodity rows to reduce ban risk.
                        time.sleep(2)
                yield item
            else:
                # No commodity area -> product missing or delisted; log page source.
                logging.info("%s_无此产品或此产品已下架！！！" % self.spider_name)
                logging.info("页面源码：%s ,%s" % (response.url, response.body))
        except Exception as e:
            db_operate = DbOperateClass()
            logging.info("记录日志: %s ,%s" % (response.url, e))
            logging.info("页面源码：%s ,%s" % (response.url, response.body))
            logging.error("tn_ticket_list_by_url_spider_Exception: %s" % traceback.format_exc().replace("\"", "'"))
            # Keep only the first 500 characters of the error description,
            # replacing the rest with ***.
            error_desc = traceback.format_exc().replace("\"", "'").replace(" ", "")
            if len(error_desc) > 500:
                error_desc = error_desc[:500] + '***'
            # Record the error in the operation-log table.
            # NOTE(review): SQL built via string interpolation -- error_desc is
            # derived from page content; parameterized queries would be safer.
            sql = "insert into cpa_oper_log (log_type,company_code,busi_type,url,batch_num,timer_code,error_code,error_desc,task_id,task_date) values (\"%s\",\"%s\",\"%s\",\"%s\",\"%s\" ,\"%s\",\"%s\",\"%s\",\"%s\",\"%s\")" \
                  % ("C", getCompanyCode(self.company_code), "门票", response.url, self.task_date, "timer_code",
                     "error_code", error_desc, str(self.task_id), str(self.task_date))
            logging.info("INSERT_SQL:%s" % sql)
            db_operate.update_by_sql(sql)
            # Tuniu captcha pages keep the same URL, so when the first selector
            # raises IndexError we retry the same request up to RETRY_TIMES.
            if "list index out of range" in error_desc:
                logging.info("记录日志,页面源码"+response.body)
                if self.count < RETRY_TIMES:
                    self.count += 1
                    time.sleep(3)
                    yield Request(response.url, callback=self.parse, dont_filter=True)
                else:
                    pass


    # Screenshot cropped to specific page elements.
    def save_screenshot(self, response, product_id, city_code, company_code, task_id, task_date):
        """Screenshot the price area of the page with PhantomJS and crop it.

        Loads ``response.url`` in PhantomJS (reusing the response's proxy
        and Referer), saves a full-page screenshot under ``./screenshot/``,
        crops it to the product-info + price-list elements, writes the
        result under ``./compressImage/`` and deletes the full screenshot.

        :returns: path of the cropped image, or "" if anything failed.
        """
        compress_path = ""
        try:
            # File name encodes company/city/task/product so it is unique per batch.
            file_name = getCompanyCode(
                company_code) + "__" + city_code + "__" + task_id + "__" + task_date + "__" + product_id + ".png"
            service_args = [
                '--proxy=' + response.meta['proxy'],
                '--load-images=no', ]
            dcap = dict(DesiredCapabilities.PHANTOMJS)
            # Forge UA info (currently disabled):
            # dcap["phantomjs.page.settings.userAgent"] = (request.headers['User-Agent'])
            dcap["phantomjs.page.settings.resourceTimeout"] = 50000  # wait 50 seconds
            # Add request headers.
            dcap["phantomjs.page.customHeaders.Referer"] = (response.url)

            driver = webdriver.PhantomJS(desired_capabilities=dcap, service_args=service_args)  # production setup
            logging.info("%s TN PhantomJS is getUrl..." % datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
            driver.set_page_load_timeout(100)
            driver.set_script_timeout(100)
            driver.get(response.url)

            # Take the full-page screenshot.
            path = os.getcwd() + "/screenshot/" + file_name
            logging.info("%s TN save_screenshot is starting..." % datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
            driver.save_screenshot(path)
            logging.info("%s TN save_screenshot is end..." % datetime.now().strftime('%Y-%m-%d %H:%M:%S'))

            # Derive the crop rectangle from the product-info element plus the
            # height of the price-list element below it.
            element = driver.find_element_by_xpath("//*[@class='v2_ticket_proinf clearfix']")
            element1 = driver.find_element_by_xpath("//*[@class='v2_line_box line_box']")
            left = int(element.location['x'])
            top = int(element.location['y'])
            right = int(element.location['x'] + element.size['width'])
            bottom = int(element.location['y'] + element.size['height'] + element1.size['height'])

            # Crop to the computed rectangle and drop the full screenshot.
            compress_path = os.getcwd() + "/compressImage/" + file_name
            im = Image.open(path)
            im = im.crop((left, top, right, bottom))  # (left, top, right, bottom)
            im.save(compress_path)
            os.remove(path)
        except Exception as e:
            logging.error("截图失败 %s ,%s" % (response.url, e))
        finally:
            # Always try to kill PhantomJS; it leaks processes otherwise.
            try:
                driver.service.process.send_signal(signal.SIGTERM)
                driver.quit()
            except Exception as e:
                logging.error("tn driver.quit_failed %s" % e)
        return compress_path

    @classmethod
    def from_crawler(cls, crawler, **kwargs):
        """Build the spider from crawler kwargs (Scrapy entry point).

        ``miss_product_flag`` selects the mode: "FALSE" clears the
        re-capture arguments (url / lv_product_id); anything else clears
        the scheduler arguments (timer_granularity / timer_id).
        """
        logging.info('inside_from_crawler: ')

        if kwargs['miss_product_flag'] == "FALSE" :
            kwargs['url'] = ''
            kwargs['lv_product_id'] = ''
        else :    
            kwargs['timer_granularity'] = ''
            kwargs['timer_id'] = ''

        return cls(
            crawler,
            spider_name=kwargs['spider_name'],
            city_code=kwargs['city_code'],
            company_code=kwargs['company_code'],
            task_id=kwargs['task_id'],
            task_date=kwargs['task_date'],
            timer_param=kwargs['timer_param'],
            timer_granularity=kwargs['timer_granularity'],
            timer_id=kwargs['timer_id'],
            url=kwargs['url'],
            lv_product_id=kwargs['lv_product_id']
            )

    def spider_opened(self, spider):
        """Signal handler: mark the timer row as running when the spider starts."""
        logging.info('Spider_opened_list: %s' % spider.name)
        # Set the spider's timer to running state (2).
        db_operate = DbOperateClass()
        db_operate.update_status(2, "PYTHON", self.timer_id)

    def spider_closed(self, spider, reason):
        """Signal handler: update scheduler state when the spider finishes.

        Re-capture runs mark the missed-product row as done; normal runs
        move the timer from running (2) to finished (5) if still running.
        """
        logging.info('spider_closed_list: %s ,reason:%s ,timerId:%s' % (spider.name, reason,self.timer_id))

        db_operate = DbOperateClass()
        if self.miss_product_flag == 'TRUE':  # re-capture run
            db_operate.update_miss_status(1, self.url,self.task_id)
        else :    
            # NOTE(review): timer_id interpolated into SQL directly -- it comes
            # from the scheduler, but a parameterized query would be safer.
            sql = "SELECT timer.timer_status FROM cpa_timer timer WHERE timer.timer_type='PYTHON' AND timer.timer_id = '" \
                + self.timer_id + "'"
            logging.info("SELECT_SQL:%s" % sql)
            (fc, count) = db_operate.select_by_sql(sql)
            spider.logger.info('SELECT_COUNT: %s ,SQL:%s' % (count, sql))
            timer_status = ""
            for row in fc:
                timer_status = row[0]
            logging.info("timer_status:%s" % timer_status)
            if timer_status == '2':
                db_operate.update_status(5, "PYTHON", self.timer_id)
            else:
                pass