# -*- coding:utf-8 -*- 

"""
    @version: 1.0
    @author: Danson
    @software: PyCharm
    @file: mt_ticket_by_url_m_spider.py
    @time: 2017/9/4 17:05
    @description: Fetch ticket/attraction data from Meituan WAP-site product URLs (basic attraction info via the basic API, commodity/price data via the business API)
    @todo:
"""
import random
import re

import signal
import string

from scrapy import signals
import logging
import scrapy
import traceback
from scrapy.http import Request
from pangolin.common.date_common import DateCommon
from pangolin.spider_enum.companyCodeEnum import getCompanyCode
from pangolin.common.db_operateCommon import *
from pangolin.common.string_common import StringCommon
from pangolin.items import TicketDetailItem
from selenium import webdriver
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from pangolin.common.http_common import *
from pangolin.settings import IS_SAVE_SERVER
from pangolin.settings import IS_SNAPSHOT
import sys, os
from datetime import *
import time

reload(sys)
sys.setdefaultencoding('UTF-8')


class PangolinSpider(scrapy.Spider):
    name = 'mt_ticket_by_url_m_spider'
    allowed_domain = 'meituan.com'  # 允许爬虫运行的域

    def __init__(self, crawler, spider_name, city_code, company_code, task_id, task_date, timer_param,
                 timer_granularity, timer_id, url, lv_product_id, miss_product_flag):
        # 接受入参,全局引用
        self.save_server = IS_SAVE_SERVER
        self.snapshot = IS_SNAPSHOT

        self.spider_name = spider_name
        self.city_code = city_code
        self.company_code = company_code
        self.task_id = task_id
        self.task_date = task_date
        self.timer_param = timer_param
        self.timer_granularity = timer_granularity
        self.timer_id = timer_id
        self.url = url
        self.lv_product_id = lv_product_id
        self.miss_product_flag = miss_product_flag
        # 爬虫爬取前先删除同一批次同一城市的数据
        if miss_product_flag == 'FALSE':
            db_operate = DbOperateClass()
            db_operate.delete_pd_and_cd_data("ticket_pdetail_mt", "ticket_cdetail_mt", getCompanyCode(company_code),
                                             city_code, task_id, task_date)
        self.crawler = crawler
        # 绑定爬虫开始和结束信号，便于监听爬虫开始与结束
        cs = crawler.signals
        cs.connect(self.spider_closed, signals.spider_closed)
        cs.connect(self.spider_opened, signals.spider_opened)

    def start_requests(self):
        # 查询需要爬取的产品URL
        db_operate = DbOperateClass()
        urls = []
        lv_product_ids = {}
        mode = re.compile(r'poiId=\d+')
        if self.miss_product_flag == 'FALSE':
            # 正常按城市抓取
            sql = "SELECT mt_url,product_id FROM cpa_care_product WHERE city_code = '" + self.city_code + "' AND business_code='TICKET'"
            try:

                (fc, count) = db_operate.select_by_sql(sql)
                for row in fc:
                    if row[0] is not None and row[0] != '':
                        url = row[0]
                        lv_product_id = row[1]
                        poi_id = mode.findall(url)[0].split('poiId=')[1]  # 避免出现poiId=1086762&...格式数据
                        product_url = "http://itrip.meituan.com/volga/api/v3/trip/poi/basic/info/{}?poiId={}".format(
                            poi_id, poi_id)
                        lv_product_ids.setdefault(product_url, lv_product_id)
                        urls.append(product_url)
                self.lv_product_ids = lv_product_ids
            except Exception as e:
                logging.info("Exception %s" % e)
                raise
        else:
            # 补抓取
            poi_id = mode.findall(self.url)[0].split('poiId=')[1]
            url = "http://itrip.meituan.com/volga/api/v3/trip/poi/basic/info/{}?poiId={}".format(poi_id, poi_id)
            lv_product_ids.setdefault(url, self.lv_product_id)
            self.lv_product_ids = lv_product_ids
            urls.append(url)
        for url in urls:
            yield Request(url, dont_filter=True)

    def parse(self, response):  # 页面解析函数，这里必须为parse()
        sels = json.loads(response.body)
        try:
            item = TicketDetailItem()
            item['type'] = "TICKET"
            item['timer_param'] = self.timer_param
            item['timer_granularity'] = self.timer_granularity
            item['business_type'] = "ticket"

            item['product_table'] = "ticket_pdetail_mt"
            item['data_type'] = "TicketDetail"
            item['detail_id'] = DateCommon.get_id_by_datetime()
            item['company_code'] = getCompanyCode(self.company_code)
            item['platform_code'] = "WAP"  # 操作平台WAP，PC
            item['city_code'] = self.city_code
            item['scenery_name'] = sels['poiBasicInfo']['poiInfo']['name']
            item['scenery_addr'] = sels['poiBasicInfo']['poiInfo']['addr']
            product_id = sels['poiBasicInfo']['poiInfo']['poiid']
            item['product_id'] = product_id
            item['detail_url'] = "http://i.meituan.com/awp/h5/lvyou/poi/detail/index.html?poiId={}".format(product_id)
            # 产品详情
            # item['lowest_price'] = ""
            item['score'] = sels['poiBasicInfo']['poiInfo']['commentModel']['avgscore']
            item['image_num'] = sels['poiBasicInfo']['imgListSize']
            city_id = sels['poiBasicInfo']['poiInfo']['cityId']
            item['introduction'] = ""
            item['score_tag'] = ""
            star_level = sels['poiBasicInfo']['poiInfo']
            item['star_level'] = star_level['tourPlaceStar'] if 'tourPlaceStar' in star_level else ""
            item['create_time'] = DateCommon.get_current_date()
            item['update_time'] = item['create_time']
            item['task_id'] = self.task_id
            item['task_date'] = self.task_date
            item['lv_product_id'] = self.lv_product_ids.get(response.url)
            if self.snapshot == 'TRUE':
                # 获得截图地址
                compress_path = self.save_screenshot(response, item['product_id'], self.city_code, item['detail_url'],
                                                     self.company_code, self.task_id, self.task_date)
                item['snapshot_addr'] = compress_path
            else:
                item['snapshot_addr'] = " "
            # 商品详情
            item['commodity_table'] = "ticket_cdetail_mt"
            item['commodity_type'] = []  # 商品类型或名称
            item['commodity_id'] = []  # 商品id
            item['commodity_name'] = []  # 商品名称
            item['commodity_url'] = []  # 商品名称
            item['sale_cond'] = []  # 提前时间、销售条件
            item['refund_cond'] = []  # 退改条件
            item['rack_rate'] = []  # 门市价
            item['sale_price'] = []  # 售价
            item['preferential'] = []  # 优惠
            item['price_list_id'] = ""
            item['booking_effective_time'] = []
            # relative_url = "http://itrip.meituan.com/volga/api/v3/trip/poi/business/info/{}?poiId={}".format(product_id,product_id)

            """
             MT门票商品接口新增了token验证，目前此token有漏洞，只需要改变token值即可，MT尚未做token生成规则验证.20170926
             目前思路：随机生成1个一位字符串和1个五位字符串替换掉从MT获取的token部分值(尽可能避免token值相同)
            """
            rondom_str = ''.join(random.sample(string.ascii_letters + string.digits, 1))
            rondom_str1 = ''.join(random.sample(string.ascii_letters + string.digits, 5))
            token = "eJxVjF0LgkAQRf%252FLPC%252Fuun6sCRJCENtbYr2ID6aiS36haxnRf28EIYKBM3O4c98wygJ8kzFmcgJ6wt1{}hrs08vJ" \
                    "lnEsj%252FHd9h7DZeD%252BAnwuXEtAVLVxOhSCz8EJaTkt%252FGbZw1ITEAtdaDT6ky2lLpOeuMvG{}9p9hxo7dDm8epn" \
                    "OvSKFqXOVENVV5SLUeu22aOVRcAFd22LAwHAxjbGRuR9Y7ZRb5xU1YEP5WmJL5UMw2MVRucggM8XBApD%252BQ%253D%253D" \
                .format(rondom_str, rondom_str1)
            relative_url = "http://itrip.meituan.com/volga/api/v3/trip/poi/business/info/{}?poiId={}&source=mt&client" \
                           "=wap&uuid=A56B3CD6ED58658759BE1AE6CF893D781CD77838ED619383D21A85551FDBA8B6&cityId=10&fecl" \
                           "ient=lvyou_wap&platform=4&partner=11&originUrl=http%3A%2F%2Fi.meituan.com%2Fawp%2Fh5%2Flv" \
                           "you%2Fpoi%2Fdetail%2Findex.html%3FpoiId%3D{}&_token={}" \
                .format(product_id, product_id, product_id, token)
            request = Request(relative_url, meta={'item': item}, callback=self.parse_commodity_list)
            yield request

        except Exception as e:
            db_operate = DbOperateClass()
            logging.error("记录日志：标签变动 %s ,%s" % (response.url, e))
            logging.info("页面源码：%s ,%s" % (response.url, response.body))
            logging.error("Exception: %s" % traceback.format_exc().replace("\"", "'"))
            # 错误描述只截取前500个字符，剩下的用***代替
            error_desc = traceback.format_exc().replace("\"", "'").replace(" ", "")
            if len(error_desc) > 500:
                error_desc = error_desc[:500] + '***'
            # 错误日志记入日志表
            sql = "insert into cpa_oper_log (log_type,company_code,busi_type,url,batch_num,timer_code,error_code,error_desc,task_id,task_date) values (\"%s\",\"%s\",\"%s\",\"%s\",\"%s\" ,\"%s\",\"%s\",\"%s\",\"%s\",\"%s\")" \
                  % ("C", getCompanyCode(self.company_code), "门票", response.url, self.task_date, "timer_code",
                     "error_code", error_desc, str(self.task_id), str(self.task_date))
            db_operate.update_by_sql(sql)
            if self.save_server == 'TRUE':
                http_common = HttpCommon()
                operate_log = {'type': 'LOG',
                               'log_type': "C",
                               'company_code': getCompanyCode(self.company_code),
                               'busi_type': "门票",
                               'url': response.url,
                               'batch_num': self.task_date,
                               'timer_code': "timer_code",
                               'error_code': "error_code",
                               'error_desc': error_desc,
                               'task_id': str(self.task_id),
                               'task_date': str(self.task_date)}
                http_common.get_method(operate_log)

    def parse_commodity_list(self, response):
        item = response.meta['item']
        if response.body.find("{") > -1:
            sites = json.loads(response.body)
            commodity_data = sites['deals']['data']
            if commodity_data is not None:
                for site in commodity_data:
                    commodity_type = site['productName']
                    for channel_price in site['productModels']:
                        preferential = ""
                        if site['needTicketGroup'] is True:
                            commodity_type = site['productName'] + channel_price['ticketName']
                            for ticket_deals in channel_price['ticketDeals']:
                                if ticket_deals['title5'].find(',') > -1:
                                    commodity_name = ticket_deals['title5'].split(',')[0]
                                    booking_effective_time = ticket_deals['title5'].split(',')[1]
                                else:
                                    commodity_name = ticket_deals['title5']
                                    booking_effective_time = ""
                                commodity_id = ticket_deals['id']
                                commodity_url = "http://i.meituan.com/trip/lvyou/order/buy/mtp/{}".format(commodity_id)
                                sale_cond = ticket_deals['listTags'][0]['title'] + "。" + booking_effective_time
                                refund_cond = ticket_deals['listTags'][1]['title'].replace("官方", "")
                                rack_rate = ticket_deals['value']
                                sale_price = ticket_deals['price']

                                item['commodity_type'].append(commodity_type)  # 商品类型或名称
                                item['commodity_id'].append(commodity_id)  # 商品id
                                item['commodity_name'].append(commodity_name)  # 商品名称
                                item['commodity_url'].append(commodity_url)  # 商品链接
                                item['sale_cond'].append(sale_cond)  # 提前时间、销售条件
                                item['refund_cond'].append(refund_cond)  # 退改条件
                                item['rack_rate'].append(rack_rate)  # 门市价
                                item['sale_price'].append(sale_price)  # 售价
                                item['preferential'].append(preferential)  # 优惠
                                item['booking_effective_time'].append(booking_effective_time)
                        elif site['productType'] == 'MT_TJ':
                            # 获取美团推荐商品会导致商品重复，因此排除"美团推荐"
                            # 如需获取美团推荐商品，删除此elif且放开下方else中代码段1
                            pass
                        else:
                            # if channel_price['title5'].find(',') > -1:
                            #     commodity_name = channel_price['title5'].split(',')[0]
                            #     sale_cond = channel_price['title5'].split(',')[1]
                            # else:
                            #     commodity_name = channel_price['title5']
                            #     sale_cond = ""
                            commodity_name = channel_price['title5']
                            booking_effective_time = ""
                            sale_cond = ""
                            commodity_id = channel_price['id']
                            commodity_url = "http://i.meituan.com/trip/lvyou/order/buy/mtp/{}".format(commodity_id)
                            # 代码段1：获取美团推荐商品中的预售条件和是否可退
                            # if site['productType'] == 'MT_TJ':
                            #     sale_cond = channel_price['listTags'][0]['title'] + sale_cond
                            #     refund_cond = channel_price['listTags'][1]['title']
                            # else:
                            #     refund_cond = ""
                            refund_cond = ""
                            rack_rate = channel_price['value']
                            sale_price = channel_price['price']

                            item['commodity_type'].append(commodity_type)  # 商品类型或名称
                            item['commodity_id'].append(commodity_id)  # 商品id
                            item['commodity_name'].append(commodity_name)  # 商品名称
                            item['commodity_url'].append(commodity_url)  # 商品链接
                            item['sale_cond'].append(sale_cond)  # 提前时间、销售条件
                            item['refund_cond'].append(refund_cond)  # 退改条件
                            item['rack_rate'].append(rack_rate)  # 门市价
                            item['sale_price'].append(sale_price)  # 售价
                            item['preferential'].append(preferential)  # 优惠
                            item['booking_effective_time'].append(booking_effective_time)
                sale_price_list = sorted(item['sale_price'])  # 将item['sale_price']按照从小到大排序，获取最低价
                item['lowest_price'] = sale_price_list[0]
                return item
            else:
                logging.info("此产品当日无可售商品或已下架！%s" % DateCommon.get_current_date())

    # 按元素截图
    def save_screenshot(self, response, product_id, city_code, detail_url, company_code, task_id, task_date):
        compress_path = ""
        try:
            file_name = getCompanyCode(
                company_code) + "__" + city_code + "__" + task_id + "__" + task_date + "__" + product_id + ".png"
            service_args = [
                '--proxy=' + response.meta['proxy'],
                '--load-images=no', ]
            dcap = dict(DesiredCapabilities.PHANTOMJS)
            # # 伪造ua信息
            # dcap["phantomjs.page.settings.userAgent"] = (request.headers['User-Agent'])
            dcap["phantomjs.page.settings.resourceTimeout"] = 5000  # 超时
            # 添加头文件
            dcap["phantomjs.page.customHeaders.Referer"] = (response.url)
            driver = webdriver.PhantomJS(desired_capabilities=dcap, service_args=service_args)  # 线上使用方式
            logging.info("%s PhantomJS is getUrl..." % datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
            driver.set_page_load_timeout(100)
            driver.set_script_timeout(100)
            driver.get(detail_url)

            # 截图
            compress_path = os.getcwd() + "/compressImage/" + file_name
            logging.info("%s save_screenshot is starting..." % datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
            driver.save_screenshot(compress_path)
            logging.info("%s save_screenshot is end..." % datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
        except Exception as e:
            logging.error("截图失败 %s ,%s" % (detail_url, e))
        finally:
            try:
                driver.service.process.send_signal(signal.SIGTERM)
                driver.quit()
            except Exception as e:
                logging.error("driver.quit_failed %s" % e)
        return compress_path

    @classmethod
    def from_crawler(cls, crawler, **kwargs):
        logging.info('inside_from_crawler: ')
        if kwargs['miss_product_flag'] == 'FALSE':
            kwargs['url'] = ''
            kwargs['lv_product_id'] = ''
        else:
            kwargs['timer_granularity'] = ''
            kwargs['timer_id'] = ''

        return cls(
            crawler,
            spider_name=kwargs['spider_name'],
            city_code=kwargs['city_code'],
            company_code=kwargs['company_code'],
            task_id=kwargs['task_id'],
            task_date=kwargs['task_date'],
            timer_param=kwargs['timer_param'],
            timer_granularity=kwargs['timer_granularity'],
            timer_id=kwargs['timer_id'],
            url=kwargs['url'],
            lv_product_id=kwargs['lv_product_id'],
            miss_product_flag=kwargs['miss_product_flag']
        )

    def spider_opened(self, spider):
        logging.info('Spider_opened_list: %s' % spider.name)
        # 爬虫设为启动状态2
        db_operate = DbOperateClass()
        db_operate.update_status(2, "PYTHON", self.timer_id)

    def spider_closed(self, spider, reason):
        logging.info('spider_closed_detail: %s ,reason:%s' % (spider.name, reason))
        db_operate = DbOperateClass()
        if self.miss_product_flag == 'TRUE':  # 补录
            db_operate.update_miss_status(1, self.url, self.task_id)
        else:
            sql = "SELECT timer.timer_status FROM cpa_timer timer WHERE timer.timer_type='PYTHON' " \
                  "AND timer.timer_id = '" + self.timer_id + "'"
            (fc, count) = db_operate.select_by_sql(sql)
            spider.logger.info('SELECT_COUNT: %s ,SQL:%s' % (count, sql))
            timer_status = ""
            for row in fc:
                timer_status = row[0]
            logging.info("timer_status:%s" % timer_status)
            if timer_status == '2':
                db_operate.update_status(5, "PYTHON", self.timer_id)
