# -*- coding:utf-8 -*-

"""
    @version: 1.0
    @author: Danson
    @software: PyCharm
    @file: xc_hotel_by_url_spider_splash.py
    @time: 2017/8/07
    @description:根据携程 PC站的url获取酒店相关数据(通过splash渲染页面获取数据)
    @todo:
"""

import random
import requests
import scrapy
import signal
from scrapy.selector import Selector

from scrapy_splash import SplashRequest
import traceback
from scrapy import signals
import os, sys
from pangolin.common.date_common import DateCommon
from pangolin.common.string_common import StringCommon
from pangolin.middlewares import RotateUserAgentMiddleware
from pangolin.spider_enum.companyCodeEnum import getCompanyCode
from pangolin.common.db_operateCommon import *
from pangolin.items import HotelDetailItem
from pangolin.common.http_common import *
import logging
import re
from pangolin.settings import IS_SAVE_SERVER
from pangolin.settings import IS_SNAPSHOT
from selenium import webdriver
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from PIL import Image
from datetime import *
import time as times

# Python 2 only: reload(sys) restores the setdefaultencoding attribute that
# site.py deletes at interpreter startup, then the process-wide default string
# encoding is forced to UTF-8 so implicit str<->unicode conversions of the
# Chinese text handled by this spider do not raise UnicodeDecodeError.
# (This idiom does not exist in Python 3.)
reload(sys)
sys.setdefaultencoding('UTF-8')


class XCHotelSpider(scrapy.Spider):
    """Scrape hotel detail data from Ctrip's PC site, rendering pages via Splash.

    The spider either crawls every cared-for hotel URL of a city (read from
    the ``cpa_care_product`` table) or, in "miss product" mode
    (``miss_product_flag != 'FALSE'``), re-crawls the single URL supplied on
    the command line.  Each successfully rendered page is turned into one
    :class:`HotelDetailItem` carrying both the hotel-level fields and
    parallel per-room lists.
    """

    name = 'xc_hotel_by_url_pc_spider_splash'
    # Fixed: was the inert `allowed_domain = 'ctrip.come'` (typo in the
    # domain AND wrong attribute name -- Scrapy's offsite middleware only
    # reads `allowed_domains`).
    allowed_domains = ['ctrip.com']

    # Maximum number of Splash re-renders attempted when the minimum-price
    # node is missing from the rendered page (incomplete render).
    MAX_RENDER_RETRIES = 3

    def __init__(self, crawler, spider_name, city_code, company_code, task_id, task_date, timer_param,
                 timer_granularity, timer_id, url, lv_product_id, miss_product_flag):
        """Store task parameters, build the start-URL list and hook signals.

        :param crawler: running Crawler, used to connect open/close signals.
        :param miss_product_flag: 'FALSE' -> crawl every URL of ``city_code``
            read from the DB; anything else -> re-crawl only ``url``
            (supplementary / "miss product" mode).
        """
        super(XCHotelSpider, self).__init__()
        self.save_server = IS_SAVE_SERVER
        self.snapshot = IS_SNAPSHOT

        self.spider_name = spider_name
        self.city_code = city_code
        self.company_code = company_code
        self.task_id = task_id
        self.task_date = task_date
        self.timer_param = timer_param
        self.timer_granularity = timer_granularity
        self.timer_id = timer_id
        self.url = url
        self.lv_product_id = lv_product_id
        self.miss_product_flag = miss_product_flag
        self.splash_request = True

        db_operate = DbOperateClass()
        urls = []
        lv_product_ids = {}  # maps xc_url -> lv product id, looked up in parse()
        if self.miss_product_flag == 'FALSE':
            # NOTE(review): SQL built by string concatenation. city_code comes
            # from task configuration, but switch to a parameterized query if
            # select_by_sql supports placeholders.
            sql = "SELECT xc_url,product_id FROM cpa_care_product WHERE city_code = '" + self.city_code + \
                  "' AND business_code='HOTEL'"
            try:
                (fc, count) = db_operate.select_by_sql(sql)
                for row in fc:
                    if row[0] is not None:
                        lv_product_ids.setdefault(row[0], row[1])
                        urls.append(row[0])
            except Exception as e:
                logging.info("Exception %s" % e)
                raise
            # Delete data of the same batch and city before re-crawling.
            db_operate.delete_pd_and_cd_data("hotel_pdetail", "hotel_cdetail", getCompanyCode(company_code),
                                             city_code, task_id, task_date)
        else:
            lv_product_ids.setdefault(self.url, self.lv_product_id)
            urls.append(self.url)
        self.lv_product_ids = lv_product_ids
        self.start_urls = urls

        self.base_urls = "http://hotels.ctrip.com"
        # Connect spider open/close signals so crawl status can be tracked.
        self.crawler = crawler
        cs = crawler.signals
        cs.connect(self.spider_closed, signals.spider_closed)
        cs.connect(self.spider_opened, signals.spider_opened)

    @staticmethod
    def _first(values, default=''):
        """Return the first element of *values*, or *default* when empty."""
        return values[0] if values else default

    def start_requests(self):
        """Issue an initial Splash render request for every start URL.

        Retry accounting lives in each request's meta (see parse), not on
        the spider, so concurrent responses cannot race on one counter.
        """
        for url in self.start_urls:
            yield SplashRequest(url, self.parse, args={'wait': 10.5, 'dont_process_response': False})

    def parse(self, response):
        """Parse a rendered hotel detail page into a HotelDetailItem.

        When the minimum-price node is absent the render is considered
        incomplete and the page is re-rendered up to MAX_RENDER_RETRIES
        times.  (The original code kept one shared, per-start-URL counter
        and reset it to zero after three failures, which retried forever
        on pages where the node never appears.)
        """
        if response.status != 200:
            # Splash returned an error status -- try rendering again.
            yield SplashRequest(response.url, self.parse, args={'wait': 10.5})
            return
        url = response.url
        sel = Selector(response)
        if len(sel.xpath('//*[@id="div_minprice"]/p[1]/span[2]/text()').extract()) < 1:
            retries = response.meta.get('render_retry', 0)
            if retries < self.MAX_RENDER_RETRIES:
                yield SplashRequest(url, callback=self.parse, dont_filter=True,
                                    meta={'render_retry': retries + 1},
                                    args={'wait': 10.5})
            else:
                logging.error("min price node still missing after %s renders: %s"
                              % (self.MAX_RENDER_RETRIES, url))
            return

        item = HotelDetailItem()
        try:
            item['type'] = "HOTEL"
            item['timer_param'] = self.timer_param
            item['timer_granularity'] = self.timer_granularity
            item['business_type'] = "hotel"

            item['product_table'] = "hotel_pdetail"
            item['data_type'] = "HotelDetail"
            item['detail_id'] = DateCommon.get_id_by_datetime()
            item['company_code'] = getCompanyCode(self.company_code)
            item['platform_code'] = "PC"  # operating platform: WAP or PC
            item['city_code'] = self.city_code
            digits = re.compile(r'\d+')  # reused to pull numbers out of labels
            hotel_name = sel.xpath('//*[@id="J_htl_info"]/div[2]/h2[1]/text()').extract()
            item['hotel_name'] = StringCommon.remove_blank(hotel_name[0]) if hotel_name else ""
            product_id = sel.xpath('//*[@id="hotel"]/@value').extract()
            # Intentionally strict: a missing product id aborts this item and
            # is recorded through exception_handel below.
            item['product_id'] = StringCommon.remove_blank(product_id[0])
            item['come_date'] = DateCommon.get_current_date()
            item['leave_date'] = DateCommon.get_other_date(1)
            # Full address = city + district + street address + road cross.
            addr_parts = []
            for addr_xpath in (
                    '//*[@id="ctl00_MainContentPlaceHolder_commonHead_lnkCity"]/text()',
                    '//*[@id="ctl00_MainContentPlaceHolder_commonHead_lnkLocation"]/text()',
                    '//*[@id="ctl00_MainContentPlaceHolder_commonHead_lbAddress"]/text()',
                    '//*[@id="ctl00_MainContentPlaceHolder_commonHead_lnkRoadCross"]/text()'):
                part = sel.xpath(addr_xpath).extract()
                addr_parts.append(StringCommon.remove_blank(part[0]) if part else "")
            item['hotel_addr'] = ''.join(addr_parts)
            # Guaranteed non-empty by the render check at the top of parse().
            lowest_price = sel.xpath('//*[@id="div_minprice"]/p[1]/span[2]/text()').extract()
            item['lowest_price'] = StringCommon.remove_blank(lowest_price[0])
            item['score'] = self._first(
                sel.xpath('//*[@id="base_bd"]/div[7]/div[2]/div[1]/div/a/p[1]/span/text()').extract())
            image_num = sel.xpath('//*[@id="view_allpic"]/text()').extract()
            item['image_num'] = digits.findall(image_num[0])[0] if image_num else ''
            item['introduction'] = ""
            item['score_tag'] = self._first(
                sel.xpath('//*[@id="base_bd"]/div[7]/div[2]/div[1]/div/a/span[1]/text()').extract())
            item['star_level'] = self._first(
                sel.xpath('//*[@id="ctl00_MainContentPlaceHolder_commonHead_imgStar"]/@title').extract())
            item['detail_url'] = response.url  # detail page link
            # Screenshot capture (save_screenshot) is currently disabled; the
            # original code stored " " for both snapshot settings.
            item['snapshot_addr'] = " "
            item['lv_product_id'] = self.lv_product_ids.get(response.url)
            item['create_time'] = DateCommon.get_current_date()
            item['update_time'] = item['create_time']
            item['task_id'] = self.task_id
            item['task_date'] = self.task_date

            # Commodity (room) details: parallel lists, one entry per room row.
            item['commodity_table'] = "hotel_cdetail"
            for key in ('room_type', 'commodity_id', 'area', 'floor', 'commodity_name',
                        'bed_type', 'broadband', 'breakfast', 'window', 'increase_bed',
                        'people_num', 'commodity_url', 'cancel_rule', 'preferential',
                        'sale_price'):
                item[key] = []

            room_rows = sel.xpath('//*[@id="J_RoomListTbl"]/tbody/tr[@expand]')
            for room in room_rows:
                brid = room.xpath('@brid').extract()[0]
                item['room_type'].append(StringCommon.remove_blank(
                    sel.xpath('//*[@id="' + brid + '"]/a[2]/text()').extract()[0]))
                item['commodity_id'].append(
                    room.css('td.child_name').xpath('@data-roomid').extract()[0])
                # Commodity name = base name + <em> suffix; '' defaults make
                # plain concatenation reproduce the original 4-way branch.
                commodity_name = self._first(room.css('td.child_name > span.room_type_name::text').extract())
                em_content = self._first(room.css('td.child_name > span.room_type_name > em ::text').extract())
                item['commodity_name'].append(commodity_name + em_content)
                item['bed_type'].append(self._first(room.css('td.col3::text').extract()))
                # _first guards rows missing these cells; the old unguarded
                # [0] raised IndexError and dropped the whole hotel item.
                item['breakfast'].append(self._first(room.css('td.col4::text').extract()))
                item['broadband'].append(self._first(room.css('td.col5 > span::text').extract()))
                people_num = room.css('td.col_person > span').xpath('@title').extract()
                item['people_num'].append(digits.findall(people_num[0])[0] if people_num else '')
                item['cancel_rule'].append(
                    self._first(room.css('td.col_policy > span.room_policy::text').extract()))
                item['sale_price'].append(self._first(room.css('.base_box > .base_price::text').extract()))
                # Fields the Ctrip PC page does not expose; kept for schema parity.
                for key in ('increase_bed', 'window', 'commodity_url',
                            'preferential', 'area', 'floor'):
                    item[key].append('')
            yield item
        except Exception as e:
            self.exception_handel(response, e)

    def save_screenshot(self, response, product_id, city_code, company_code, task_id, task_date):
        """Render response.url in PhantomJS, screenshot it and crop it to the
        booking area.

        Returns the path of the cropped image, or "" / a best-effort path on
        failure.  (Currently unused -- the call site in parse() is disabled.)
        """
        compress_path = ""
        driver = None  # so the except/finally blocks never hit a NameError
        try:
            file_name = getCompanyCode(
                company_code) + "__" + city_code + "__" + task_id + "__" + task_date + "__" + product_id + ".png"
            service_args = ['--ssl-protocol=any']
            # Only pass --proxy when the request actually carried one; the
            # old unconditional response.meta['proxy'] raised KeyError.
            proxy = response.meta.get('proxy')
            if proxy:
                service_args.append('--proxy=' + proxy)
            dcap = dict(DesiredCapabilities.PHANTOMJS)
            # Spoof a random user agent from the rotation middleware's pool.
            middleware = RotateUserAgentMiddleware()
            dcap["phantomjs.page.settings.userAgent"] = random.choice(middleware.user_agent_list)
            dcap["phantomjs.page.settings.resourceTimeout"] = 5000  # ms
            dcap["browserName"] = "Netscape"
            dcap["version"] = "5.0"
            # Extra request header so the shot looks like in-site navigation.
            dcap["phantomjs.page.customHeaders.Referer"] = response.url
            driver = webdriver.PhantomJS(desired_capabilities=dcap, service_args=service_args)
            logging.info("%s PhantomJS is getUrl..." % datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
            driver.set_page_load_timeout(100)
            driver.set_script_timeout(100)
            driver.get(response.url)
            times.sleep(2)  # give async page content time to settle
            path = os.getcwd() + "/screenshot/" + file_name
            compress_path = os.getcwd() + "/compressImage/" + file_name
            logging.info("%s save_screenshot is starting..." % datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
            driver.save_screenshot(path)
            logging.info("%s save_screenshot is end..." % datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
            # Crop box derived from the positions/sizes of the two booking
            # elements on the page.
            element = driver.find_element_by_css_selector('.shadow')
            element1 = driver.find_element_by_xpath("//*[@class='content-wrap sceneryBook']")
            left = int(element.location['x'])
            top = int(element.location['y'])
            right = int(element.location['x'] + element.size['width'])
            bottom = int(element.location['y'] + element.size['height'] + element1.size['height'])
            im = Image.open(path)
            im = im.crop((left, top, right, bottom))  # (left, top, right, bottom)
            im.save(compress_path)
            os.remove(path)  # keep only the cropped copy
        except Exception as e:
            logging.error("截图失败 %s ,%s" % (response.url, e))
            if driver is not None:
                logging.error("截图 页面源码：%s ,%s" % (response.url, driver.page_source))
        finally:
            if driver is not None:
                try:
                    # SIGTERM the phantomjs child first, then quit the driver.
                    driver.service.process.send_signal(signal.SIGTERM)
                    driver.quit()
                except Exception as e:
                    logging.error("driver.quit_failed %s" % e)
        return compress_path

    def handel_erro(self, response):
        """Download-failure callback (name kept as-is for external callers)."""
        logging.info("页面下载失败！！！")

    def exception_handel(self, response, e):
        """Log a parse failure and persist it to the cpa_oper_log table.

        Optionally mirrors the log record to the remote collector when
        IS_SAVE_SERVER is 'TRUE'.
        """
        logging.error("记录日志：标签变动 %s ,%s" % (response.url, e))
        logging.info("页面源码：%s ,%s" % (response.url, response.body))
        logging.error("Exception: %s" % traceback.format_exc().replace("\"", "'"))
        db_operate = DbOperateClass()
        # Keep only the first 500 characters of the traceback; mark the cut.
        error_desc = traceback.format_exc().replace("\"", "'").replace(" ", "")
        if len(error_desc) > 500:
            error_desc = error_desc[:500] + '***'
        # NOTE(review): SQL built with string interpolation; double quotes are
        # stripped from error_desc above, but parameterize if the DB layer
        # supports it.
        sql = "insert into cpa_oper_log (log_type,company_code,busi_type,url,batch_num,timer_code,error_code,error_desc,task_id,task_date) values (\"%s\",\"%s\",\"%s\",\"%s\",\"%s\" ,\"%s\",\"%s\",\"%s\",\"%s\",\"%s\")" \
              % ("C", getCompanyCode(self.company_code), "酒店", response.url, self.task_date, "timer_code",
                 "error_code", error_desc, str(self.task_id), str(self.task_date))
        db_operate.update_by_sql(sql)
        if self.save_server == 'TRUE':
            http_common = HttpCommon()
            operate_log = {'type': 'LOG',
                           'log_type': "C",
                           'company_code': getCompanyCode(self.company_code),
                           'busi_type': "酒店",
                           'url': response.url,
                           'batch_num': self.task_date,
                           'timer_code': "timer_code",
                           'error_code': "error_code",
                           'error_desc': error_desc,
                           'task_id': str(self.task_id),
                           'task_date': str(self.task_date)}
            http_common.get_method(operate_log)

    @classmethod
    def from_crawler(cls, crawler, **kwargs):
        """Build the spider from crawl kwargs, blanking the arguments that do
        not apply to the selected mode (normal vs. miss-product)."""
        logging.info('inside_from_crawler: ')
        if kwargs['miss_product_flag'] == 'FALSE':
            kwargs['url'] = ''
            kwargs['lv_product_id'] = ''
        else:
            kwargs['timer_granularity'] = ''
            kwargs['timer_id'] = ''

        return cls(
            crawler,
            spider_name=kwargs['spider_name'],
            city_code=kwargs['city_code'],
            company_code=kwargs['company_code'],
            task_id=kwargs['task_id'],
            task_date=kwargs['task_date'],
            timer_param=kwargs['timer_param'],
            timer_granularity=kwargs['timer_granularity'],
            timer_id=kwargs['timer_id'],
            url=kwargs['url'],
            lv_product_id=kwargs['lv_product_id'],
            miss_product_flag=kwargs['miss_product_flag']
        )

    def spider_opened(self, spider):
        """Signal handler: mark the timer as running (status 2) in normal mode."""
        logging.info('Spider_opened_list: %s' % spider.name)
        if self.miss_product_flag == 'FALSE':
            db_operate = DbOperateClass()
            db_operate.update_status(2, "PYTHON", self.timer_id)

    def spider_closed(self, spider, reason):
        """Signal handler: record completion in the DB.

        Miss-product mode marks the single URL done; normal mode flips the
        timer from running (2) to finished (5) if it is still running.
        """
        logging.info('spider_closed_list: %s ,reason:%s' % (spider.name, reason))
        db_operate = DbOperateClass()
        if self.miss_product_flag == 'TRUE':  # supplementary (miss-product) crawl
            db_operate.update_miss_status(1, self.url, self.task_id)
        elif self.miss_product_flag == 'FALSE':
            sql = "SELECT timer.timer_status FROM cpa_timer timer WHERE timer.timer_type='PYTHON' AND timer.timer_id " \
                  "= '" + self.timer_id + "'"
            (fc, count) = db_operate.select_by_sql(sql)
            spider.logger.info('SELECT_COUNT: %s ,SQL:%s' % (count, sql))
            timer_status = ""
            for row in fc:
                timer_status = row[0]
            logging.info("timer_status:%s" % timer_status)
            if timer_status == '2':
                db_operate.update_status(5, "PYTHON", self.timer_id)