# -*- coding:utf-8 -*- 

"""
    @version: 1.0
    @author: Danson
    @software: PyCharm
    @file: qnr_hotel_by_url_pc_splash_spider.py
    @time: 2017/7/20 20:20
    @description:获取去哪儿酒店相关数据（PC端）
    @todo:
"""

import random
import requests
import scrapy
import signal
from scrapy.selector import Selector

from scrapy_splash import SplashRequest
import traceback
from scrapy import signals
import os, sys
from pangolin.common.date_common import DateCommon
from pangolin.common.string_common import StringCommon
from pangolin.middlewares import RotateUserAgentMiddleware
from pangolin.spider_enum.companyCodeEnum import getCompanyCode
from pangolin.common.db_operateCommon import *
from pangolin.items import HotelDetailItem
from pangolin.common.http_common import *
import logging
import re
from urlparse import urljoin
from pangolin.settings import IS_SAVE_SERVER
from pangolin.settings import IS_SNAPSHOT
from selenium import webdriver
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from PIL import Image
from datetime import *
import time as times

reload(sys)
sys.setdefaultencoding('UTF-8')


class QnrHotelSpider(scrapy.Spider):
    name = 'qnr_hotel_by_url_pc_splash_spider'
    allowed_domain = 'qunar.com'

    # Custom settings
    # settings = Settings({
    #     # Override the download handler
    #     "DOWNLOAD_HANDLERS": {
    #         'http': 'scrapy_spider.SplashDownloadHandler',
    #         'https': 'scrapy_spider.SplashDownloadHandler',
    #     }
    # })

    def __init__(self, crawler, spider_name, city_code, company_code, task_id, task_date, timer_param,
                 timer_granularity, timer_id, url, lv_product_id, miss_product_flag):
        # 接受入参,全局引用
        self.save_server = IS_SAVE_SERVER
        self.snapshot = IS_SNAPSHOT

        self.spider_name = spider_name
        self.city_code = city_code
        self.company_code = company_code
        self.task_id = task_id
        self.task_date = task_date
        self.timer_param = timer_param
        self.timer_granularity = timer_granularity
        self.timer_id = timer_id
        self.url = url
        self.lv_product_id = lv_product_id
        self.miss_product_flag = miss_product_flag
        self.splash_request = True

        db_operate = DbOperateClass()
        urls = []
        lv_product_ids = {}
        if self.miss_product_flag == 'FALSE':
            sql = "SELECT qnr_url,product_id FROM cpa_care_product WHERE city_code = '" + self.city_code + \
                  "' AND business_code='HOTEL'"
            try:
                (fc, count) = db_operate.select_by_sql(sql)
                for row in fc:
                    if row[0] is not None and row[0] != '':
                        url = row[0]
                        lv_product_id = row[1]
                        lv_product_ids.setdefault(url, lv_product_id)
                        urls.append(url)
                self.lv_product_ids = lv_product_ids
            except Exception as e:
                logging.info("Exception %s" % e)
                raise
            # 爬虫爬取前先删除同一批次同一城市的数据
            db_operate = DbOperateClass()
            db_operate.delete_pd_and_cd_data("hotel_pdetail", "hotel_cdetail", getCompanyCode(company_code),
                                             city_code, task_id, task_date)
        else:
            lv_product_ids.setdefault(self.url, self.lv_product_id)
            self.lv_product_ids = lv_product_ids
            urls.append(self.url)
        self.lv_product_ids = lv_product_ids
        self.start_urls = urls

        self.base_urls = "http://hotel.qunar.com"
        # 绑定爬虫开始和结束信号，便于监听爬虫开始与结束
        self.crawler = crawler
        cs = crawler.signals
        cs.connect(self.spider_closed, signals.spider_closed)
        cs.connect(self.spider_opened, signals.spider_opened)

    def start_requests(self):
        for url in self.start_urls:
            script = """
                    function main(splash, args)
                        assert(splash:go(args.url))
                        splash:autoload("https://code.jquery.com/jquery-2.1.3.min.js")
                        splash:runjs("$('.btn-book-ct').click()")
                        assert(splash:wait(5.5))
                        return {
                            html = splash:html(),
                            png = splash:png(),
                            har = splash:har(),
                         }
                    end
                    """
            yield SplashRequest(url, self.parse,
                                endpoint='execute',
                                args={
                                    'wait': 10.5,
                                    'lua_source': script,
                                })

    def parse(self, response):
        # print response.body
        sel = Selector(response)
        item = HotelDetailItem()
        try:
            item['type'] = "HOTEL"
            item['timer_param'] = self.timer_param
            item['timer_granularity'] = self.timer_granularity
            item['business_type'] = "hotel"

            item['product_table'] = "hotel_pdetail"
            item['data_type'] = "HotelDetail"
            item['detail_id'] = DateCommon.get_id_by_datetime()
            item['company_code'] = getCompanyCode(self.company_code)
            item['platform_code'] = "PC"  # 操作平台WAP，PC
            item['city_code'] = self.city_code
            if sel.css('.b-baseinfo-title > h2 > span'):
                item['hotel_name'] = sel.xpath('//*[@class="b-baseinfo-title"]/h2/span/text()').extract()[0]  # 酒店名称
            elif sel.css('.htl-info > h2 > span'):
                item['hotel_name'] = sel.xpath('//*[@class="htl-info fr"]/h2/span/text()').extract()[0]  # 酒店名称
            else:
                item['hotel_name'] = ""

            mode = re.compile(r'\d+')
            item['product_id'] = response.url.split("dt-")[1].split("/?")[0]
            item['come_date'] = DateCommon.get_current_date()
            item['leave_date'] = DateCommon.get_other_date(1)
            item['hotel_addr'] = StringCommon.remove_blank(sel.xpath('//*[@class="adress"]/span/text()').extract()[0])
            if sel.css('#toRoomtool'):
                item['lowest_price'] = sel.xpath('//*[@id="toRoomtool"]/div/b/text()').extract()[0]
            elif sel.css('.b_facilityarea > div.e-lowprice > a'):
                item['lowest_price'] = sel.xpath('//*[@class="b_facilityarea"]/div[2]/a/text()').extract()[0]
            else:
                item['lowest_price'] = ""
            item['score'] = sel.xpath('//*[@class="score_mt clrfix"]/span/b/text()').extract()[0]
            if sel.css('#imgTotal'):
                image_num = sel.xpath('//*[@id="imgTotal"]/text()').extract()[0]
            elif sel.css('.js_imageWallShowMore'):
                image_num = sel.xpath('//*[@class="e-morepic js_imageWallShowMore"]/a/text()').extract()[0]
            else:
                image_num = ""
            item['image_num'] = mode.findall(image_num)[0]
            item['introduction'] = ""
            item['score_tag'] = ""
            if sel.css('em.star'):
                item['star_level'] = sel.css('em.star').xpath('@title').extract()[0]  # 星级
            elif sel.xpath('//em[@class="dangci"]'):
                item['star_level'] = sel.xpath('//em[@class="dangci"]').xpath('string(.)').extract()[0]  # 星级
            else:
                item['star_level'] = ""
            item['detail_url'] = response.url  # 详情页链接
            if self.snapshot == 'TRUE':
                item['snapshot_addr'] = " "
                # 获得截图地址
                # compress_path = self.save_screenshot(response, item['product_id'], self.city_code,
                #                                      self.company_code, self.task_id, self.task_date)
                # item['snapshot_addr'] = compress_path
            else:
                item['snapshot_addr'] = " "
            item['lv_product_id'] = self.lv_product_ids.get(response.url)
            item['create_time'] = DateCommon.get_current_date()
            item['update_time'] = item['create_time']
            item['task_id'] = self.task_id
            item['task_date'] = self.task_date

            # 商品详情
            item['commodity_table'] = "hotel_cdetail"
            item['room_type'] = []  # 房型
            item['commodity_id'] = []  # 商品id
            item['area'] = []  # 房间面积
            item['floor'] = []  # 楼层
            item['commodity_name'] = []  # 商品名称
            item['bed_type'] = []  # 床型
            item['broadband'] = []  # 宽带
            item['breakfast'] = []  # 早餐
            item['window'] = []  # 窗户
            item['increase_bed'] = []  # 加床
            item['people_num'] = []  # 入住人数
            item['commodity_url'] = []  # 商品预定链接
            item['cancel_rule'] = []  # 取消(退订)规则
            item['preferential'] = []  # 优惠
            item['sale_price'] = []  # 售价
            if sel.css('#roomTool'):
                room_tag = sel.css('#roomTool')
            elif sel.css('.m-room-tools'):
                room_tag = sel.css('.m-room-tools')
            else:
                room_tag = ""
            if room_tag:
                sites_evens = room_tag.css('.m-room-tools-bd .room-item-inner')
                for site in sites_evens:
                    room_type = site.css('.rtype > h2 > a::text').extract()[0]
                    if site.css('.room-area'):
                        room_area = site.css('.room-area').extract()[0]
                        if room_area.find('面积') > -1:
                            area = room_area.split('面积')[1].split('</cite>')[0]
                        else:
                            area = ""
                        if room_area.find('位于') > -1:
                            floor = room_area.split('位于')[1].split('</cite>')[0]
                        else:
                            floor = ""
                        if room_area.find('窗') > -1:
                            if site.css('.room-area > cite:nth-child(5)'):
                                window = site.css('.room-area > cite:nth-child(5)::text').extract()[0]
                            elif site.css('.room-area > cite:nth-child(4)'):
                                window = site.css('.room-area > cite:nth-child(4)::text').extract()[0]
                            elif site.css('.room-area > cite:nth-child(3)'):
                                window = site.css('.room-area > cite:nth-child(3)::text').extract()[0]
                            else:
                                window = ""
                        else:
                            window = ""
                        if room_area.find('span') > -1:
                            bed_type = room_area.split('</span>')[1].split('</cite>')[0]
                        else:
                            bed_type = ""
                        if room_area.find('(') > -1:
                            increase_bed = room_area.split('(')[1].split(')')[0]
                        else:
                            increase_bed = ""
                    else:
                        area = ""
                        floor = ""
                        window = ""
                        bed_type = ""
                        increase_bed = ""

                    if site.css('.facily-list'):
                        facily_list = site.css('.facily-list').extract()[0]
                        if facily_list.find('ren') > -1:
                            people_num = facily_list.split('ren" title="')[1].split('"')[0]
                        else:
                            people_num = ""
                        if facily_list.find('broadband') > -1:
                            net = facily_list.split('icon-broadband" title="')[1].split('"')[0] + "。"
                        else:
                            net = ""
                        if facily_list.find('icon-wifi') > -1:
                            wifi = facily_list.split('icon-wifi" title="')[1].split('"')[0]
                        else:
                            wifi = ""
                        broadband = net + wifi
                    else:
                        people_num = ""
                        broadband = ""

                    sites_even = site.css('.js-result-list .room-type-default')
                    for channel_site in sites_even:
                        commodity_name = channel_site.css('.js-product::text').extract()[0]
                        breakfast = channel_site.css('.e4 > p::text').extract()[0]
                        if channel_site.css('.js-cancel > em'):
                            cancel_rule = channel_site.css('.js-cancel > em::text').extract()[0]
                        else:
                            cancel_rule = ""
                        if channel_site.css('.tbl-same-type').xpath('@onclick'):
                            commodity_url_tag = channel_site.css('.tbl-same-type').xpath('@onclick').extract()[0]
                            commodity_url = self.base_urls + commodity_url_tag.split("open('")[1].split("'")[0]
                            commodity_id = commodity_url.split('roomId=')[1].split('&')[0].replace("_", "")
                        else:
                            commodity_url = ""
                            commodity_id = ""
                        preferential_tag = channel_site.css('.return-cash')
                        if preferential_tag:
                            preferential = channel_site.css('.return-cash').extract()[0]
                            dr = re.compile(r'<[^>]+>', re.S)
                            preferential = dr.sub('', preferential)
                        else:
                            preferential = ""
                        sale_price = channel_site.css('.sprice::text').extract()[0]

                        item['room_type'].append(room_type)  # 商品类型或名称
                        item['commodity_id'].append(commodity_id)  # 商品id
                        item['area'].append(area)  # 房间面积
                        item['floor'].append(floor)  # 楼层
                        item['commodity_name'].append(commodity_name)  # 商品名称
                        item['bed_type'].append(bed_type)  # 床型
                        item['broadband'].append(broadband)  # 宽带
                        item['breakfast'].append(breakfast)  # 早餐
                        item['window'].append(window)  # 窗户
                        item['increase_bed'].append(increase_bed)  # 加床
                        item['people_num'].append(people_num)  # 入住人数
                        item['commodity_url'].append(commodity_url)  # 商品预定链接
                        item['cancel_rule'].append(cancel_rule)  # 取消(退订)规则
                        item['preferential'].append(preferential)  # 优惠
                        item['sale_price'].append(sale_price)  # 售价
                # print item
                yield item
        except Exception as e:
            self.exception_handel(response, e)

    def handel_erro(self, response):
        logging.info("inside_handel_erro")

    def exception_handel(self, response, e):
        logging.error("记录日志：标签变动 %s ,%s" % (response.url, e))
        logging.info("页面源码：%s ,%s" % (response.url, response.body))
        logging.error("Exception: %s" % traceback.format_exc().replace("\"", "'"))
        db_operate = DbOperateClass()
        # 错误描述只截取前500个字符，剩下的用***代替
        error_desc = traceback.format_exc().replace("\"", "'").replace(" ", "")
        if len(error_desc) > 500:
            error_desc = error_desc[:500] + '***'
        # 错误日志记入日志表
        sql = "insert into cpa_oper_log (log_type,company_code,busi_type,url,batch_num,timer_code,error_code,error_desc,task_id,task_date) values (\"%s\",\"%s\",\"%s\",\"%s\",\"%s\" ,\"%s\",\"%s\",\"%s\",\"%s\",\"%s\")" \
              % ("C", getCompanyCode(self.company_code), "酒店", response.url, self.task_date, "timer_code",
                 "error_code", error_desc, str(self.task_id), str(self.task_date))
        db_operate.update_by_sql(sql)
        if self.save_server == 'TRUE':
            http_common = HttpCommon()
            operate_log = {'type': 'LOG',
                           'log_type': "C",
                           'company_code': getCompanyCode(self.company_code),
                           'busi_type': "酒店",
                           'url': response.url,
                           'batch_num': self.task_date,
                           'timer_code': "timer_code",
                           'error_code': "error_code",
                           'error_desc': error_desc,
                           'task_id': str(self.task_id),
                           'task_date': str(self.task_date)}
            http_common.get_method(operate_log)

    @classmethod
    def from_crawler(cls, crawler, **kwargs):
        logging.info('inside_from_crawler: ')
        if kwargs['miss_product_flag'] == 'FALSE':
            kwargs['url'] = ''
            kwargs['lv_product_id'] = ''
        else:
            kwargs['timer_granularity'] = ''
            kwargs['timer_id'] = ''

        return cls(
            crawler,
            spider_name=kwargs['spider_name'],
            city_code=kwargs['city_code'],
            company_code=kwargs['company_code'],
            task_id=kwargs['task_id'],
            task_date=kwargs['task_date'],
            timer_param=kwargs['timer_param'],
            timer_granularity=kwargs['timer_granularity'],
            timer_id=kwargs['timer_id'],
            url=kwargs['url'],
            lv_product_id=kwargs['lv_product_id'],
            miss_product_flag=kwargs['miss_product_flag']
        )

    def spider_opened(self, spider):
        logging.info('Spider_opened_list: %s' % spider.name)
        # 爬虫设为启动状态2
        if self.miss_product_flag == 'FALSE':
            db_operate = DbOperateClass()
            db_operate.update_status(2, "PYTHON", self.timer_id)

    def spider_closed(self, spider, reason):
        logging.info('spider_closed: %s ,reason:%s' % (spider.name, reason))
        db_operate = DbOperateClass()
        if self.miss_product_flag == 'TRUE':  # 补录
            db_operate.update_miss_status(1, self.url, self.task_id)
        elif self.miss_product_flag == 'FALSE':
            sql = "SELECT timer.timer_status FROM cpa_timer timer WHERE timer.timer_type='PYTHON' AND timer.timer_id " \
                  "= '" + self.timer_id + "'"
            (fc, count) = db_operate.select_by_sql(sql)
            spider.logger.info('SELECT_COUNT: %s ,SQL:%s' % (count, sql))
            timer_status = ""
            for row in fc:
                timer_status = row[0]
            logging.info("timer_status:%s" % timer_status)
            if timer_status == '2':
                db_operate.update_status(5, "PYTHON", self.timer_id)
