# -*- coding: UTF-8 -*-
import scrapy
from scrapy.selector import Selector

from pangolin.common.http_common import HttpCommon
from pangolin.items import TicketPlistItem
from scrapy.http import Request
import traceback
from scrapy import signals
import os, sys
import subprocess
from pangolin.common.date_common import DateCommon
from pangolin.spider_enum.companyCodeEnum import getCompanyCode
from pangolin.common.db_operateCommon import *
import logging

# Python 2 only: `reload(sys)` re-exposes `sys.setdefaultencoding` (hidden by
# site.py at startup) so the process-wide default str<->unicode codec can be
# forced to UTF-8 for the Chinese text handled below (city names, log rows).
# NOTE(review): this is a well-known Py2 anti-pattern — it silently changes
# implicit conversions everywhere in the process; confirm before porting.
reload(sys)
sys.setdefaultencoding('UTF-8')

class PangolinSpider(scrapy.Spider):
    """List-page spider for ly.com (Tongcheng) scenic-spot tickets.

    Resolves the city name for ``cityCode`` from the DB, crawls the city's
    scenery search list, yields one ``TicketPlistItem`` per product and, when
    the spider closes, launches the matching detail-page spider as a child
    process.
    """
    name = 'tc_ticket_list_spider'
    # Scrapy's offsite middleware reads `allowed_domains` (a list); the
    # original `allowed_domain` attribute was silently ignored. The old name
    # is kept for backward compatibility with any external readers.
    allowed_domain = 'ly.com'
    allowed_domains = ['ly.com']

    def __init__(self, crawler, spiderName, cityCode, companyCode, taskId, taskDate, timerParam, timerGranularity,
                 timerId):
        """Store the task parameters, build the start URLs, purge stale rows
        for this batch/city, and hook the spider open/close signals.

        :param crawler: the Scrapy crawler (supplies the signal manager)
        :param spiderName/cityCode/companyCode/taskId/taskDate: batch identity
        :param timerParam/timerGranularity/timerId: scheduler bookkeeping
        """
        self.spiderName = spiderName
        self.cityCode = cityCode
        self.companyCode = companyCode
        self.taskId = taskId
        self.taskDate = taskDate
        self.timerParam = timerParam
        self.timerGranularity = timerGranularity
        self.timerId = timerId
        # BUGFIX: `parse()`'s exception handler reads self.save_server, but it
        # was never assigned anywhere, so the handler itself crashed with
        # AttributeError. Default to 'FALSE' (skip the HTTP log push).
        self.save_server = 'FALSE'

        # Look up the city name for cityCode. The DB helper exposes no
        # parameter binding here, so at minimum escape single quotes to keep
        # the hand-built SQL from breaking / being injected.
        # TODO(review): switch to a bind-parameter API if DbOperateClass has one.
        db_operate = DbOperateClass()
        safe_city_code = str(cityCode).replace("'", "''")
        sql = "SELECT c.name FROM cityList c WHERE cityId = '" + safe_city_code + "'"
        logging.info("SELECT_SQL:%s" % sql)
        row = db_operate.select_one_by_sql(sql)
        cityName = row[0].decode('utf-8')  # Py2: DB driver returns bytes

        # First page is the HTML search list; subsequent pages go through the
        # AJAX endpoint (page number substituted in parse()).
        self.firstPageUrl = "http://www.ly.com/scenery/scenerysearchlist_0_0_cityName_0_0_0_0_0_0_0.html?spm=7.68076534.9.1&track=fromlistBtn%3E".replace("cityName", cityName)
        self.nextPageUrl = "http://www.ly.com/scenery/NewSearchList.aspx?&action=getlist&page=1&kw=cityName&pid=0&cid=0&cyid=0&sort=&isnow=0&spType=&lbtypes=&IsNJL=0&classify=0&grade=&dctrack=1%CB%871482907116131304%CB%8727%CB%875%CB%871867725962957913%CB%870&iid=0.7383308369761306".replace("cityName", cityName)
        self.start_urls = ['%s' % self.firstPageUrl]

        # Delete any earlier rows for the same batch/city before re-crawling.
        db_operate.delete_pd_list_data("ticket_plist_tc", getCompanyCode(companyCode), cityCode, taskId, taskDate)

        # Bind open/close signals so status updates / the detail spider run.
        self.crawler = crawler
        cs = crawler.signals
        cs.connect(self.spider_closed, signals.spider_closed)
        cs.connect(self.spider_opened, signals.spider_opened)

    def parse(self, response):
        """Entry callback (name mandated by Scrapy).

        If the pager's "last page" link exists, fan out one request per page
        to :meth:`parse_product_list`; otherwise scrape the single page
        directly. Any scraping error is logged to cpa_oper_log.
        """
        try:
            product_tag = response.css('div.scenery_list')
            last_pages = response.css('a.last_page02.border_gray')
            if last_pages and product_tag:
                last_page = last_pages.xpath('@pagenum').extract()[0]
                # Rebuild the AJAX URL around the page number:
                # prefix ends with "...&page=", suffix starts with "&kw=".
                prefix = self.nextPageUrl.split("1&kw=")[0]
                suffix = self.nextPageUrl.split("&page=1")[1]
                for page in range(1, int(last_page) + 1):
                    yield Request(prefix + str(page) + suffix,
                                  callback=self.parse_product_list)
            elif product_tag:
                # Single page of results: reuse the list parser instead of
                # duplicating the item-building code (original duplicated it).
                for item in self.parse_product_list(response):
                    yield item
            # else: no products on this page — nothing to emit.
        except Exception as e:
            db_operate = DbOperateClass()
            logging.error("记录日志：标签变动 %s ,%s" % (response.url, e))
            logging.error("tc_ticket_list_spider_Exception: %s" % traceback.format_exc().replace("\"", "'"))

            # Keep only the first 500 characters of the traceback.
            error_desc = traceback.format_exc().replace("\"", "'").replace(" ", "")
            if len(error_desc) > 500:
                error_desc = error_desc[:500] + '***'
            # Record the error in the operation-log table.
            sql = "insert into cpa_oper_log (log_type,company_code,busi_type,url,batch_num,timer_code,error_code,error_desc,task_id,task_date) values (\"%s\",\"%s\",\"%s\",\"%s\",\"%s\" ,\"%s\",\"%s\",\"%s\",\"%s\",\"%s\")" \
                  % ("C", getCompanyCode(self.companyCode), "门票", response.url, self.taskDate, "timer_code",
                     "error_code", error_desc, str(self.taskId), str(self.taskDate))
            logging.info("INSERT_SQL:%s" % sql)
            db_operate.update_by_sql(sql)
            # Optionally also push the error to the central log service.
            if self.save_server == 'TRUE':
                http_common = HttpCommon()
                operate_log = {'type': 'LOG',
                               'log_type': "C",
                               'company_code': getCompanyCode(self.companyCode),
                               'busi_type': "门票",
                               'url': response.url,
                               'batch_num': self.taskDate,
                               'timer_code': "timer_code",
                               'error_code': "error_code",
                               'error_desc': error_desc,
                               'task_id': str(self.taskId),
                               'task_date': str(self.taskDate)}
                http_common.get_method(operate_log)

    def parse_product_list(self, response):
        """Yield one TicketPlistItem for every scenery entry on a list page."""
        for site in Selector(response).css('div.scenery_list'):
            yield self._build_item(site)

    def _build_item(self, site):
        """Map one ``div.scenery_list`` selector to a populated item.

        Extracted here because the same mapping was previously duplicated in
        both parse() and parse_product_list().
        """
        item = TicketPlistItem()
        item['product_list_table'] = "ticket_plist_tc"
        item['data_type'] = "TicketList"
        item['ids'] = DateCommon.get_id_by_datetime()
        item['company_code'] = getCompanyCode(self.companyCode)
        item['platform_code'] = 'PC'
        item['city_code'] = self.cityCode
        name_link = site.css('.sce_name.goFinal')
        item['scenery_name'] = name_link.xpath('text()').extract()[0].replace(" ", "").replace("\n", "")
        item['detail_url'] = "http://www.ly.com" + name_link.xpath('@href').extract()[0].replace(" ", "").replace("\n", "")
        # Product id is embedded in the detail URL: ...BookSceneryTicket_<id>.html
        item['product_id'] = item['detail_url'].split('BookSceneryTicket_')[1].split('.html')[0]
        item['create_time'] = DateCommon.get_current_date()
        item['update_time'] = item['create_time']
        item['task_id'] = self.taskId
        item['task_date'] = self.taskDate
        return item

    @classmethod
    def from_crawler(cls, crawler, **kwargs):
        """Scrapy factory hook: pull the expected task arguments out of the
        crawler kwargs and construct the spider."""
        logging.info('inside_from_crawler: ')
        args_key = {'spiderName': '', 'cityCode': '', 'companyCode': '', 'taskId': '', 'taskDate': '', 'timerParam': '',
                    'timerGranularity': '', 'timerId': ''}
        arg = {}
        for key in args_key:
            if key in kwargs:
                arg[key] = kwargs[key]
        return cls(
            crawler,
            spiderName=arg['spiderName'],
            cityCode=arg['cityCode'],
            companyCode=arg['companyCode'],
            taskId=arg['taskId'],
            taskDate=arg['taskDate'],
            timerParam=arg['timerParam'],
            timerGranularity=arg['timerGranularity'],
            timerId=arg['timerId'])

    def spider_opened(self, spider):
        """Mark the timer task as running (status 2) when the spider starts."""
        logging.info('Spider_opened_list: %s' % spider.name)
        db_operate = DbOperateClass()
        db_operate.update_status(2, "PYTHON", self.timerId)

    def spider_closed(self, spider, reason):
        """On close, launch the corresponding detail-page spider.

        BUGFIX: the command is now passed as an argv list with shell=False —
        the original concatenated a shell string (injection-prone via any
        parameter, and `self.timerId` was concatenated without str()).
        """
        logging.info('spider_closed_list: %s ,reason:%s' % (spider.name, reason))
        spiderName = spider.name.replace("list", "detail")
        argv = ["python", os.path.join(os.getcwd(), "main.py"),
                str(self.cityCode), str(self.companyCode), str(self.taskId),
                str(self.taskDate), spiderName, str(self.timerParam),
                str(self.timerGranularity), str(self.timerId)]
        logging.info('path:%s' % ' '.join(argv))
        subprocess.call(argv)