# -*- coding: utf-8 -*-
import scrapy
from selenium import webdriver
from scrapy.xlib.pydispatch import dispatcher
from scrapy import signals

from quinnSpider.items import DyttItem

from quinnSpider.tool.BrowserOption import BrowserOption

class DyttSpider(scrapy.Spider):
    """Crawl dytt8.net's latest-movies listing and scrape each movie's
    detail page (poster image, download link) into a DyttItem.

    A Selenium browser is created per spider instance (used by downstream
    middleware, see the ``usedPhantomJs`` request meta flag) and shut down
    via the ``spider_closed`` signal.
    """

    name = "dytt"
    allowed_domains = ["dytt8.net"]
    start_urls = ['http://www.dytt8.net/html/gndy/dyzz/index.html']
    custom_settings = {
        'REDIRECT_ENABLED': False,
        'LOG_LEVEL': 'WARNING',
        # Disable Scrapy's default user-agent middleware (we send our own
        # User-Agent header below). NOTE: middleware switches must live
        # inside DOWNLOADER_MIDDLEWARES -- a bare middleware path placed
        # directly in custom_settings is silently ignored by Scrapy.
        'DOWNLOADER_MIDDLEWARES': {
            'scrapy.downloadermiddlewares.useragent.UserAgentMiddleware': None,
        },
    }

    # Fixed request headers mimicking mobile Chrome, sent with every request.
    headers = {
        'Host': 'www.dytt8.net',
        'Connection': 'keep-alive',
        'Cache-Control': 'max-age=0',
        'Upgrade-Insecure-Requests': '1',
        'User-Agent': 'Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Mobile Safari/537.36',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
        'Referer': 'http://www.dytt8.net/',
        'Accept-Encoding': 'gzip, deflate',
        'Accept-Language': 'zh-CN,zh;q=0.9'
    }

    def __init__(self):
        # Let scrapy.Spider initialize its own state (name, start_urls, ...).
        super(DyttSpider, self).__init__()
        # Browser implementation is chosen by the project helper
        # (previously a hard-coded PhantomJS path).
        self.browser = BrowserOption().selectBrower()
        self.browser.set_page_load_timeout(30)
        # First argument: handler to run; second argument: the signal that
        # triggers it (spider_closed fires when the spider shuts down).
        dispatcher.connect(self.spider_closed, signals.spider_closed)

    def start_requests(self):
        """Yield the initial listing-page requests with the spoofed headers."""
        for url in self.start_urls:
            yield scrapy.Request(
                url,
                meta={'usedPhantomJs': False},
                headers=self.headers,
                callback=self.parse,
                errback=self.err_parse,
                dont_filter=True)

    def spider_closed(self, spider):
        """Signal handler: release the Selenium browser when the spider stops."""
        print(DyttSpider.name + " spider closed")
        # quit() terminates the whole driver process; close() would only
        # close the current window and leak the browser process.
        self.browser.quit()

    def err_parse(self, response):
        """Errback for failed listing-page requests."""
        print("parse error")

    def parse(self, response):
        """Parse a listing page: extract each movie title and follow its
        detail-page link, carrying the partially-filled item in meta."""
        selector = response.xpath('//table[@class="tbspan"]')
        print("start parse")
        print(selector.xpath('tr[2]/td[2]/b/a'))
        for dyttType in selector.xpath('tr[2]/td[2]/b/a'):
            dytt = DyttItem()
            dytt["movieName"] = dyttType.xpath('text()').extract()[0]
            # urljoin resolves both site-absolute ("/html/...") and relative
            # hrefs correctly, unlike manual scheme://host + href splicing.
            movieUrl = response.urljoin(dyttType.xpath('@href').extract()[0])
            print('quinn %s %s' % (dytt["movieName"], movieUrl))

            yield scrapy.Request(
                movieUrl,
                meta={'dytt': dytt},
                headers=self.headers,
                callback=self.detail_parse)

    def detail_parse(self, response):
        """Parse a movie detail page: poster image URL and download URL.

        Returns the item even when extraction fails, so whatever fields
        were filled (at least movieName) still reach the pipeline.
        """
        dytt = response.meta['dytt']
        Pic_selector = response.xpath('//div[@id="Zoom"]/td')
        try:
            dytt["moviePic"] = Pic_selector.xpath('p/img[1]/@src').extract()[0]
            dytt["movieUrl"] = Pic_selector.xpath('table/tbody/tr/td/a/@href').extract()[0]
            print('%s %s' % (dytt["moviePic"], dytt["movieUrl"]))
        except Exception as e:
            # Detail-page layout varies across the site; report and fall
            # through to return the partially-filled item (best effort).
            print('%s %s %s %s' % (e, response.status, dytt["movieName"], response.url))
        return dytt
