import scrapy
import re
from selenium import webdriver
from selenium.webdriver.firefox.webdriver import Options as op


from tv567.items import Tv567Item
from tv567.items import Get_count_Ok


class A567tvSpider(scrapy.Spider):
    """Spider for www.576tv.com.

    Flow: ``parse`` harvests category links from the header drop-down,
    ``get_page_num`` expands each category into its paginated listing
    URLs, ``cunt_oks`` follows every video link on a listing page, and
    ``get_contunt`` yields one ``Get_count_Ok`` item per detail page.

    A Selenium Firefox driver is kept on the spider (``self.bro``) —
    presumably consumed by a downloader middleware to render JS-heavy
    pages (TODO confirm) — and shut down when the spider closes.
    """

    name = '567tv'
    #allowed_domains = ['xxx.com']
    start_urls = ['http://www.576tv.com/']
    # Category URLs harvested at runtime (rebound per-instance in __init__
    # so concurrent/successive spider instances do not share state).
    modol_list = []

    def __init__(self, *args, **kwargs):
        # Forward scrapy's own constructor arguments; skipping
        # super().__init__() leaves the Spider base class uninitialised.
        super().__init__(*args, **kwargs)
        self.modol_list = []
        # NOTE(review): consider a headless run ("--headless", not the
        # original commented-out "--handless" typo) when deploying.
        self.bro = webdriver.Firefox()

    def parse(self, response):
        """Collect the category-section URLs from the header drop-down menu."""
        drop_list = response.xpath('//*[@id="header"]/div[2]/div/ul/li[2]/div/ul[1]/li')
        for dros in drop_list:
            hrefs = dros.xpath('./a/@href').extract_first()
            self.modol_list.append(hrefs)

        # Manually request each section URL; an <li> without an <a href>
        # yields None, so skip falsy entries.
        for modol in self.modol_list:
            if modol:
                yield scrapy.Request(modol, callback=self.get_page_num)

    def get_page_num(self, response):
        """Read the highest page number of a section and request every page."""
        pages = response.xpath('//*[@id="content"]/div[1]/div[4]/div/a[5]/@href').extract_first()
        if pages is None:
            return
        # The "last page" link looks like /Cate/<cat>-<page>.html; pull out
        # the page count.  Guard against a layout change leaving no match
        # (the original would raise IndexError here).
        matches = re.findall(r"/Cate/\d+\-(\d+)\.html", pages, re.S)
        if not matches:
            return
        page_count = int(matches[0])
        for num_pages in range(page_count):
            # response.url ends in ".html" (5 chars); rebuild it as
            # "<base>-<n>.html".
            # NOTE(review): pages run 0..count-1 here — confirm the site
            # actually serves a page 0.
            page_url = response.url[:-5] + '-' + str(num_pages) + '.html'
            yield scrapy.Request(page_url, callback=self.cunt_oks)

    def cunt_oks(self, response):
        """Follow every video detail link found on one listing page."""
        heref = response.xpath('//*[@id="content"]/div[1]/div[3]/ul/li')
        for one_heres in heref:
            one_page_href = one_heres.xpath('./p/a/@href').extract_first()
            # Skip entries with no link — the original passed None to
            # scrapy.Request, which raises at request-build time.
            if one_page_href:
                yield scrapy.Request(one_page_href, callback=self.get_contunt)

    def get_contunt(self, response):
        """Extract title, view count, and video URL from one detail page."""
        item = Get_count_Ok()
        ok_count = response.xpath('//*[@id="content"]/div[1]/div[2]//text()').extract()
        ok_moves = response.xpath('//*[@id="videohPlayer_html5_api"]/source/@src').extract_first()
        # assumes the info <div> yields >= 2 text nodes: [1] is the title,
        # [-2] the whitespace-padded view count — TODO confirm against the
        # live page layout.  Guard so a layout change drops the item
        # instead of raising IndexError.
        if len(ok_count) < 2:
            return
        titles = ok_count[1]
        count_text = ''.join(ok_count[-2].replace("\n", "").split(" "))
        item['titles'] = titles
        item['ok_count'] = count_text
        item['ok_moves'] = ok_moves
        yield item

    def close(self, spider):
        # Scrapy invokes Spider.close at shutdown; depending on how the
        # override is bound, the second positional argument may be the
        # close *reason* string rather than a spider object — so quit the
        # driver through self (always this spider), not through `spider`
        # as the original did.
        self.bro.quit()






