import scrapy

from fanqie_resource.items import FanqieResourceItem
from fanqie_resource.spiders.utils.page_handle import get_page, get_page_model


class FanqieSpider(scrapy.Spider):
    """Spider for fqzy.cc.

    Flow: ``parse`` (home-page category menu) -> ``parse_page`` (paginated
    listing pages) -> ``parse_detail`` (one video detail page per entry).
    A partially-filled ``FanqieResourceItem`` is threaded through the chain
    via ``response.meta['item']``.
    """

    name = 'fanqie'
    # allowed_domains = ['fqzy.cc']   # domain filtering disabled for now
    start_urls = ['http://fqzy.cc/']

    # Current listing page number.
    # NOTE(review): this counter is shared by every category crawled by the
    # spider, so pagination state leaks between categories — per-request page
    # tracking (e.g. in meta) would be safer. Preserved as-is for now.
    page = 1
    # page_url = "https://www.fqzy.cc/index.php/vod/type/id/6/page/%s.html"

    def parse_detail(self, response):
        """Parse a video detail page and yield the completed item.

        Expects ``response.meta['item']`` to already carry the primary and
        secondary classification set by :meth:`parse`.
        """
        item = response.meta['item']

        videoName = response.xpath(
            "/html/body/div[4]/div[1]/div/div/div[2]/div[1]/h2/text()").extract_first()
        videoAlias = response.xpath(
            "/html/body/div[4]/div[1]/div/div/div[2]/div[2]/ul/li[1]/span/text()").extract_first()
        videoDirector = response.xpath(
            '/html/body/div[4]/div[1]/div/div/div[2]/div[2]/ul/li[2]/span/text()').extract_first()
        videoProtagonist = response.xpath(
            '/html/body/div[4]/div[1]/div/div/div[2]/div[2]/ul/li[3]/span/text()').extract_first()
        videoType = response.xpath(
            '/html/body/div[4]/div[1]/div/div/div[2]/div[2]/ul/li[4]/span/text()').extract_first()
        videoArea = response.xpath(
            '/html/body/div[4]/div[1]/div/div/div[2]/div[2]/ul/li[5]/span/text()').extract_first()
        videoLanguage = response.xpath(
            '/html/body/div[4]/div[1]/div/div/div[2]/div[2]/ul/li[6]/span/text()').extract_first()
        videoReleaseDate = response.xpath(
            '/html/body/div[4]/div[1]/div/div/div[2]/div[2]/ul/li[7]/span/text()').extract_first()
        videoUpdateDate = response.xpath(
            '/html/body/div[4]/div[1]/div/div/div[2]/div[2]/ul/li[8]/span/text()').extract_first()
        videoPlotIntroduction = response.xpath(
            '/html/body/div[4]/div[2]/div[2]/text()').extract_first()
        videoImgRemoteUrl = response.xpath(
            '/html/body/div[4]/div[1]/div/div/div[1]/img/@src').extract_first()
        videoScore = response.xpath(
            '/html/body/div[4]/div[1]/div/div/div[2]/div[1]/label/text()').extract_first()
        resourceSource = response.xpath(
            "//div[@class='vodplayinfo']//div//h3/text()").extract_first()
        videoDetailUrl = response.url

        # Strip the "来源：" ("source:") prefix from the resource origin.
        # extract_first() returns None when the node is missing, so guard
        # before calling str.replace (the original crashed here on None).
        if resourceSource:
            resourceSource = resourceSource.replace("来源：", "")

        # Collect every resource link. Each <li> contains an
        # <input name="copy_sel"> immediately followed by the link text node.
        # Bug fix: the original queried an ABSOLUTE xpath inside the loop
        # (always matching the same nodes) and overwrote the list each pass;
        # we now query relative to each <li> and accumulate.
        resourceLinks = []
        for li in response.xpath("//div[@class='vodplayinfo']/div/ul/li"):
            link = li.xpath(
                ".//input[@name='copy_sel']/following-sibling::text()[1]").extract_first()
            if link:
                resourceLinks.append(link.strip())

        item['videoName'] = videoName
        item['videoAlias'] = videoAlias
        item['videoDirector'] = videoDirector
        item['videoProtagonist'] = videoProtagonist
        item['videoType'] = videoType
        item['videoArea'] = videoArea
        item['videoLanguage'] = videoLanguage
        item['videoReleaseDate'] = videoReleaseDate
        item['videoUpdateDate'] = videoUpdateDate
        item['videoPlotIntroduction'] = videoPlotIntroduction
        item['videoImgRemoteUrl'] = videoImgRemoteUrl
        item['videoScore'] = videoScore
        item['resourceSource'] = resourceSource
        item['videoDetailUrl'] = videoDetailUrl
        item['resourceLinks'] = resourceLinks

        yield item

    def parse_page(self, response):
        """Parse one listing page: yield detail requests, then paginate."""
        item = response.meta['item']

        # Each listing entry lives in a <span class="xing_vb4"> holding the
        # detail-page link. Bug fix: the original defined li_list inside an
        # `if` and iterated it outside, risking UnboundLocalError.
        for entry in response.xpath("//span[@class='xing_vb4']"):
            href = entry.xpath('a/@href').extract_first()
            if not href:
                # Missing link would make the concatenation below crash.
                continue
            detail_url = 'http://fqzy.cc' + href
            # Pass a COPY of the item: the original shared one mutable item
            # across all concurrent detail requests, so parse_detail calls
            # clobbered each other's fields.
            yield scrapy.Request(url=detail_url, callback=self.parse_detail,
                                 meta={'item': item.copy()})

        # Pagination: the "尾页" ("last page") link encodes the maximum page
        # number; parse it and request the next page while pages remain.
        page_max_exist_url = response.xpath("//a[@title='尾页']/@href").extract_first()
        if page_max_exist_url is None:
            # No pagination controls on this listing — nothing more to crawl.
            return
        page_max_num = get_page(page_max_exist_url)
        if self.page <= page_max_num:
            self.page += 1
            # Rebuild the base listing URL (without the page suffix) and
            # append the next page number.
            base_url = get_page_model(response.url)
            new_page_url = (base_url + '/page/%s.html') % self.page
            yield scrapy.Request(url=new_page_url, callback=self.parse_page,
                                 meta={'item': item})

    def parse(self, response):
        """Parse the home page's category menu.

        For every (primary category, secondary category) pair found in the
        dropdown menu, seed a request to that category's first listing page
        with a fresh item carrying the two classification names.
        """
        for menu_entry in response.xpath("//div[@class='sddm']//li"):
            # Primary (top-level) category title; skip entries without one
            # (the original's extract()[0] raised IndexError in that case).
            key_name = menu_entry.xpath("a[1]/text()").extract_first()
            if not key_name:
                continue

            # Secondary category titles and their links, paired positionally.
            # Bug fix: the original paired via name_list.index(name), which
            # picks the wrong link when two secondary titles are identical;
            # zip pairs them by position correctly.
            name_list = menu_entry.xpath("div/a/text()").extract()
            link_list = menu_entry.xpath("div/a/@href").extract()
            for second_name, link in zip(name_list, link_list):
                item = FanqieResourceItem()  # fresh item per category pair
                item['videoPrimaryClassification'] = key_name
                item['videoSecondaryClassification'] = second_name
                second_title_url = 'http://fqzy.cc' + link
                yield scrapy.Request(url=second_title_url,
                                     callback=self.parse_page,
                                     meta={'item': item})
