# -*- coding: utf-8 -*-
import scrapy
from KKNet.items import KknetItem


class KknetSpider(scrapy.Spider):
    """Crawl bilingual (English/Chinese) movie-dialogue transcripts from
    kekenet.com.

    Crawl flow:
        category index -> movie list page(s) -> per-episode page -> dialogue
    Each episode page yields one ``KknetItem`` whose ``dialoge`` field is a
    list of tab-separated "chinese\\tenglish" lines.
    """

    name = 'kknet'
    allowed_domains = ['kekenet.com']
    start_urls = ['http://www.kekenet.com/video/movie/',
                  'http://www.kekenet.com/video/tv/',
                  'http://www.kekenet.com/video/englishplay/',
                  'http://www.kekenet.com/Article/videolis/video/']

    # Site links in the list pages are relative; prepend this to make them
    # absolute.
    root_domain = 'http://www.kekenet.com'

    def start_requests(self):
        """Kick off one request per category start URL.

        NOTE(review): this mirrors Scrapy's default ``start_requests``
        (apart from ``dont_filter``); it is kept explicit to preserve the
        original behavior.
        """
        for url in self.start_urls:
            yield scrapy.Request(url, self.parse)

    def parse(self, response):
        """Extract the per-movie links from a category index page and
        schedule each movie's list page.

        :param response: category index page response.
        """
        movie_links = response.xpath('//div[@class="lb_box"]/ul/li/a/@href').extract()

        for link in movie_links:
            # Links are site-relative paths; build an absolute URL.
            url = f'{self.root_domain}{link}'
            yield scrapy.Request(url, callback=self.parse_movie_list_page)

    def parse_movie_list_page(self, response):
        """Read the pagination bar of a movie list page and schedule a
        request for every page of the list.

        Falls back to crawling only the current page when the pagination
        bar is missing or its last label is not numeric.

        :param response: first movie list page response.
        """
        pages = response.xpath('//div[contains(@class,"page") and contains(@class,"th")]/a/text()').extract()

        try:
            # BUG FIX: the original called int(pages[-1][-1]), which parsed
            # only the LAST DIGIT of the last page label ("12" -> 2) and so
            # silently dropped most pages of long lists.  pages[-1] is also
            # now inside the try so an empty bar takes the single-page
            # fallback instead of raising an uncaught IndexError.
            last_page = int(pages[-1])
        except (IndexError, ValueError):
            # No pagination bar, or a non-numeric label ("尾页" etc.):
            # treat the response as a single-page list.
            last_page = 1

        # Page 0 is the current response itself.
        yield scrapy.Request(response.url, callback=self.parse_move_item_link)

        # Remaining pages follow the site's List_<n>.shtml scheme.
        for i in range(1, last_page):
            url = f'{response.url}/List_{i}.shtml'
            yield scrapy.Request(url, self.parse_move_item_link)

    def parse_move_item_link(self, response):
        """Extract every episode link from one movie list page and schedule
        the dialogue pages.

        :param response: a single movie list page response.
        """
        move_item_links = response.xpath('//div[@class="list_box_2"]/ul/li/a/@href').extract()

        for item_link in move_item_links:
            url = f'{self.root_domain}{item_link}'
            yield scrapy.Request(url, self.parse_movie_dialoge)

    def parse_movie_dialoge(self, response):
        """Scrape the paired English/Chinese dialogue lines from an episode
        page and return them as one ``KknetItem``.

        :param response: episode dialogue page response.
        :return: ``KknetItem`` with ``dialoge`` set to a list of
            "chinese\\tenglish" strings; unpaired trailing lines are
            dropped by ``zip``.
        """
        english_items = response.xpath('//div[@class="info-qh"]/div[@class="qh_en"]')
        english_items = english_items.xpath('string(.)').extract()

        chinese_items = response.xpath('//div[@class="info-qh"]/div[@class="qh_zg"]')
        chinese_items = chinese_items.xpath('string(.)').extract()

        kItem = KknetItem()
        kItem['dialoge'] = [
            f'{chinese.strip()}\t{english.strip()}'
            for chinese, english in zip(chinese_items, english_items)
        ]

        return kItem
