import scrapy

from ScrapyObject.spiders.utils.url_utils import *

'''
scrapy crawl ydah -o ydah.json
https://avfolder.com/
'''


class YdahSpider(scrapy.Spider):
    """Crawl avfolder.com category listing pages and the video pages they link to.

    Listing pages yield one item per video card (with a category tag) and
    follow in-site links; any other page is treated as a detail page whose
    playable video URL(s) are extracted via ``get_video_url_three``.

    Run with: ``scrapy crawl ydah -o ydah.json``
    """

    name = "ydah"
    # URL scheme prefix
    prefix = 'https://'
    # Site host name (without TLD)
    website = 'avfolder'
    # TLD plus trailing slash
    suffix = '.com/'
    allowed_domains = [website + '.com']

    # start_urls = ['https://avfolder.com/zh/TOKIO']

    # Category path segment -> (number of listing pages, tag label).
    # Page counts are hard-coded snapshots of the site's pagination depth
    # at the time of writing; bump them here if the site grows.
    CATEGORIES = {
        'censored': (2000, "有码"),
        'uncensored': (417, "无码"),
        'western': (581, "欧美"),
        'fc2': (157, "FC2"),
        'hanime': (283, "成人动画"),
        'chinese': (2000, "国产"),
    }

    def __init__(self, *args, **kwargs):
        # Call the base Spider initializer so Scrapy's ``-a`` spider arguments
        # and name/start_urls handling keep working (the original skipped this).
        super().__init__(*args, **kwargs)
        # Running counter used as a synthetic, monotonically increasing item id.
        self.i = 0

    def start_requests(self):
        """Yield one request per listing page of every category in CATEGORIES."""
        base = self.prefix + self.website + self.suffix
        for category, (pages, _tag) in self.CATEGORIES.items():
            for page in range(1, pages + 1):
                yield scrapy.Request(f"{base}zh/{category}/online?page={page}")

    def parse(self, response):
        """Parse a response: listing pages emit items and follow links; other
        pages are treated as video detail pages.

        :param response: the Scrapy response being parsed
        :returns: generator of video items and follow-up Requests
        """
        content = get_data(response)
        base = self.prefix + self.website + self.suffix
        if "/online?page=" in response.url:
            url_list = response.xpath("//a[@class='video-list-item-link']/@href").extract()
            pic_list = response.xpath("//img[@class='image-background__image lazyload']/@data-src").extract()
            name_list = response.xpath("//img[@class='image-background__image lazyload']/@alt").extract()
            # Map the listing category in the URL back to its tag label.
            tags = ""
            for category, (_pages, tag) in self.CATEGORIES.items():
                if response.url.startswith(f"{base}zh/{category}/online?page="):
                    tags = tag
                    break
            # One item per video card. zip() stops at the shortest list, so a
            # length mismatch between the three extractions can no longer raise
            # IndexError (the original indexed all three by url_list's length).
            for url, name, pic in zip(url_list, name_list, pic_list):
                self.i += 1
                yield get_video_item(id=self.i, tags=tags, url=url, name=name, pUrl=pic, vUrl="")
            # Follow in-site links so their pages get parsed in turn.
            for url in url_list:
                if url.startswith('/'):
                    full_url = split_joint(base, url)
                    if full_url.startswith("https://avfolder.com/zh/"):
                        yield scrapy.Request(full_url, callback=self.parse)
                elif url.startswith(('http', 'www')):
                    if url.startswith("https://avfolder.com/zh/"):
                        yield scrapy.Request(url, callback=self.parse)
        else:
            # Detail page: extract the playable video URL(s) from the raw body.
            for video_url in get_video_url_three(content):
                self.i += 1
                yield get_video_item(id=self.i, tags="", url=response.url, name="", pUrl="", vUrl=video_url)