# -*- coding: utf-8 -*-
from scrapy import Spider
from scrapy.http import FormRequest, Request, Response
from scrapy.selector import Selector
from scrapy.utils.project import get_project_settings

from Wallhaven.items import WallhavenWallpaperItem


class WallhavenanimewallpaperSpider(Spider):
    """Spider that logs into wallhaven.cc and scrapes wallpaper metadata.

    Credentials are read from the file named by the ``ACCOUNT_CONFIG_FILE``
    setting (``key=value`` lines, ``#`` comments).  The gallery pages to crawl
    come from the ``PAGES`` setting; pagination stops after ``MAX_PAGE`` pages
    per gallery.
    """
    name = 'WallhavenAnimeWallpaper'
    allowed_domains = ['wallhaven.cc']
    start_urls = ['http://wallhaven.cc/']  # unused: start_requests() is overridden
    # Class-level declarations kept for backward compatibility; each instance
    # gets its own objects in __init__ so state is never shared across spiders.
    login_info = {}
    pages = []
    max_page = 0

    def __init__(self, *args, **kwargs):
        """Load crawl settings and parse the account file into ``self.login_info``.

        Raises:
            RuntimeError: if the account file lacks ``username`` or ``password``.
        """
        super(WallhavenanimewallpaperSpider, self).__init__(*args, **kwargs)
        # Rebind as an instance attribute -- the class-level dict would
        # otherwise be mutated and shared by every instance.
        self.login_info = {}
        settings = get_project_settings()
        account_file = settings.get('ACCOUNT_CONFIG_FILE')
        self.pages = settings.get('PAGES')
        self.max_page = settings.getint('MAX_PAGE')
        with open(account_file, 'r', encoding='utf-8') as f:
            for raw in f:
                line = raw.strip()
                # Skip blank lines and '#' comments.
                if not line or line.startswith('#'):
                    continue
                # Split on the FIRST '=' only, so values (e.g. passwords or
                # tokens) may themselves contain '='.
                key, sep, value = line.partition('=')
                if sep:
                    self.login_info[key] = value
        if 'username' not in self.login_info or 'password' not in self.login_info:
            raise RuntimeError("Please set your own account info in file: %s" % account_file)

    def start_requests(self):
        """Fetch the login page first: it carries a per-session '_token' field."""
        return [Request('https://wallhaven.cc/login')]

    def parse(self, response: Response):
        """Handle the login page: echo the form's hidden fields (incl. the
        CSRF '_token') and fill in the credentials, then POST them.

        Raises:
            AttributeError: if a named, valueless input has no matching
                credential in ``self.login_info``.
        """
        login_form = response.xpath("//form[@id='login']")
        form_data = {}  # fields POSTed to the login endpoint
        for field in login_form.xpath("./input"):
            attrs = field.attrib
            name = attrs.get('name')
            if name is None:
                # e.g. a nameless submit button -- nothing to send for it.
                continue
            # In practice only '_token' comes pre-filled from the page;
            # username/password are taken from the account file.
            value = attrs.get('value')
            if value:
                form_data[name] = value
            elif name in self.login_info:
                form_data[name] = self.login_info[name]
            else:
                raise AttributeError("Unknown Error!!")
        return [FormRequest(url='https://wallhaven.cc/auth/login', formdata=form_data, callback=self.login_done)]

    def login_done(self, response: Response):
        """On successful login, start crawling the galleries listed in ``PAGES``.

        Each request carries its gallery base URL and a 1-based page index in
        ``meta`` so gallery_page() can paginate.
        """
        username = response.xpath(r"//a[@class='username usermenu-section-title']/text()").extract_first()
        print("Current User: %s" % username)
        requests = []
        for page in self.pages:
            url = 'https://wallhaven.cc/' + page
            requests.append(Request(url=url, callback=self.gallery_page,
                                    meta={'page_index': 1, 'url': url}))
        return requests

    def gallery_page(self, response: Response):
        """Thumbnail listing page: queue every detail page, then the next page
        of this gallery until ``self.max_page`` is reached."""
        for url in response.xpath("//figure/a[@class='preview']/@href").extract():
            yield Request(url=url, callback=self.detail_page)
        page_index = response.meta['page_index']
        base_url = response.meta['url']
        if page_index < self.max_page:
            yield Request(url=(base_url + '?page=' + str(page_index + 1)),
                          meta={'page_index': (page_index + 1), 'url': base_url},
                          callback=self.gallery_page)

    def detail_page(self, response: Response):
        """Wallpaper detail page: extract one WallhavenWallpaperItem."""
        item = WallhavenWallpaperItem()
        item['resolution'] = response.xpath("//h3[@class='showcase-resolution']/text()").extract_first()  # e.g. 1920x1080
        item['tags']       = ";".join(response.xpath("//ul[@id='tags']/li//a[@class='tagname']/text()").extract())  # e.g. anime girl;tears
        item['name']       = response.xpath("//img[@id='wallpaper']/@data-wallpaper-id").extract_first()  # e.g. abcdef
        item['flag']       = ";".join(response.xpath("//input[@name='purity' and @checked]/@value").extract()).upper()  # SFW / SKETCHY / NSFW
        # Sometimes the checked radio is absent; fall back to the first purity
        # label's text.  Guard against extract_first() returning None, which
        # would otherwise crash on .upper().
        if not item['flag']:
            label = response.xpath("//form[@id='wallpaper-purity-form']/label[1]/text()").extract_first()
            item['flag'] = (label or '').upper()
        item['url']        = response.xpath("//img[@id='wallpaper']/@src").extract_first()  # url of full size image
        return item
