# -*- coding: utf-8 -*-
from datetime import datetime

import scrapy
from bs4 import BeautifulSoup as bsoup
from scrapy import Selector
from scrapy.http import Response

from giligili_crawler import items


class HomeSpider(scrapy.Spider):
    """Spider for https://giligi.li.

    Crawl flow:
      1. ``parse``             -- listing page: follow pagination, parse each article
      2. ``change_detail``     -- article detail page: extract download links
      3. ``parse_play_page_all`` / ``parse_video_url`` -- inner play pages:
         extract the actual .mp4 video URLs
    """

    name = 'home'
    allowed_domains = ['giligi.li']

    # A set works here: Scrapy only iterates start_urls.
    start_urls = {
        'https://giligi.li',
    }

    def parse(self, response):
        """Listing-page entry point: schedule pagination, then parse articles."""
        yield from self.parse_next_page_url(response)
        yield from self.parse_article(response)

    def parse_next_page_url(self, response):
        """Yield a request back to ``parse`` for every pagination link."""
        page_links = response.xpath('.//a[contains(@href,"https://giligi.li/page")]/@href').extract()
        for url in page_links:
            yield scrapy.Request(url=url, callback=self.parse)

    def parse_article(self, response):
        """
        Parse the basic information of every <article> on a listing page.

        Yields a ``GiliArticleItem`` plus a follow-up request to the article's
        detail page, skipping announcement articles and non-MMD collections.

        :param response: listing-page response
        :return: iterator of items and requests
        """
        for article in response.xpath(".//article"):
            item = items.GiliArticleItem()
            item['title'] = article.xpath('.//h2/a[1]/text()').extract_first()
            item['label'] = article.css('a[class*="cat label label-important"]::text').extract_first()
            date_string = article.css('p[class*="text-muted time"]::text') \
                .extract()[-1].replace(u'发布于', '').strip()
            item['update_time'] = int(datetime.strptime(date_string, '%Y-%m-%d').timestamp())
            item['image_url'] = article.css('img.thumb::attr(data-original)').extract_first()
            item['intro'] = ''.join(article.css('p.note::text').extract())
            # Extract the detail URL once and reuse it for both the article id
            # and the follow-up request (it was extracted twice before, and
            # .split() would raise AttributeError on a missing anchor).
            detail_url = article.css('h2>a[href][title]::attr(href)').extract_first()
            item['article_id'] = detail_url.split('/')[-1] if detail_url else None
            item['tags'] = article.css('span.post-tags a[rel=tag]::text').extract()

            # extract_first() returns None for missing nodes; substitute ''
            # so the substring checks below cannot raise TypeError.
            title = item['title'] or ''
            label = item['label'] or ''
            if '公告' not in label and ('合集' not in title or 'MMD' in title):
                yield item
                if detail_url:
                    yield scrapy.Request(url=detail_url, callback=self.change_detail)

    def change_detail(self, response: Response):
        """
        Parse the article detail page: hide every element on the page, then
        un-hide the article content subtree and its ancestors, and delegate
        link extraction to ``parse_detail``.

        :param response: detail-page response
        """
        bs = bsoup(response.body, 'lxml')
        body = bs.body
        # Mark every element hidden first ...
        for tag in body.find_all(True):
            tag['hidden'] = ''
        # ... then un-hide the article content and everything inside it.
        article_tag = body.find('article', class_='article-content')
        del article_tag['hidden']
        for tag in article_tag.find_all(True):
            del tag['hidden']
        # Ancestors must also be visible for the content to render.
        for tag in article_tag.parents:
            del tag['hidden']
            tag['style'] = 'min-height:auto;'
        # Hide the centered paragraph (notice/ad block) if present.
        li_world = body.select_one('p[style="text-align: center;"]')
        if li_world:
            li_world['hidden'] = ''
        item = items.GiliArticleDetailItem()
        # item['content'] = str(bs.prettify())
        yield from self.parse_detail(item, response)

    def parse_detail(self, item: items.GiliArticleDetailItem, response: Response):
        """
        Extract download links (Baidu netdisk share + magnet) from the article
        content and follow the inner-world play-page link when one exists.

        :param item: partially-filled detail item from ``change_detail``
        :param response: detail-page response
        """
        article_tag = response.css('article.article-content')

        # Baidu netdisk share URL followed by its 4-character extraction code.
        # Dots in the host are escaped so '.' only matches a literal dot.
        bai_du = article_tag.re(r'(http(s|)://pan\.baidu\.com/s/\w*).*?提取码(: |：)([a-zA-Z0-9]{4})')

        # Magnet link, optionally including a display-name parameter.
        xun_lei = article_tag.re(r'(magnet:\?xt=urn:btih:\w+(&dn=.+?\s)?)')

        item['download_urls'] = list()

        if len(bai_du) > 0:
            # All captured groups joined into one '~~~'-separated string.
            item['download_urls'].append("~~~".join(bai_du))

        if len(xun_lei) > 0:
            item['download_urls'].append(xun_lei[0].lstrip())

        item['article_id'] = response.url.split('/')[-1]

        entry = article_tag.xpath('.//a[contains(text(),"进入里世界")]/@href').extract_first()
        # NOTE(review): the item is only yielded when an inner-world link is
        # present; articles without one are dropped entirely -- confirm that
        # this is intentional.
        if entry and len(entry.split('/')) > 3:
            # Smuggle the article id to the play-page callbacks via a header.
            response.request.headers['article_id'] = item['article_id']
            yield scrapy.Request(url=entry, headers=response.request.headers, callback=self.parse_play_page_all)
            yield item

    def parse_play_page_all(self, response: Response):
        """Parse the first play page, then every paginated sibling page."""
        yield from self.parse_video_url(response)
        page_urls = response.css('div.article-paging').re(r'http.*?/[0-9]')
        for url in page_urls:
            yield scrapy.Request(url=url, headers=response.request.headers, callback=self.parse_video_url)

    def parse_video_url(self, response: Response):
        """
        Extract the .mp4 video URLs from a play page.

        The article id is recovered from the request header planted in
        ``parse_detail``.
        """
        selector = Selector(response=response)
        video_urls = selector.re("http.*?mp4")
        item = items.VideoUrlItem()
        # De-duplicate via a set, stripping JSON-escaped backslashes,
        # then store as a list.
        unique_urls = {url.replace('\\', '') for url in video_urls}
        item['urls'] = list(unique_urls)
        # Scrapy header values are bytes; decode back to str.
        item['article_id'] = response.request.headers['article_id'].decode('utf-8')
        yield item
