# -*- coding: utf-8 -*-
import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from main.models import ScrapyItem
import json


class IcrawlerSpider(CrawlSpider):
    """Crawl xywy.com Q&A search results and persist each hit as a Django ``ScrapyItem``.

    Instantiated from a Django view, which passes ``unique_id``, ``domain``
    and ``url`` as keyword arguments so each crawl run is dynamic.
    """
    name = 'icrawler'

    def __init__(self, *args, **kwargs):
        # These args come from our Django view; override them here so each
        # spider instance is configured dynamically.
        self.unique_id = kwargs.get('unique_id')

        # NOTE(review): `self.domains` is collected but never used below —
        # `allowed_domains` is hardcoded and ignores the `domain` kwarg.
        # Kept (attribute may be read externally); confirm intent with caller.
        self.domains = [kwargs.get('domain'), 'club.xywy.com']

        self.url = kwargs.get('url')
        # Guard against a missing `url` kwarg: [None] would crash inside
        # Scrapy's request scheduling; an empty list simply crawls nothing.
        self.start_urls = [self.url] if self.url else []
        self.allowed_domains = ['so.xywy.com', 'club.xywy.com']

        # Set rules on the INSTANCE, not the class: assigning to
        # `IcrawlerSpider.rules` mutated shared class state on every
        # instantiation. CrawlSpider._compile_rules() (called from
        # super().__init__) reads `self.rules`, so this works per-instance.
        self.rules = [
            Rule(LinkExtractor(restrict_xpaths='//div[@class="search-detail-hd blue aUnderL"]/a[@href]'),
                 callback='parse_items'),
        ]
        super(IcrawlerSpider, self).__init__(*args, **kwargs)

    def parse_items(self, response):
        """Extract the question description and doctor's answer from a detail
        page and save them (with the page URL) as a JSON-encoded ScrapyItem.
        """
        # Question description; '' default keeps json.dumps safe when missing.
        wt_desc = response.xpath('//p[@class="fl dib fb"]/text()').extract_first('')
        # Doctor's answer: take the whole text content of the answer <div>.
        yshd = response.xpath('//div[@class="pt15 f14 graydeep  pl20 pr20 deepblue"]').xpath('string(.)').extract_first(
            '')

        item = ScrapyItem()
        item.unique_id = self.unique_id
        item.data = json.dumps([response.url, wt_desc, yshd])
        item.save()

        # No item pipeline is configured, so nothing needs to be passed on;
        # yield an empty dict just to keep this a generator callback.
        yield {}
