from lxml import etree

import scrapy, os
from scrapy.selector import Selector
from scrapy.http import Request
from scrapy.spiders import CrawlSpider, Rule

from crawl_demo.itemloaders import LinkLoader
from crawl_demo.items import OschinaItem, LinkItem
from scrapy.loader import ItemLoader
from scrapy.linkextractors import LinkExtractor

# Site root used to absolutize relative article hrefs found on listing pages.
base_url = 'https://www.oschina.net/'

# AJAX endpoint for paginated news lists; a page number is appended to fetch page N.
page_url = 'https://www.oschina.net/action/ajax/get_more_news_list?newsType=&p='


class OschinaSpider(scrapy.spiders.Spider):
    """Crawl oschina.net industry news.

    Collects article links from each listing page, follows every article
    for its content, and paginates through the site's AJAX news endpoint
    (``page_url``) up to page 10.
    """

    # Next AJAX page to request; pagination stops once it reaches 10.
    index = 2
    name = "oschina"
    start_urls = [
        "https://www.oschina.net/news/industry",
    ]
    custom_settings = {
        "ITEM_PIPELINES": {
            "crawl_demo.pipelines.OschinaPipeline": 1,
        }
    }

    def parse(self, response):
        """Yield a Request per on-site article link, then one for the next page."""
        for href in response.xpath('//div[@class="main-info box-aw"]/a/@href').extract():
            # Relative hrefs are on-site articles; absolute ('http...') hrefs
            # point off-site and are skipped.
            if 'http' not in href:
                url = base_url + href
                self.logger.debug('match: %s', url)
                yield Request(url, callback=self.get_content)
            else:
                self.logger.debug('not match: %s', href)

        # Follow the AJAX pagination endpoint until page 10.
        if self.index < 10:
            self.index += 1
            next_page = '%s%s' % (page_url, self.index)
            self.logger.debug('next page url: %s', next_page)
            yield Request(next_page, callback=self.parse)

    def get_content(self, response):
        """Scrape one article page into an OschinaItem (raw content HTML + URL)."""
        item = OschinaItem()
        item['content'] = response.xpath('//div[@class="news-content"]').extract_first()
        item['url'] = response.url
        yield item


class LianjiaSpider(CrawlSpider):
    """Follow 58.com Shanghai rental listing pagination via CrawlSpider rules."""

    name = "lianjia"

    start_urls = [
        "http://sh.58.com/chuzu/"
    ]

    custom_settings = {
        "ITEM_PIPELINES": {
            "crawl_demo.pipelines.CrawlDemoPipeline": 1,
        }
    }

    rules = [
        # Follow pagination links ("pnN" pages). Raw string with escaped dots
        # so the pattern matches the literal host rather than any character.
        Rule(LinkExtractor(allow=(r'sh\.58\.com/chuzu/pn\d+',)), follow=True, callback='parse_item'),
    ]

    def parse_item(self, response):
        """Log the crawled listing page and emit a (currently empty) item."""
        self.logger.info('parsed response: %s', response)
        # NOTE(review): item fields are never populated here — presumably a
        # demo stub; the downstream pipeline receives an empty OschinaItem.
        yield OschinaItem()


class LinkSpider(CrawlSpider):
    """Crawl huxiu.com: recursively follow channel/column listings and
    scrape article pages into LinkItem via LinkLoader."""

    name = "link"
    allowed_domains = ["huxiu.com"]
    start_urls = [
        "http://www.huxiu.com/index.php"
    ]

    custom_settings = {
        "ITEM_PIPELINES": {
            "crawl_demo.pipelines.JsonWriterPipeline": 1,
        }
    }

    rules = (
        # Listing pages: no callback, so Scrapy defaults to follow=True and
        # recurses. Patterns are raw strings to avoid invalid \d / \. escapes.
        Rule(LinkExtractor(allow=(r'/channel/.+\.html',), deny=(r'deny\.php',))),
        Rule(LinkExtractor(allow=(r'/vipColumn/contentList/\d+',), deny=(r'deny\.php',))),
        # Article pages: parsed by parse_item, not followed further.
        Rule(LinkExtractor(allow=(r'/article/\d+\.html',)), callback='parse_item'),
    )

    def parse_item(self, response):
        """Extract title, post time and URL from one article page.

        Returns a LinkItem populated through the project's LinkLoader.
        """
        self.logger.info('Hi, this is an item page! %s', response.url)
        detail = response.xpath('//div[@class="article-wrap"]')
        title = detail.xpath('//h1[@class="t-h1"]/text()').extract_first()
        posttime = detail.xpath(
            '//div[@class="article-author"]/span[@class="article-time"]/text()').extract_first()

        loader = LinkLoader(item=LinkItem(), response=response)
        loader.add_value('title', title)
        loader.add_value('posttime', posttime)
        loader.add_value('link', response.url)
        yield loader.load_item()
