#-*- coding:utf-8 -*-
from urlparse import parse_qs
from urllib import urlencode

from scrapy.spider import BaseSpider
from scrapy.http import Request
from scrapy.selector import Selector
from scrapy import log

from zuowen.items import ZuowenItem


class A51testSpider(BaseSpider):
    """Spider that crawls composition (zuowen) articles from www.51test.net.

    Crawl flow:
      parse   -> sitemap index: follow each top-level category link
      parse_1 -> sub-category tables (adds two nested category levels)
      parse_2 -> paginated article-list pages
      parse_3 -> article detail pages, yielding one ZuowenItem each

    The (category_0, category_1, category_2) tuple is threaded through
    request meta and stored on the item as a 3-element list.
    """
    name = "51test"
    start_urls = (
        'http://www.51test.net/sitemap/zuowen/',
        )

    def parse(self, response):
        """Parse the sitemap index and follow each top-level category link."""
        sel = Selector(response)
        for sub_sel in sel.xpath("//a[@class='ared']"):
            url = sub_sel.xpath("@href").extract()[0].strip()
            # The first 5 characters of the anchor text are site boilerplate;
            # keep only the category name that follows. (Assumes a fixed-width
            # prefix -- NOTE(review): verify against the live page markup.)
            category = sub_sel.xpath("text()").extract()[0].strip()[5:]
            yield Request(url, meta={"category": category}, callback=self.parse_1)

    def parse_1(self, response):
        """Parse a category page's sub-category table.

        In each table row the first <a> names the second-level category;
        every following <a> is a third-level category whose listing page
        is scheduled for parse_2.
        """
        sel = Selector(response)
        category_0 = response.meta["category"]
        # Skip the first <tr>: it is the table header row.
        for sub_sel in sel.xpath("//div[@class='news-2-left-content']/table/tr")[1:]:
            category_1 = None
            for index, ssub_sel in enumerate(sub_sel.xpath(".//a")):
                category_2 = None
                url = ssub_sel.xpath("@href").extract()[0].strip()
                category = ssub_sel.xpath("text()").extract()[0].strip()
                if index == 0:
                    category_1 = category
                else:
                    category_2 = category
                    yield Request(url, meta={"category": (category_0, category_1, category_2)}, callback=self.parse_2)

    def parse_2(self, response):
        """Parse a paginated article-list page.

        Yields one Request per article (-> parse_3) and, when a
        "next page" link is present, a Request back into parse_2.
        """
        sel = Selector(response)
        category = response.meta["category"]
        for sub_sel in sel.xpath("//div[@class='news-list-left-content']/ul/li"):
            # Only <li> entries with both a link and a date span are articles.
            if sub_sel.xpath("a") and sub_sel.xpath("span"):
                url = "http://www.51test.net" + sub_sel.xpath("a/@href").extract()[0].strip()
                title = sub_sel.xpath("a/text()").extract()[0].strip()
                zuowen_time = sub_sel.xpath("span/text()").extract()[0].strip()
                # Build a fresh meta dict per request instead of mutating
                # response.meta in place: mutating the shared dict between
                # yields relies on Request's defensive copy semantics and
                # pollutes the response object for any later reader.
                yield Request(url,
                              meta={"category": category,
                                    "title": title,
                                    "zuowen_time": zuowen_time},
                              callback=self.parse_3)

        for sub_sel in sel.xpath("//div[@class='news-list-left-content']/form//a | //div[@class='news-list-left-content']/ul/div//a"):
            text = sub_sel.xpath("text()").extract()[0].strip()
            if text == u"下一页" or text == u"点击进入下一页":
                # Re-encode the href to GB18030 (the site's page encoding)
                # and normalize the query string. NOTE: parse_qs keeps only
                # the first value of each repeated parameter, and urlencode
                # may reorder parameters -- both harmless for this site's
                # simple pagination URLs.
                url_args, url_params = sub_sel.xpath("@href").extract()[0].encode("GB18030").strip().split("?")
                url = "http://www.51test.net" + url_args + "?" + urlencode({k: v[0] for k, v in parse_qs(url_params).iteritems()})
                yield Request(url, meta={"category": category}, callback=self.parse_2)

    def parse_3(self, response):
        """Parse an article detail page and yield a populated ZuowenItem."""
        sel = Selector(response)
        meta = response.meta
        # The article body appears under several different container layouts,
        # hence the alternation of XPath expressions.
        text = "".join(sel.xpath("//div[@class='show_content']/p//text() | //div[@class='show_content']/text() | //div[@class='content']//text() | //blockquote//text()").extract()).strip()
        if not text:
            # text is already stripped above, so a plain truthiness check
            # suffices; log pages whose body could not be located.
            self.log("url:%s text is empty!" % response.url, level=log.WARNING)
        i = ZuowenItem()
        i["title"] = meta["title"]
        i["category"] = list(meta["category"])
        i["zuowen_time"] = meta["zuowen_time"]
        i["text"] = text
        yield i