# encoding=utf8
import json

from scrapy import Request
from scrapy.selector import Selector
from scrapy.spiders import Spider, Rule

# from DUPEFILTER_CLASS = 'scrapy.dupefilter.RFPDupeFilter'
from scrapy_cst.items import ScrapyCstItem


class CstSpider(Spider):
    """Crawl www.cst.zju.edu.cn and yield one ScrapyCstItem per page.

    Every parsed page also yields follow-up Requests for each internal
    link found on it, so the spider walks the whole site starting from
    the front page.
    """

    name = "cst_spider"
    domain = "http://www.cst.zju.edu.cn/"
    allowed_domains = ["www.cst.zju.edu.cn"]
    start_urls = [domain]

    # Running count of pages parsed; used only for progress printing.
    cnt = 0

    def comb_text(self, lists, sep=" "):
        """Join the text fragments in *lists* with *sep*.

        Each fragment has non-breaking spaces (U+00A0) replaced with
        plain spaces and is stripped; fragments that end up empty are
        dropped. Returns "" when nothing survives the filtering.
        """
        cleaned = (l.replace(u"\xa0", " ").strip() for l in lists)
        # str.join replaces the old quadratic `+=` loop and also fixes
        # the trailing-separator trim, which removed only ONE character
        # (ret[:-1]) even for multi-character separators.
        return sep.join(l for l in cleaned if l != '')

    def parse_list(self, sel, response, item):
        """Fill item['datas'] with the link titles of a listing page."""
        lists = sel.xpath('//span[@class="lm_new_zk"]/a/text()').extract()
        item['datas'] = self.comb_text(lists, " ")

    def parse_detail(self, sel, response, item):
        """Fill item['datas'] with the article body of a detail page."""
        lists = sel.xpath('//div[@class="vid_wz"]/descendant::text()').extract()
        item['datas'] = self.comb_text(lists, "\n")

    def parse_zswd(self, sel, response, item):
        """Parse an admissions-Q&A page: first table row holds the title
        (prefixed with a Chinese label that is stripped), the remaining
        rows hold the body text."""
        body = sel.xpath('//div[@class="consultation"]/table/tr/td')[1].xpath('./table/tr')
        title_list = body[0].xpath('./descendant::text()').extract()
        item['title'] = self.comb_text(title_list, "").replace(u"标题 ： ", "")
        lists = body[1:].xpath('./descendant::text()').extract()
        item['datas'] = self.comb_text(lists, " ")

    def parse(self, response):
        """Dispatch on the URL's query-string action, extract the page's
        text into an item, then yield Requests for every internal link
        plus the finished item (item['refs'] is the JSON list of links).
        """
        self.cnt += 1
        print(str(self.cnt) + "\t" + response.url)
        sel = Selector(response)
        item = ScrapyCstItem()
        item['url'] = response.url
        item['title'] = sel.xpath("//title/text()")[0].extract()

        url = response.url
        if "a=tlist" in url or "a=small_list" in url or url == CstSpider.domain:
            self.parse_list(sel, response, item)
        elif "a=detail" in url:
            self.parse_detail(sel, response, item)
        elif "a=zswd_detail" in url:
            self.parse_zswd(sel, response, item)
        elif url == CstSpider.domain + "index.php":
            # Duplicate of the front page; nothing to extract.
            return
        else:
            print("ERROR! " + response.url)
            return

        refs = []
        for href in sel.xpath("//a/@href").extract():
            # Normalize relative hrefs: drop one leading "." and any
            # leading slashes so they can be appended to the domain.
            if href.startswith("."):
                href = href[1:]
            href = href.lstrip('/')
            # Skip anchors, absolute/external links, mailto links and
            # uploaded-file paths.
            if href != "#" and not href.startswith(
                    ("http", "mailto:", "uploadfile", "file", "upfile")):
                full = CstSpider.domain + href
                refs.append(full)
                yield Request(full, callback=self.parse)
        # json.dumps fixes the empty case (the old manual string build
        # produced '[""]' instead of '[]') and escapes quotes/backslashes;
        # separators=(',', ':') keeps the output byte-identical to the
        # old format for ordinary URLs.
        item['refs'] = json.dumps(refs, ensure_ascii=False, separators=(',', ':'))
        yield item
