import re
import uuid
from urllib.parse import urlparse

from scrapy import Request
from scrapy.contrib.linkextractors import LinkExtractor
from scrapy_redis.spiders import RedisSpider

from RSpider.items import OschinaItem
# from RSpider.statscol.graphite import RedisStatsCollector
#from RSpider.items import TestItem
class BaseSpider(RedisSpider):
    """Breadth-first crawler seeded from a Redis queue.

    Follows only links whose host matches one of the domains derived
    from ``self.redis_data`` (populated by scrapy-redis from the
    ``spider_url:start_urls`` key) and yields one ``OschinaItem`` per
    extracted link carrying the current page's text, title, and a
    unique hash.
    """

    name = 'spider'
    redis_key = 'spider_url:start_urls'
    # start_urls = ['http://www.elian.net/']

    @staticmethod
    def _build_domain_regex(urls):
        """Return an alternation regex matching any of *urls*' domains.

        Each URL's netloc has a leading ``www.`` stripped and is
        ``re.escape``-d so literal dots do not match arbitrary
        characters. Returns '' when *urls* is empty or None (which
        makes the caller's ``.*<reg>.*`` pattern allow everything).

        The original code wrapped each domain in ``[...]`` — a regex
        *character class* — and never escaped it, so the filter matched
        single characters instead of whole domains; this builds a
        proper non-capturing group ``(?:a|b)`` instead.
        """
        if not urls:
            return ""
        domains = [
            re.escape(urlparse(url).netloc.replace("www.", ""))
            for url in urls
        ]
        return "(?:" + "|".join(domains) + ")"

    def parse(self, response):
        """Extract in-domain links; schedule them and yield items.

        Yields a ``Request`` (re-entering ``parse``) plus an
        ``OschinaItem`` for every link on the page whose domain is in
        the Redis-supplied whitelist.
        """
        str_reg = self._build_domain_regex(getattr(self, 'redis_data', None))
        # Link extractor restricted to the whitelisted domains.
        link_extractor = LinkExtractor(allow=".*" + str_reg + ".*")
        links = link_extractor.extract_links(response=response)

        # The page text and title do not vary per link, so extract them
        # once (the original re-ran both XPath extractions inside the
        # per-link loop).
        text_nodes = response.xpath("//html//text()").extract()
        page_content = "".join(
            node.strip().replace(" ", "").replace("\n", "").replace("'", "")
            for node in text_nodes
        )
        title_nodes = response.xpath("/html/head/title/text()").extract()
        # Join the extracted title strings; the original stringified the
        # list itself, storing '[Title]' with literal brackets.
        page_title = "".join(title_nodes)

        for link in links:
            # Schedule the linked page for crawling.
            yield Request(link.url, callback=self.parse)

            item = OschinaItem()
            item['c_Url'] = link.url
            # None when the page produced no text nodes at all,
            # matching the original behaviour.
            item['c_content'] = page_content if text_nodes else None
            item['c_title'] = page_title
            # uuid1 (time/MAC based) kept for compatibility with
            # existing stored hashcodes.
            item['hashcode'] = str(uuid.uuid1())
            yield item






