import scrapy
from scrapy_redis.spiders import RedisCrawlSpider
import re
from scrapy.spiders import Rule
from scrapy.linkextractors import LinkExtractor
from ..items import XiaoYuSetUrl,XiaoYuGetDetail
class SetUrl(RedisCrawlSpider):
    """Distributed crawler for the bbs.xmfish.com forum.

    Seed URLs are popped from the Redis list named by ``redis_key``
    (scrapy-redis drives scheduling/dedup across worker processes).
    Two link rules are applied to every fetched page:

    * thread detail pages  ``read-htm-tid-<id>.html``   -> :meth:`parse_detail`
    * board index pages    ``thread-htm-fid-<id>.html`` -> :meth:`page_parse`
    """

    name = 'seturl2'
    redis_key = 'XiaoYu:url'
    allowed_domains = ['bbs.xmfish.com']
    rules = (
        Rule(LinkExtractor(allow=(r'read-htm-tid-\d+\.html',)),
             callback='parse_detail', follow=False),
        Rule(LinkExtractor(allow=(r'thread-htm-fid-\d+\.html',),
                           restrict_xpaths=('//tr/th/h2',)),
             callback='page_parse', follow=False),
    )

    def page_parse(self, response):
        """Yield the full list of paginated URLs for one forum board.

        Reads the total page count from the pager widget ("共N页" means
        "N pages in total"; the widget is absent on single-page boards)
        and expands ``<board>.html`` into ``<board>-page-1.html`` ..
        ``<board>-page-N.html``.

        Yields:
            dict: ``{'urls': [str, ...]}`` with every board page URL.
        """
        page_counts = response.xpath(
            '//div[@class="fl"]/div[@class="pages"]/span[@class="fl"]'
        ).re(r'共(\d+)页')
        base = str(response.url).split('.html')[0]
        template = base + '-page-{}.html'
        # Boards with a single page have no pager element at all.
        total = int(page_counts[0]) if page_counts else 1
        yield {'urls': [template.format(n) for n in range(1, total + 1)]}

    def parse_detail(self, response):
        """Extract one thread's metadata and body into a XiaoYuGetDetail item.

        Threads in restricted boards ("认证版块") are skipped — their body
        is not readable without authentication.  Extraction is best-effort:
        any XPath/unpack failure is logged (not raised) so one malformed
        page cannot kill the crawl.
        """
        notice = response.xpath(
            'string(//div[@class="regIgnore"])').extract_first() or ''
        if re.search('认证版块', notice):
            self.logger.info('认证版块 无权访问: %s', response.url)
            return
        try:
            url = response.url
            # Thread id is the first number in the URL: read-htm-tid-<id>.html
            thread_id = re.search(r'[1-9]\d*', url).group()
            title = response.xpath(
                'string(//h1[@class="read_h1"])').extract_first()
            # The stats list holds exactly two counters: views then replies.
            see, reply = response.xpath(
                '//td[@class="floot_left"]/div/ul/li/em/text()').extract()
            author = response.xpath(
                '//div[@class="readName b"]/a/text()').extract_first()
            homepage = (str(url).split('/read')[0] + response.xpath(
                '//div[@class="floot_leftdiv"]/a/@href').extract_first())
            # All first-post fields live under the same floor container.
            first_post = ('//div[@class="read_t"][@id="readfloor_tpc"]'
                          '//td[@class="floot_bottom"][@id="td_tpc"]')
            posted_at = response.xpath(
                first_post + '//span[re:test(@title,'
                r'"\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}")]/@title'
            ).extract_first()
            cates = response.xpath(
                'string(' + first_post + '/div[@class="cates"])'
            ).extract_first()
            content = response.xpath(
                'string(' + first_post + '//div[contains(@class,"f14")])'
            ).extract_first()
            imgs = response.xpath(
                first_post + '//div[contains(@class,"f14")]//img'
            ).re(r'=\"(http://img.*?)\"')
            yield XiaoYuGetDetail({
                'id': thread_id, 'url': url, 'title': title, 'see': see,
                'reply': reply, 'author': author, 'homepage': homepage,
                'time': posted_at, 'cates': cates, 'content': content,
                'imgs': imgs,
            })
        except Exception:
            # Best-effort scrape: record the failure instead of hiding it.
            self.logger.exception('Failed to parse thread page %s', response.url)