# -*- coding: utf-8 -*-
import scrapy
import re
from scrapy.http import Request
from urllib import parse
# scrapy crawl sxxc -o sxxc02.json -s FEED_EXPORT_ENCODING=utf-8
class SxxcSpider(scrapy.Spider):
    """Crawl sxxc.cn category-10 listing pages and yield one item per
    detail page with the contact name and mobile phone number.

    Run with:
        scrapy crawl sxxc -o sxxc02.json -s FEED_EXPORT_ENCODING=utf-8
    """
    name = 'sxxc'
    allowed_domains = ['sxxc.cn']
    # Page 1 has no explicit &page= parameter; pages 2..499 do.
    # (Idiomatic replacement for the original class-body while loop.)
    start_urls = ['http://www.sxxc.cn/category.php?id=10'] + [
        'http://www.sxxc.cn/category.php?id=10&page=' + str(page)
        for page in range(2, 500)
    ]

    def parse(self, response):
        """Follow every detail-page link found on a listing page."""
        post_urls = response.css('.list_module span.title a::attr(href)').extract()
        for post_url in post_urls:
            # Links may be relative; resolve against the listing-page URL.
            yield Request(url=parse.urljoin(response.url, post_url),
                          callback=self.parse_detail)

    def parse_detail(self, response):
        """Extract the name and phone number from a detail page.

        Yields a dict with keys "name" and "phone". "phone" is the first
        11-digit mobile number (starting with 1) found in the onclick
        attribute, or the string 'no phone' when none is present.
        """
        name = response.css('.table_2 tr td:nth-child(1)::text').extract_first()
        onclick = response.css('.table_2 tr td:nth-child(2) input::attr(onclick)').extract_first()
        # Bug fix: extract_first() returns None when the selector matches
        # nothing, and re.findall(pattern, None) raises TypeError.
        matches = re.findall(r"1\d{10}", onclick) if onclick else []
        # Bug fix: the original assigned the whole match list; yield the
        # first matched number as a string, consistent with the fallback.
        phone = matches[0] if matches else 'no phone'
        yield {
            "name": name,
            "phone": phone,
        }