# -*- coding: utf-8 -*-
import scrapy
import re
from scrapy.http import Request
from urllib import parse
from txnews.items import SxjjItem
# scrapy crawl sxjj -o sxjj.json -s FEED_EXPORT_ENCODING=utf-8
class SxjjSpider(scrapy.Spider):
    """Crawl vzan.cc forum listing pages (pages 1-19) and scrape a
    visitor name and phone number from each post's detail page.

    Yields:
        SxjjItem with fields 'name' and 'phone' (either may be None
        when the selector matches nothing).
    """
    name = 'sxjj'
    # FIX: the spider only ever requests vzan.cc URLs; 'vzan.cc' must be
    # allowed or the OffsiteMiddleware drops followed requests that are
    # not explicitly dont_filter'ed. 'sxxc.cn' kept for compatibility with
    # the (commented-out) legacy pagination below.
    allowed_domains = ['sxxc.cn', 'vzan.cc']
    # Page 1 is the bare URL; pages 2-19 carry an explicit ?page= param.
    start_urls = ['https://www.vzan.cc/f/s-631898?typeId=231093'] + [
        'https://www.vzan.cc/f/s-631898?page=' + str(page) + '&typeId=231093'
        for page in range(2, 20)
    ]

    def parse(self, response):
        """Extract every post link on a listing page and schedule a
        detail-page request for each.

        Args:
            response: listing-page response.

        Yields:
            scrapy Request per post, routed to :meth:`parse_detail`.
        """
        post_urls = response.css(
            '.all_theme_content .all_theme_title a.nota::attr(href)'
        ).extract()
        for post_url in post_urls:
            # Hrefs may be relative; resolve against the listing page URL.
            url = parse.urljoin(response.url, post_url)
            self.logger.debug('following post url: %s', url)
            # dont_filter: the same post can appear on multiple listing
            # pages and we still want to fetch it.
            yield Request(url=url, callback=self.parse_detail,
                          dont_filter=True)

    def parse_detail(self, response):
        """Scrape the visitor name and phone number from a post page.

        Args:
            response: post detail-page response.

        Yields:
            SxjjItem with 'name' and 'phone' (None when absent).
        """
        item = SxjjItem()
        item['name'] = response.css(
            '.content .vistor_infor_2:nth-child(2) .vistor_left_2 '
            '.vistor_name::text'
        ).extract_first()
        item['phone'] = response.css(
            '.content .vistor_infor_2:nth-child(2) .infor_right_2 '
            'a.content-phoneNumber::text'
        ).extract_first()
        yield item