# -*- coding: utf-8 -*-
import scrapy
from scrapy.http import Request
from urllib import parse
class TxnewSpider(scrapy.Spider):
    """Spider that crawls the Tencent News front page (news.qq.com).

    It collects the article links from the headline list on the start page
    and yields one item per article containing the article's title.
    """

    name = 'txnew'
    # Must match the site actually crawled; the previous value 'tx.com'
    # did not cover news.qq.com, so the offsite middleware would have
    # dropped every followed link (masked by dont_filter=True).
    allowed_domains = ['qq.com']
    start_urls = ['https://news.qq.com/']

    def parse(self, response):
        """Extract article links from the front page and follow each one.

        :param response: the downloaded start page.
        :yields: scrapy.http.Request for each article, handled by parse_link.
        """
        post_urls = response.css('.head .Q-tpList a.linkto::attr(href)').extract()
        for post_url in post_urls:
            # urljoin resolves relative hrefs against the page URL.
            # dont_filter removed: with allowed_domains fixed, the default
            # duplicate filter should stay on to avoid re-crawling the
            # same article twice.
            yield Request(
                url=parse.urljoin(response.url, post_url.strip()),
                callback=self.parse_link,
            )

    def parse_link(self, response):
        """Extract the title from a single article page.

        :param response: the downloaded article page.
        :yields: a dict with the article title (may be None if the
                 '.hd h1' selector matches nothing — TODO confirm the
                 article page markup).
        """
        title = response.css('.hd h1::text').extract_first()
        yield {
            'title': title
        }