# -*- coding: utf-8 -*-
import scrapy

from baiduNewsSpider.items import NewsItem

class NewsspiderSpider(scrapy.Spider):
    """Spider that fetches news URLs listed in ``url.csv`` and extracts
    the article title and body text with site-specific XPath rules
    (Huanqiu, Baijiahao, CCTV News, plus a generic fallback).

    Yields:
        NewsItem: with ``title``, ``url`` and ``content`` fields.
    """
    name = 'newsspider'
    # No domain restriction: the URL list in url.csv may span many sites.
    allowed_domains = []
    # URLs come from url.csv in start_requests(), not from start_urls.
    start_urls = []

    def start_requests(self):
        """Read URLs from url.csv (skipping the header row) and schedule a
        GET request for each non-empty line."""
        with open('url.csv') as urlfile:
            # [1:] skips the CSV header line.
            lines = urlfile.readlines()[1:]
        # strip() removes the trailing newline readlines() keeps — an
        # unstripped URL would be percent-encoded into a broken request.
        # Plain Request (GET) is the right tool here; FormRequest is for
        # form submissions.
        return [scrapy.Request(line.strip()) for line in lines if line.strip()]

    def parse(self, response):
        """Extract title and content, choosing the XPath rules by host.

        Args:
            response: the downloaded page.

        Yields:
            NewsItem: one item per page; missing selectors yield empty
            strings instead of raising IndexError.
        """
        url = response.url
        # Element [2] of 'scheme://host/path'.split('/') is the host,
        # which decides the extraction rule to apply.
        urlname = url.split('/')[2]
        if urlname == '3w.huanqiu.com':  # huanqiu.com (Global Times)
            title = response.xpath(
                '//h1[@class="a-title"]/strong/text()').extract_first(default='')
            content = ''.join(
                response.xpath('//div[@class="a-con"]/p/text()').extract())
        elif urlname == 'baijiahao.baidu.com':  # Baijiahao (Baidu)
            title = response.xpath(
                '//div[@class="article-title"][1]/h2/text()').extract_first(default='')
            content = ''.join(
                response.xpath('//div[@id="article"]//text()').extract())
        elif urlname == 'news.cctv.com':  # CCTV News
            title = response.xpath(
                '//div[@class="cnt_bd"][1]/h1/text()').extract_first(default='')
            content = ''.join(
                response.xpath('//div[@class="cnt_bd"][1]/p/text()').extract())
        else:  # generic fallback for any other site
            # '//h1' replaces the original '//h' (not an HTML element,
            # so it could never match).
            title = response.xpath('//h1/text()').extract_first(default='')
            # Closing paren restored: the original '//p/text(' was an
            # invalid XPath expression and raised at runtime.
            content = ''.join(response.xpath('//p/text()').extract())
        yield NewsItem(title=title, url=url, content=content)
