import scrapy
import pymongo
class SearchSpider(scrapy.Spider):
    """Fetch raw page HTML for URLs collected by a Baidu news search.

    Candidate URLs come from the MongoDB collection ``baidu_news_search``
    (documents whose ``get_source`` field is unset, ``_id`` is the URL).
    A request is yielded for each URL not already stored in ``news_page``;
    URLs whose page is already stored are flagged ``get_source: True`` so
    they are skipped on subsequent runs.
    """

    name = "pages"

    def start_requests(self):
        """Yield a request for every URL that still needs its source fetched.

        Iterates ``baidu_news_search`` newest-first, skipping PDF links and
        URLs already present in ``news_page``.
        """
        # NOTE(review): host/port are hard-coded — consider moving them to
        # Scrapy settings so deployments don't require a code change.
        db = pymongo.MongoClient('192.168.1.220:29001').news
        # Hoist collection handles out of the loop instead of re-resolving
        # db['...'] on every iteration.
        search = db['baidu_news_search']
        pages = db['news_page']
        # Project only _id (the URL) — nothing else is needed here.
        cursor = search.find({'get_source': None}, {'_id': 1}).sort('_id', pymongo.DESCENDING)
        for doc in cursor:
            url = doc.get('_id')
            if pages.find_one({'_id': url}, {'_id': 1}):
                # Page already stored: flag the search doc so the next run's
                # find({'get_source': None}) no longer returns it.
                search.update_one({'_id': url}, {'$set': {'get_source': True}})
            elif not url.endswith('pdf'):
                # NOTE(review): 'pdf' (no dot) also matches URLs like
                # '...mypdf'; probably intended as '.pdf' — confirm before
                # tightening, as it would change which URLs are crawled.
                yield scrapy.Request(url, callback=self.parse)

    def parse(self, response):
        """Extract basic <head> metadata plus the raw HTML of a fetched page.

        Returns a dict item with description, keywords, title, url and html.
        Presumably a downstream pipeline writes it into ``news_page`` keyed
        by URL — confirm against the project's pipeline configuration.
        """
        return {
            'description': response.css(
                'head > meta[name=description]::attr(content)').extract_first(),
            'keywords': response.css(
                'head > meta[name=keywords]::attr(content)').extract_first(),
            'title': response.css('head > title::text').extract_first(),
            'url': response.url,
            'html': response.text,
        }

