import pymongo
import os,re
import time

# MongoDB connection: the spider below writes each crawled page into db.cache1.
client = pymongo.MongoClient(host="localhost", port=27017)
db = client["test"]

import scrapy
# import scrapy.downloadermiddlewares
# import scrapy.downloadermiddlewares.DownloaderMiddleware


# DOWNLOADER_MIDDLEWARES = {
#     'testdown': 543,
#     'scrapy.downloadermiddlewares.useragent.UserAgentMiddleware': None,
# }
import td
class BlogSpider(scrapy.Spider):
    """Crawl from ``start_urls``, caching each page's raw HTML into MongoDB
    (``db.cache1``), then follow every non-JavaScript link on the page."""

    name = 'blogspider'
    start_urls = ['http://localhost/s1.html']
    # FIX: Scrapy reads `allowed_domains` (via the OffsiteMiddleware); the
    # original `allow_domains` spelling was silently ignored, so the spider
    # could wander to any host.
    allowed_domains = ['localhost']
    custom_settings = {
        # Only follow links one level deep from the start page (int, not str).
        'DEPTH_LIMIT': 1,
        'DOWNLOADER_MIDDLEWARES': {
            'td.testdown': 543,
            # Disable the built-in User-Agent middleware; presumably
            # td.testdown takes over that role — confirm against td.py.
            'scrapy.downloadermiddlewares.useragent.UserAgentMiddleware': None,
        },
    }

    def parse(self, response):
        """Persist the fetched page, then schedule every followable link.

        Stores ``{"url", "html"}`` (html is the raw response body bytes) in
        the ``cache1`` collection, and yields a new Request for each anchor
        href that is not a ``javascript:`` pseudo-link.
        """
        db.cache1.insert_one({"url": response.url, "html": response.body})
        for href in response.css('a::attr(href)').getall():
            # Skip javascript: pseudo-links — they are not fetchable URLs.
            if not re.search(r"javascript:", href):
                yield scrapy.Request(response.urljoin(href), callback=self.parse)

# for rs in db.cache1.find({}):
#     print(rs["url"])
