import scrapy
import re
from bs4 import BeautifulSoup
from urllib.parse import urlparse
from webspider.items import WebspiderItem
import hashlib
from tld import get_tld, get_fld
import time

class WebsiteSpider(scrapy.Spider):
    """Breadth-first site crawler.

    Starting from ``start_urls``, extracts per-page metadata (title,
    keywords, description, stripped body text) into a ``WebspiderItem``
    and follows only links whose registered domain matches the current
    page's domain.
    """
    name = 'website'
    #allowed_domains = ['www.7sbook.com']
    start_urls = [
        'https://www.7sbook.com'
        ]

    def parse(self, response):
        """Scrape one page and schedule same-domain follow-up requests.

        Yields:
            ``scrapy.Request`` for each same-domain link found on the page,
            followed by the populated ``WebspiderItem`` for this page.
        """
        Web = WebspiderItem()
        # Scrape page data
        Web['url'] = response.request.url
        # MD5 of the URL serves as a stable, deduplicatable record id.
        Web['id'] = hashlib.md5(Web['url'].encode(encoding='UTF-8')).hexdigest()
        Web['title'] = response.xpath('//*/title/text()').extract_first()
        Web['keywords'] = response.xpath("//*/meta[@name='keywords']/@content").extract_first()
        Web['description'] = response.xpath("//*/meta[@name='description']/@content").extract_first()
        bodyhtml = response.xpath("/html/body").extract_first()
        # Guard: a page without a <body> yields None here, and
        # BeautifulSoup(None, ...) would raise TypeError.
        if bodyhtml:
            bodyhtml = BeautifulSoup(bodyhtml, 'html.parser').get_text()
        else:
            bodyhtml = ''
        # \s already matches \t, \r and \n; strip all whitespace from the text.
        Web['bodyhtml'] = re.sub(r'\s', '', bodyhtml)
        Web['grab_time'] = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
        print("+-------------------------------+")
        print(response.request.url)
        print("+-------------------------------+")

        # The base domain is loop-invariant: compute it once, not per link.
        # get_fld raises (e.g. TldBadUrl) on URLs it cannot parse; treat
        # that as "no followable domain" rather than crashing the callback.
        try:
            baseDomain = get_fld(response.request.url, fix_protocol=True)
        except Exception:
            baseDomain = None

        for next_url in set(response.xpath('//*/a/@href').extract()):
            # Resolve relative links against the current page URL.
            url = response.urljoin(next_url)
            # Drop the #fragment so equivalent pages are deduplicated.
            url = url.partition('#')[0]
            # Pages commonly contain mailto:, javascript:, tel: links;
            # get_fld raises on those — skip them instead of aborting.
            try:
                goDomain = get_fld(url, fix_protocol=True)
            except Exception:
                continue
            if baseDomain is not None and goDomain == baseDomain:
                yield scrapy.Request(url, callback=self.parse)

        yield Web



