import scrapy
from ExploitDB_Spider.items import SpiderItem
import html

class Spider(scrapy.Spider):
    """Crawl the old.exploit-db.com listing pages (webapps / remote / local /
    dos), yielding one ``SpiderItem`` per exploit row that has an application
    download link, and following the "next" pagination link on each page.
    """

    name = 'Spider'
    # allowed_domains = ['exploit-db.com']
    start_urls = [
        'https://old.exploit-db.com/webapps/',
        'https://old.exploit-db.com/remote/',
        'https://old.exploit-db.com/local/',
        'https://old.exploit-db.com/dos/'
    ]

    custom_settings = {
        'ITEM_PIPELINES': {'ExploitDB_Spider.pipelines.SpiderPipeline': 300, }
    }

    def parse(self, response):
        """Parse one listing page.

        Yields:
            SpiderItem: one per table row with an app download link, carrying
                ``title``, ``platform``, ``type``, ``category`` and ``app_url``.
            scrapy.Request: for the next pagination page, when present.
        """
        rows = response.xpath(
            '//table[@class="exploit_list bootstrap-wrapper"]/tbody/tr')
        for row in rows:
            app_url = row.xpath('td[@class="app"]/a/@href').extract()
            if not app_url:
                # Rows without an application download link are skipped
                # (the old `... != ""` check was always true: extract()
                # returns a list, never a string).
                continue

            # BUG FIX: create a fresh item per row. The original reused one
            # SpiderItem instance across the whole loop, so every yielded
            # reference pointed at the same mutating object.
            item = SpiderItem()
            title = row.xpath('td[@class="description"]/a/@title').extract()
            item['title'] = title
            item['platform'] = row.xpath(
                'td[@class="platform"]/a/@title').extract()
            # URL path segment after the host names the exploit type,
            # e.g. https://old.exploit-db.com/webapps/ -> 'webapps'.
            item['type'] = response.url.split("/")[3]
            # Category is the trailing " - "-separated segment of the title;
            # guard against a missing @title (original raised IndexError).
            item['category'] = title[0].split(' - ')[-1] if title else ''
            item['app_url'] = app_url

            yield item

        # Pagination: the href is HTML-escaped in the page source
        # (e.g. &amp;), so unescape it before requesting.
        next_page = response.xpath(
            '//div[@class="pagination"]').re(r'href="(.*?)">next')
        if next_page:
            yield scrapy.Request(html.unescape(next_page[0]),
                                 callback=self.parse)
