import scrapy
from ExploitDB_Spider.items import DownloadItem
import html
from scrapy.exceptions import DropItem

class Download(scrapy.Spider):
    """Spider for old.exploit-db.com category listings.

    Walks the webapps/remote/local/dos listing pages, yields one
    ``DownloadItem`` per exploit row (consumed by ``FileDownloadPipeline``
    to fetch the actual exploit files into ``FILES_STORE``), and follows
    the "next" pagination link on each page.
    """

    name = 'Download'
    start_urls = [

        'https://old.exploit-db.com/webapps/',
        'https://old.exploit-db.com/remote/',
        'https://old.exploit-db.com/local/',
        'https://old.exploit-db.com/dos/'
    ]

    custom_settings = {
        # 'ITEM_PIPELINES': {'ExploitDB_Spider.pipelines.MyDownloadPipeline': 300, },
        'ITEM_PIPELINES': {'ExploitDB_Spider.pipelines.FileDownloadPipeline': 300, },
        'FILES_STORE': 'Download'
    }

    def parse(self, response):
        """Parse one listing page.

        Yields a ``DownloadItem`` for every table row that has a download
        link, then schedules a request for the next page if one exists.
        """
        rows = response.xpath('//table[@class="exploit_list bootstrap-wrapper"]/tbody/tr')
        for row in rows:
            app_url = row.xpath('td[@class="app"]/a/@href').extract()
            if not app_url:
                # Rows without a download link (headers, ads) are skipped.
                continue
            title = row.xpath('td[@class="description"]/a/@title').extract()
            # Bug fix: build a fresh item per row. The original reused one
            # DownloadItem instance across the whole loop, so every item
            # already yielded could be mutated by later iterations before
            # the pipeline processed it.
            item = DownloadItem()
            item['file_urls'] = app_url
            item['title'] = title
            item['platform'] = row.xpath('td[@class="platform"]/a/@title').extract()
            item['type'] = response.url.split("/")[3]  # category segment of the listing URL
            item['category'] = title[0].split(' - ')[-1]
            item['files'] = app_url[0].split("/")[4]  # path segment used as the local file key
            yield item

        # The "next" href is HTML-entity-escaped in the page source,
        # so it must be unescaped before being requested.
        next_links = response.xpath('//div[@class="pagination"]').re(r'href="(.*?)">next')
        if next_links:
            yield scrapy.Request(html.unescape(next_links[0]), callback=self.parse)
