import scrapy
import re
import time
from condb import ArxivTable

class ArxivSpider(scrapy.Spider):
    """Scrape the most recent arxiv.org cs.AI listing page.

    For each paper on the page, stores a row (url, arxiv number, title,
    listing date) through the project's ArxivTable DB helper, skipping
    pages whose listing date has already been crawled.
    """

    name = "arxiv"
    allowed_domains = ["arxiv.org"]
    start_urls = [
        "https://arxiv.org/list/cs.AI/recent",
    ]

    def parse(self, response):
        """Parse one /list/cs.AI/recent page and persist its entries.

        Reads the listing date from the page header; if that date was
        already crawled, does nothing except close the DB connection.
        """
        # Header text looks like e.g. "Fri, 12 Jan 2018" -- keep only the
        # date part after the comma and turn it into a Unix timestamp.
        header = response.xpath("//*[@id='dlpage']/h3[1]/text()").extract()[0]
        datestr = header.split(',')[1].strip()
        timestamp = time.mktime(time.strptime(datestr, '%d %b %Y'))

        dbcon = ArxivTable()
        try:
            if not dbcon.has_crawled(timestamp):
                # Collect the /abs/... links of every listed paper.
                items = []
                for sel in response.xpath("//*[@id='dlpage']/dl[1]/dt[*]/span/a[1]/@href"):
                    href = sel.extract()  # hoisted: extract once, use twice
                    items.append({
                        'arxiv_url': 'https://arxiv.org' + href,
                        # '/abs/' is a literal prefix, no regex needed.
                        'arxiv_no': href.replace('/abs/', ''),
                    })

                # Collect the (non-empty) paper titles in page order.
                names = []
                for title in response.xpath("//*[@id='dlpage']/dl[1]/dd[*]/div/div[1]/text()"):
                    text = title.extract().strip()
                    if text != '':
                        names.append(text)

                # Pair links with titles positionally; zip stops at the
                # shorter list if the page ever yields unequal counts.
                for item, arxiv_name in zip(items, names):
                    row = {
                        'arxiv_url': item['arxiv_url'],
                        'arxiv_no': item['arxiv_no'],
                        'arxiv_name': arxiv_name,
                        'create_time': timestamp,
                    }
                    print(dbcon.process_item(row))

                print(dbcon.crawled_thetime(timestamp))
        finally:
            # Always release the DB connection, even if an insert raised.
            dbcon.close_db()
