#from scrapy.selector import Selector
from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor
from scrapy.contrib.spiders import CrawlSpider, Rule
from itemextract.items import ItemextractItem
from itemextract.linkloader import LinkLoader
from urlparse import urlparse
import sqlite3
import re

class MySpider(CrawlSpider):
    """Crawl spider that follows links site-wide, flags likely sensitive
    URLs (backup/config files, admin pages), and throttles crawling to at
    most ``PAGES_PER_PATH`` pages per URL path, with counts persisted in a
    local SQLite database ("webcrawler").
    """
    name = 'my'

    rules = (
        Rule(SgmlLinkExtractor(allow=(''),
                               tags=('a', 'area', 'form'),
                               attrs=('href', 'action')),
             callback='parse_item',
             follow=True,
             process_links='filter_links',
             process_request='parse_requests'),
    )

    def parse_start_url(self, response):
        """Lazily initialize the SQLite stats store.

        Called by CrawlSpider for each start URL; guarded so that a second
        start URL does not reopen the connection and wipe the stats that
        were already collected.  Returns no item.
        """
        if getattr(self, 'conn', None) is not None:
            return
        self.conn = sqlite3.connect("webcrawler")
        # isolation_level=None -> autocommit; no explicit commit calls below.
        self.conn.isolation_level = None
        self.conn.execute(
            "create table if not exists stat("
            "id integer primary key autoincrement,"
            "path varchar(128) unique,count integer)")
        # Start every crawl with a clean slate.
        self.conn.execute("delete from stat")
        self.cu = self.conn.cursor()

    def parse_item(self, response):
        """Flag suspicious URLs and emit an item carrying the page link.

        Returns None for likely configuration/backup files (they are only
        logged), otherwise the loaded ItemextractItem.
        """
        # Extensions that usually indicate configuration or backup files.
        file_extensions = ('.asa', '.inc', '.bak', '.old')
        if response.url.endswith(file_extensions):
            self.log("%s is likely configuration or backup file." % response.url)
            return
        if re.search(r'admin', response.url, re.I):
            # Logged but still emitted as a normal item.
            self.log("%s is likely admin page." % response.url)

        i = LinkLoader(item=ItemextractItem(), response=response)
        i.add_value('link', response.url)
        return i.load_item()

    def filter_links(self, links):
        """Return the subset of extracted links that may still be followed.

        Each URL path is allowed at most PAGES_PER_PATH visits; counts live
        in the ``stat`` table.  All SQL is parameterized — URL paths are
        untrusted input and must never be interpolated into the query text.
        """
        pagelimit = self.settings.getint("PAGES_PER_PATH")
        follows = []
        for link in links:
            path = urlparse(link.url).path
            # Skip root/empty paths ("" or "/").
            if len(path) < 2:
                continue

            row = self.cu.execute(
                "select count from stat where path = ?", (path,)).fetchone()
            if row is None:
                # First time we see this path.
                self.conn.execute(
                    "insert into stat(path,count) values(?,1)", (path,))
                follows.append(link)
            elif row[0] < pagelimit:
                self.conn.execute(
                    "update stat set count=count+1 where path = ?", (path,))
                follows.append(link)
        return follows

    def parse_requests(self, requests):
        """Pass-through request hook required by the Rule; no filtering."""
        return requests

    def __del__(self):
        # Close DB handles only if parse_start_url ever created them;
        # otherwise the original raised AttributeError during teardown.
        if getattr(self, 'cu', None) is not None:
            self.cu.close()
        if getattr(self, 'conn', None) is not None:
            self.conn.close()
        
    
