'''
dot_listing.py

Copyright 2012 Tomas Velazquez

This file is part of w3af, w3af.sourceforge.net .

w3af is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation version 2 of the License.

w3af is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with w3af; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA

'''
import re

import core.controllers.outputManager as om
import core.data.kb.knowledgeBase as kb
import core.data.kb.vuln as vuln
import core.data.constants.severity as severity

from core.controllers.plugins.crawl_plugin import CrawlPlugin
from core.controllers.w3afException import w3afException
from core.controllers.core_helpers.fingerprint_404 import is_404
from core.data.db.disk_set import disk_set


class dot_listing(CrawlPlugin):
    '''
    Search for a .listing file in every known directory and extract the
    filenames it discloses, turning each one into a new fuzzable request.

    @author: Tomas Velazquez ( tomas.velazquezz@gmail.com )
    '''

    def __init__(self):
        CrawlPlugin.__init__(self)

        # Internal variables
        self._analyzed_dirs = disk_set()
        self._dot_listing = '.listing'
        # One "ls -l"-style line of a wget-generated .listing file:
        # permissions, link count, uid, gid, size, month, day, time-or-year,
        # and finally the filename (the single capture group).
        # Raw string so the \s/\d/\w/\S escapes reach the regex engine intact.
        regex_str = r'[a-z-]{10}\s*\d+\s*\d+\s*\d+\s*\d+\s*\w+\s*\d+\s*[0-9:]{4,5}\s*(\S+)'
        self._listing_parser_re = re.compile(regex_str)

    def crawl(self, fuzzable_request):
        '''
        For every directory, fetch the .listing file and analyze the response.

        @parameter fuzzable_request: A fuzzable_request instance that contains
                                    (among other things) the URL to test.
        '''
        for domain_path in fuzzable_request.getURL().getDirectories():
            # Each directory is checked at most once across the whole scan
            if domain_path not in self._analyzed_dirs:
                self._analyzed_dirs.add( domain_path )
                self._check_and_analyze( domain_path )

    def _check_and_analyze(self, domain_path):
        '''
        Check if a .listing filename exists in the domain_path.

        @parameter domain_path: The URL of the directory to check.
        @return: None, everything is saved to the self.out_queue.
        '''
        # Request the file
        url = domain_path.urlJoin( self._dot_listing )
        try:
            response = self._uri_opener.GET( url, cache=True )
        except w3afException as w3:
            msg = 'Failed to GET .listing file: %s. Exception: %s.'
            # BUGFIX: interpolate here; passing the tuple as a second
            # positional argument would fill debug()'s newLine parameter
            # and leave the %s placeholders unformatted.
            om.out.debug( msg % (url, w3) )
        else:
            # Check if it's a .listing file
            if not is_404( response ):
                # Flag shared with the _get_and_parse workers; reset before
                # the map, read only after the map has fully completed.
                self._is_vuln = False

                parsed_url_list = []
                for filename in self._listing_parser_re.findall( response.getBody() ):
                    # Skip the current/parent directory pseudo-entries
                    if filename not in ('.', '..'):
                        parsed_url_list.append( domain_path.urlJoin( filename ) )

                self._tm.threadpool.map(self._get_and_parse, parsed_url_list)

                # Only report a vulnerability when at least one disclosed
                # filename actually exists on the server
                if self._is_vuln:
                    v = vuln.vuln()
                    v.setPluginName(self.getName())
                    v.set_id( response.id )
                    v.setName( '.listing file found' )
                    v.setSeverity(severity.LOW)
                    v.setURL( response.getURL() )
                    msg = ('A .listing file was found at: "%s". The contents'
                           ' of this file disclose filenames')
                    v.setDesc( msg % (v.getURL()) )
                    kb.kb.append( self, 'vuln', v )
                    om.out.vulnerability( v.getDesc(), severity=v.getSeverity() )

    def _get_and_parse(self, url):
        '''
        GET a URL that was found in the .listing file, and parse it.

        @parameter url: The URL to GET.
        @return: None, everything is saved to self.out_queue.
        '''
        try:
            http_response = self._uri_opener.GET( url, cache=True )
        except KeyboardInterrupt as k:
            # Never swallow a user abort inside the thread pool
            raise k
        except w3afException as w3:
            msg = 'w3afException while fetching page in crawl.dot_listing, error: %s.'
            # BUGFIX: interpolate here instead of passing w3 as a second
            # positional argument (which debug() treats as newLine).
            om.out.debug( msg % w3 )
        else:
            if not is_404( http_response ):
                # At least one disclosed file exists -> report in the caller
                self._is_vuln = True
                for fr in self._create_fuzzable_requests( http_response ):
                    self.output_queue.put(fr)

    def get_long_desc( self ):
        '''
        @return: A DETAILED description of the plugin functions and features.
        '''
        return '''
        This plugin searches for the .listing file in all the directories and
        subdirectories that are sent as input and if it finds it will try to
        discover new URLs from its content. The .listing file holds information
        about the list of files in the current directory. These files are created 
        when download files from FTP with command "wget" and argument "-m" or 
        "--no-remove-listing". For example, if the input is:
            - http://host.tld/w3af/index.php
            
        The plugin will perform these requests:
            - http://host.tld/w3af/.listing
            - http://host.tld/.listing
        
        '''

