'''
Created on 2013-5-27

@author: Soul
'''

import cookielib
import os
import Queue
import threading
import urllib
import urllib2
import urlparse

from Configurator import Configurator
from Logger import *
from threadWorker import ThreadPool
from urlArrays import *

class NoExceptionCookieProcesser(urllib2.HTTPCookieProcessor):
    def http_error_403(self, req, fp, code, msg, hdrs):  
        return fp  
    def http_error_400(self, req, fp, code, msg, hdrs):  
        return fp  
    def http_error_500(self, req, fp, code, msg, hdrs):  
        return fp

class PageFetcher(object):
    """Fetch a single web page, decode it to unicode, and (when recursing)
    harvest its links into the shared crawl queues.

    Collaborators (all module-level): ``Configurator`` supplies proxy /
    cookie / credential settings, ``logger`` records progress, and the
    ``seen_urls`` / ``urls_to_visit`` containers (presumably star-imported
    from ``urlArrays``) hold shared crawl state guarded by a per-instance
    lock.
    """

    def init(self, fetch_url, recurse=False):
        # Two-phase initializer (deliberately plain init, not __init__).
        # Returns False when fetch_url is None so callers can retry later,
        # True once all per-page state is set up.
        self.__url__ = None
        self.__mutex__ = threading.Lock()  # guards seen_urls / urls_to_visit

        self.__url__ = fetch_url
        if self.__url__ == None: return False
        
        self.__html_src__ = None  # raw bytes at first, unicode after fetch()
        
        self.__page_uuid__ = None  # uid of this page
        self.__next_links__ = []  # containing links in this page
        
        self.__page_md5__ = None  # Md5 examing code for this page
        self.__time__ = None  # First fetch time
        # Accept-all default; replaced via add_validation_rule().
        # NOTE(review): analysis() calls the validator with u=/nloc=
        # keywords, which this default (single positional 'url') does not
        # accept -- recursing without a custom rule would raise TypeError;
        # confirm intended usage.
        self.__validate__ = lambda url: True
        
        self.__proxy_needed__ = (Configurator.get_val('proxy_enable') == 'true') 
        self.__cookie_needed__ = (Configurator.get_val('cookie_need') == 'true')
        self.__recurse__ = recurse
        
        self.__save_path__ = Configurator.get_val('page_lib')
        return True
    
    def set_recurse(self, recurse):
        # Toggle link harvesting after initialization.
        self.__recurse__ = recurse

    # Subclass hooks, both no-ops here, used to customize the fetch
    # process (the historical 'preFectch' spelling is part of the public
    # interface, so it is kept).
    def preFectch(self, params, headers): return

    def on_get_raw_html_src(self): return

    def _remove_doctype_(self):
        # Strip a leading <!DOCTYPE ...> declaration (plus surrounding
        # CR/LF) from the stored page source, in place.
        if self.__html_src__:
            self.__html_src__ = self.__html_src__.lstrip("\r\n")
            tag_start = self.__html_src__.upper().find('<!DOCTYPE')

            if tag_start != -1:
                tag_end = self.__html_src__.find('>', tag_start)
                if tag_start < tag_end:
                    # Drop everything up to and including the closing '>'.
                    self.__html_src__ = self.__html_src__[tag_end+1:]

            self.__html_src__ = self.__html_src__.lstrip("\r\n")

    def fetch(self):
        # Download self.__url__ into self.__html_src__ (decoded to
        # unicode). Returns True on success, False on HTTP/URL errors.
        global logger
        '''
            Code here comes from Page http://blog.csdn.net/wklken/article/details/7364390
        '''
        logger.info("Fetching url: %s" % (self.__url__))
        
        # logger.info("proxy_needed: %s, cookie_needed: %s, recurse: %s, save_path: %s"%(self.__proxy_needed__, 
        #                                                                               self.__cookie_needed__,
        #                                                                               self.__recurse__,
        #                                                                               self.__save_path__))
            
        try:
            params = None  # POST body; only set when cookie login is enabled
            
            if self.__proxy_needed__:
                # Route requests through the configured proxy.
                proxy_handler = urllib2.ProxyHandler({Configurator.get_val('proxy_type') : Configurator.get_val('proxy_url')})
                opener = urllib2.build_opener(proxy_handler, urllib2.HTTPHandler)
                urllib2.install_opener(opener)
            
            if self.__cookie_needed__:
                # NOTE(review): this install_opener call replaces any proxy
                # opener installed above -- proxy and cookies cannot be
                # combined as written; confirm whether that is intended.
                cookie = cookielib.CookieJar()
                cookie_handler = urllib2.HTTPCookieProcessor(cookie)
                opener = urllib2.build_opener(cookie_handler)
                urllib2.install_opener(opener)
                params = {"username": Configurator.get_val('username'), \
                           "password": Configurator.get_val('password')}

            # Browser-like User-Agent so the request is not flagged as a bot.
            headers = {'User-Agent':'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.6) Gecko/20091201 Firefox/3.5.6'}
            self.preFectch(params, headers)
            try:
                # add this to pretend to be not a spider
                req = urllib2.Request(url=self.__url__, headers=headers)
                if params:
                    # Credentials present: POST them url-encoded.
                    reader = urllib2.urlopen(req, urllib.urlencode(params))
                else:
                    reader = urllib2.urlopen(req)
            
            except urllib2.HTTPError as e:
                logger.error(e)
                return False
            except ValueError as e:
                logger.error(e)
                return False

            self.__html_src__ = reader.read()

            self.on_get_raw_html_src()

            # Sniff the encoding from the raw bytes (third-party chardet).
            import chardet
            encoding = chardet.detect(self.__html_src__)['encoding']
            if encoding == None or encoding == 'None' or ''.join(encoding.split()) == '':
                encoding = 'UTF8'  # fall back when detection yields nothing
            
            '''
                Here we should pass unicode to html_src
            '''
            try:
                self.__html_src__ = self.__html_src__.decode(encoding, 'ignore')
            except Exception as e:
                # chardet may report gb2312 for pages actually using the
                # gbk superset; retry with gbk before giving up.
                if encoding.lower() == 'gb2312':
                    try:
                        self.__html_src__ = self.__html_src__.decode('gbk')
                    except Exception as e:
                        print e

            # self.__html_src__ = self.__html_src__.lstrip("\r\n")
            # if self.__html_src__.upper().find('<!DOCTYPE') != -1:
            #     #f = f.decode('gbk')
            #     lines = self.__html_src__.split('\n')
            #     rm_head_lines = lines[1:(len(lines) -1)]
            #     self.__html_src__ = ''.join(rm_head_lines)
            
            self.__encoding__ = encoding  # remember the detected encoding
            # all convert to UTF8
            import sys
            # print sys.getfilesystemencoding()
            # self.__html_src__.decode(self.__encoding__, 'ignore').encode(self.__encoding__)
            logger.info("Fetch Success!")
            return True
        
        except urllib2.URLError, e:
            logger.error(e)
            logger.error("Fetch Failed!")
            return False

    def add_validation_rule(self, validate):
        # Install a callable deciding whether a discovered link is queued;
        # analysis() invokes it as validate(u=url, nloc=netloc).
        self.__validate__ = validate  # user defined validation method
    
    @staticmethod
    def is_legal_url(url):
        # True when url matches either of two permissive http/https/ftp
        # patterns (IP-address or hostname forms). The '&amp;' runs in the
        # first pattern look like HTML-escaped '&' leaked into the regex;
        # kept as-is to preserve behavior.
        import re
        r = re.match(r'^(http|https|ftp)\://([a-zA-Z0-9\.\-]+(\:[a-zA-Z0-9\.&amp;%\$\-]+)*@)?((25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[1-9])\.(25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[1-9]|0)\.(25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[1-9]|0)\.(25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[0-9])|([a-zA-Z0-9\-]+\.)*[a-zA-Z0-9\-]+\.[a-zA-Z]{2,4})(\:[0-9]+)?(/[^/][a-zA-Z0-9\.\,\?\'\\/\+&amp;%\$#\=~_\-@]*)*$',
                 url)
        r2 = re.match(r'^(https?|ftp|file)://[-a-zA-Z0-9+&@#/%?=~_|!:,.;]*[-a-zA-Z0-9+&@#/%=~_|]', url)
        return not((r == None) and (r2 == None))
    
    @staticmethod
    def get_netloc(url):
        # Return the network-location (host[:port]) part of url, or None
        # when url does not pass is_legal_url().
        if PageFetcher.is_legal_url(url):
            from urlparse import urlparse
            arr = urlparse(url)
            netloc = arr.netloc
            return netloc
        return None
    
    @staticmethod
    def get_legal_url(base_url, url):
        # Resolve a (possibly relative) url against base_url's scheme and
        # host, then normalize it via join_url().
        from urlparse import urljoin
        from urlparse import urlunparse
        
        # 'urlparse.urlparse' here resolves via the module-level
        # 'import urlparse', not the local 'from urlparse import ...' names.
        arr = urlparse.urlparse(base_url)
        return PageFetcher.join_url(urlunparse((arr.scheme, arr.netloc, '', '', '', '')), url)        
    
    @staticmethod
    def join_url(base, url):
        # urljoin base and url, then canonicalize the path component with
        # posixpath.normpath (collapses '.', '..', doubled slashes).
        from urlparse import urljoin
        from urlparse import urlunparse
        from posixpath import normpath
        
        url1 = urljoin(base, url)
        arr = urlparse.urlparse(url1)
        path = normpath(arr[2])
        if path == '/.' or path == '.': path = ''  # to fix url like http://su.ganji.com/.
        return urlunparse((arr.scheme, arr.netloc, path, arr.params, arr.query, arr.fragment))
    
    def analysis(self):
        # Stamp the fetched page (uuid / timestamp / md5) and, when
        # recursing, queue its validated links onto urls_to_visit.
        if self.__html_src__:
            import uuid, datetime, hashlib
            '''
                1. generate a uid for this page
                2. add time stamp
                3. add md5 code
                4. analysis the links in this page add it to surls_to_visit set 
            '''
            # make a UUID based on the host ID and current time
            self.__page_uuid__ = uuid.uuid1()
            self.__time__ = datetime.datetime.now()
            # NOTE(review): __html_src__ is unicode after fetch(); md5()
            # will implicitly encode it and can raise on non-ASCII text.
            self.__page_md5__ = hashlib.md5(self.__html_src__).hexdigest().upper()
            
            global seen_urls, urls_to_visit
            if self.__recurse__:
                ulrs = self._get_urls_from_html()
                for url in ulrs:
                    # You must ensure that your url is in form of http://xxxx ...
                    if not PageFetcher.is_legal_url(url):
                        url = PageFetcher.get_legal_url(self.__url__, url)  # here we can get the standard http site
                        if not PageFetcher.is_legal_url(url): continue  # if still not standard form of url, we ignore it!!
                                
                    # url=urlparse.urlparse('http://www.baidu.com/index.php?username=guol')
                    # ParseResult(scheme='http', netloc='www.baidu.com', path='/index.php', params='', query='username=guol', fragment='')
                    val = self.__validate__(u=url, nloc=urlparse.urlsplit(self.__url__).netloc)
                    logger.info("validation result of %s is %s" % (url, val))
                    
                    # NOTE(review): 'seen'/'visited' stay unbound if
                    # acquire(1) ever returned False (blocking acquire
                    # should always return True in practice).
                    if self.__mutex__.acquire(1):
                        seen = seen_urls.__contains__(url)
                        self.__mutex__.release()
                        
                    if self.__mutex__.acquire(1):    
                        visited = urls_to_visit.__contains__(url)
                        self.__mutex__.release()
                        
                    # Check-then-append spans three separate lock sections,
                    # so duplicate appends remain possible under races.
                    if (not seen) and (not visited) and val:
                        logger.info("append %s to urls_to_visit" % (url))
                        if self.__mutex__.acquire(1):
                            urls_to_visit.append(url)
                            self.__mutex__.release()
                    
                logger.info("append %s to seen_urls" % (self.__url__))
                
                if self.__mutex__.acquire(1):
                    seen_urls.append(self.__url__)  # add it to the seen list
                    self.__mutex__.release()
                # print "size of seen_urls ", len(seen_urls)
                
            
    def _get_urls_from_html(self):
        '''
            Extract every anchor href from the stored page source.

            Returns a list of url strings, or None when no page source is
            stored. Code here learned from
            https://github.com/Joshkunz/spider.py/blob/master/spider.py
        '''
        if self.__html_src__:
            # lxml is imported lazily so the module loads without it.
            from lxml.html import fromstring
            
            parsed_html = fromstring(self.__html_src__)

            return parsed_html.xpath("//a/@href")  # get a list of the urls on a page
        
    
#    def store(self):
#        f = open(("%s/%s.txt") % (self.__save_path__, self.__page_uuid__), 'w')
#        ls = os.linesep
#        
#        f.write("%s: %s%s" % ('url', self.__url__, ls))
#        f.write("%s: %s%s" % ('uuid', self.__page_uuid__, ls))
#        f.write("%s: %s%s" % ('md5', self.__page_md5__, ls))
#        f.write("%s: %s%s" % ('time', self.__time__, ls))
#        f.write(ls)
#        f.write(self.__html_src__)
#        
#        f.close()
    
#    @staticmethod
#    def execute(*args, **kwds):
#        # refer to http://blog.csdn.net/qinyilang/article/details/5484415 to know the usage of *args, **kwds
#        # for keyword, value in kwargs.items():
#        # for arg in args:
#        # ars = args[0][0]
#        validate = args[0]
#        recurse = True
#        emp_count = 0
#        
#        while emp_count <= 1800:
#            fetcher = PageFetcher()
#            if not fetcher.init(recurse):
#                import time
#                time.sleep(1)  # sleep 2 seconds
#                emp_count = emp_count + 1
#                logger.info("sleep %d times to wait for urls_to_visit to be filled" % (emp_count))
#                continue
#            
#            fetcher.add_validation_rule(validate)
#            if fetcher.fetch():
#                fetcher.anlysis()
#                fetcher.store()
#            
#            del fetcher
    
# if __name__ == "__main__":
#    PageFetcher.execute(["http://www.baidu.com", "D:/"], None)
