# coding=gbk
__author__ = 'Soul'

IP_ROUTE_FILE_NAME = "E://Workspace/carhr/route.csv"
CRAWED_FILE_NAME = "E://Workspace/carhr/to_check_outofdated_websites.txt"
TEMPLATE_PATH = "E://Workspace/carhr/templates/"
RESULT_FILE_PATH = "E://Workspace/carhr/outofdate.txt"

def get_template(website_dmoain, website_name):
    """Return the template filename configured for a website.

    Resolves the site's IP via IpRouter, then scans the global
    ``route_map_array`` for a route whose ``ip_pattern`` matches that IP
    and one of whose ``website_keywords`` appears in either the domain or
    the site name.

    :param website_dmoain: website domain (parameter name is a historical
        typo, kept for caller compatibility)
    :param website_name: human-readable website name
    :return: the matching route's ``website_tpl`` value, or '' when no
        route/keyword matches.
    """
    global route_map_array
    import re  # hoisted out of the loop: it was re-imported every iteration
    ip = IpRouter.getIp(website_dmoain)
    for route_item in route_map_array:
        # ``is not None`` is the idiomatic None test (PEP 8)
        if re.search(route_item['ip_pattern'], ip) is not None:
            for kw in route_item['website_keywords']:
                if website_dmoain.find(kw) != -1 or website_name.find(kw) != -1:
                    return route_item['website_tpl']
    return ''

def loadHtmlContent(url):
    import urllib2
    c = None
    try:
        headers = {'User-Agent':'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.6) Gecko/20091201 Firefox/3.5.6'}
        req = urllib2.Request(url=url, headers=headers)
        c = urllib2.urlopen(req)
    except Exception as e:
        print e
        return ""

    if not c == None:
        f = c.read()

        import chardet
        encoding = chardet.detect(f)['encoding']
        if encoding == None or encoding == 'None' or ''.join(encoding.split()) == '':
            encoding = 'UTF8'

        try:
            f = f.decode(encoding)
        except Exception as e:
            if encoding.lower() == 'gb2312':
                try:
                    f = f.decode('gbk')
                except Exception as e:
                    print e

        f = f.lstrip("\r\n")
        if f.find('<!DOCTYPE') != -1:
            lines = f.split('\n')
            rm_head_lines = lines[1:(len(lines) -1)]
            f = ''.join(rm_head_lines)

        return f

def loadTemplate(webName, url):
    domain = url.split('/')[2]
    xmlFile = get_template(domain, webName)
    xmlFile = ''.join(xmlFile).strip()

    if xmlFile == '':
        xmlFile = webName + ".xml"

    from lxml import etree
    tmpl_filename = TEMPLATE_PATH + xmlFile


    import chardet
    encoding = chardet.detect(tmpl_filename)['encoding']
    if encoding == None or encoding == 'None' or ''.join(encoding.split()) == '':
        encoding = 'UTF8'

    tmpl_filename = tmpl_filename.decode(encoding)
    print 'loadTemplate:: parsing ' + tmpl_filename

    import os
    if os.path.exists(tmpl_filename) and not(os.path.getsize(tmpl_filename) == 0):
        tree = etree.parse(tmpl_filename)
        return tree
    else:
        print ('loadTemplate:: No template exist for %s.xml[%s]') % (xmlFile, webName)
        return None

def loadWebsites(path):
    """Parse a crawled-websites file into a {name: url} dict.

    Each non-blank line that does not start with '#' is expected to be
    "name,url[,...]"; only the first two comma-separated fields are used,
    each stripped of surrounding whitespace.

    :param path: path of the websites list file
    :return: dict mapping website name -> URL.
    """
    param_map = {}
    # ``with`` guarantees the file is closed even when a malformed line
    # raises (the old version leaked the handle in that case).
    with open(path) as crawed_file:
        for line in crawed_file:
            if line and line.strip() and not line.startswith('#'):
                fields = line.split(',')  # split once instead of twice
                param_map[fields[0].strip()] = fields[1].strip()
    return param_map

def getPureContent(x_content):
    """Normalize an lxml ``xpath()`` result into plain text.

    Accepts either a list (the usual xpath() return) or a scalar; uses
    the first list element ('' for an empty list), then prefers the
    node's ``.text`` attribute when it has one.

    For unicode values it additionally strips whitespace/control escapes
    and, when more than half of the remaining backslash escapes are
    ``\\x``/``\\X`` hex escapes, re-encodes via ``raw_unicode_escape`` —
    presumably to recover text that was mis-decoded upstream (TODO
    confirm intent against the templates/pages this runs on).
    """
    # xpath() returns a list; use its first hit, or bail out with ''.
    if isinstance(x_content, list):
        if len(x_content) > 0:
            x_content = x_content[0]
        else:
            return ''

    value = ''
    # lxml elements expose their text content via ``.text``; plain
    # strings (attribute values, text() results) are used as-is.
    if hasattr(x_content, 'text'):
        value = x_content.text
    else:
        value = x_content

    # Escape sequences to remove from the ``unicode_escape`` rendering:
    # nbsp, CR, LF, space, tab, bell, backspace, form feed, vertical tab
    # and NUL.
    removelist = [u'\\xa0', u'\\r', u'\\n', u' ', u'\\t', u'\\a', u'\\b', u'\\f', u'\\v', u'\\0']

    if isinstance(value, unicode):
        # Work on the escaped representation so non-ASCII characters
        # appear as literal ``\xNN``/``\uNNNN`` sequences.
        t = value.encode('unicode_escape')
        for rl in removelist:
            t = t.replace(rl, u'')

        # Count how many backslash escapes are hex (``\x``) escapes.
        # NOTE(review): when ``t`` has no backslash, ``find`` returns -1
        # and ``t[i + 1]`` inspects ``t[0]``; the loop then breaks on the
        # next check and ``rate`` stays 0 below, so the stray count is
        # harmless — but it only works by accident.
        start = 0
        count = 0
        while start < len(t):
            i = t.find('\\', start, len(t))
            if t[i + 1] == ('x') or t[i + 1] == ('X'):
                count = count + 1
            if start > i:
                break
            start = i + 1

        if t.count('\\') == 0:
            rate = 0
        else:
            rate = (float)(count) / (float)(t.count('\\'))
        # Mostly hex escapes => treat the value as mis-decoded bytes and
        # recover them; otherwise return the unicode value unchanged.
        if rate > 0.5:
            value = value.encode('raw_unicode_escape')
        return value
    else:
        return value

def getContentFormXpathList(pasred_html, str_xpath_list):
    """Try each '||'-separated xpath in *str_xpath_list* against
    *pasred_html* and return the first non-empty text content found,
    or '' when none of the candidates match anything."""
    for candidate in str_xpath_list.split('||'):
        text = getPureContent(pasred_html.xpath(candidate))
        if text != '':
            return text
    return ''

def isTemplateOutOfDate(xpath_jobname, xpath_comname, xpath_jobdscpt, xpath_comdscpt, html_content):
    """Return True when NONE of the four xpath lists extract any content
    from *html_content* (i.e. the template no longer fits the page).
    Returns False as soon as one field still extracts, or when the HTML
    cannot be parsed."""
    from lxml.html import fromstring
    try:
        doc = fromstring(html_content)
        # Check the four tracked fields in the original order; one hit is
        # enough to declare the template still valid.
        for xpaths in (xpath_jobname, xpath_comname, xpath_jobdscpt, xpath_comdscpt):
            if getContentFormXpathList(doc, xpaths) != '':
                return False
        return True
    except Exception as e:
        print(e)
        return False

def _getTextContent(x_content):
    if isinstance(x_content, list):
        if len(x_content) > 0:
            x_content = x_content[0]
        else:
            x_content = ''

    value = ''
    if hasattr(x_content, 'text'):
        value = x_content.text
    else:
        value = x_content
    return value

if __name__ == "__main__":
    from IpRoute import *
    ipRoute = IpRouter(IP_ROUTE_FILE_NAME) #Load route file content

    CrawledDict = loadWebsites(CRAWED_FILE_NAME)
    f = open(RESULT_FILE_PATH, 'a')

    for key in CrawledDict.keys():
        xmlContent = loadTemplate(key, CrawledDict[key])
        if xmlContent == None:
            continue

        # get xml path values for jobname, comname, jobdscpt, comdscpt
        xpath_jobname = _getTextContent(xmlContent.xpath("//PageTemplate/Job/JobInfo/JobName"))
        xpath_comname = _getTextContent(xmlContent.xpath("//PageTemplate/Company/CompanyName"))
        xpath_jobdscpt = _getTextContent(xmlContent.xpath("//PageTemplate/Job/Description/p"))
        xpath_comdscpt = _getTextContent(xmlContent.xpath("//PageTemplate/Company/Introduction/p"))

        html_content = loadHtmlContent(CrawledDict[key])
        if isTemplateOutOfDate(xpath_jobname, xpath_comname, xpath_jobdscpt, xpath_comdscpt, html_content):
            print ("%s.xml is Out of date!\n") % (key)
            f.write(("%s.xml is Out of date!\n") % (key))
    f.close()