#! /usr/bin/python
#-*- coding=utf-8 -*-

import urllib2
import urllib
import cookielib
import time
import socket
import traceback
import chardet
import StringIO
import gzip
import sys
from lxml import etree
import json
import os

# Default HTTP request headers; a desktop Chrome User-Agent is sent so
# target sites treat the crawler like a regular browser.
Headers = {
        'User-Agent':'Mozilla/5.0 (Windows NT 6.3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/37.0.2062.103 Safari/537.36',
        
    }
# Module-level logger placeholder — never assigned in this file (TODO confirm if used elsewhere).
ld = None
# Shared CookieJar; populated by init() and persisted to disk by clean().
cj = None

def print_cookie():
        print "="*20
        global cj
        if cj is None:
                return
        cks = cj._cookies
        for key in cks.keys():
                scks = cks[key]
                for skey in scks.keys():
                        sscks = scks[skey]
                        for sskey in sscks.keys():
                                print key,skey,sskey,sscks[sskey]
        print "="*20

def load_cookies(fp, cj):
    """Restore cookies from a tab-separated file into CookieJar *cj*.

    Each line must hold at least name, value, domain and path (columns
    0-3); a non-empty column 6 marks the cookie HttpOnly.  Lines with
    fewer than four columns or an empty domain are skipped.  A fake
    request for the cookie's domain is built so the jar will accept the
    cookie through its internal _cookie_from_cookie_tuple() helper.
    """
    for line in fp:
        items = line.strip().split("\t")
        if len(items) <= 3:
            continue
        tmp = [items[0], items[1]]
        m1 = {"version": "0", "path": items[3]}
        domain = items[2].strip()
        # Bug fix: the original left turl as None when the domain column
        # was empty and then crashed on turl[0]; such a record cannot be
        # restored anyway, so skip it.
        if len(domain) == 0:
            continue
        if domain[0] == ".":
            # Leading-dot domains are real cookie domains; keep them.
            m1["domain"] = domain
            turl = domain
        else:
            turl = "http://" + domain
        tmp.append(m1)
        if len(items) > 6 and len(items[6].strip()) > 0:
            tmp.append({"HttpOnly": None})
        else:
            tmp.append({})
        # Build a plausible URL for the fake request.
        if turl[0] == ".":
            url = "http://www" + turl
        elif turl[:4] != "http":
            url = "http://" + turl
        else:
            url = turl
        fake_request = urllib2.Request(url)
        cookie = cj._cookie_from_cookie_tuple(tmp, fake_request)
        cj.set_cookie(cookie)

def save_cookies(cookieMp, fp):
    """Serialize a CookieJar's internal mapping to *fp*.

    *cookieMp* is the jar's domain -> path -> name -> Cookie mapping.
    One line per cookie is written: name, value, domain and path joined
    by tabs, lines joined by newlines (no trailing newline).
    """
    lines = []
    for by_path in cookieMp.values():
        for by_name in by_path.values():
            for cookie in by_name.values():
                fields = [cookie.name, cookie.value, cookie.domain, cookie.path]
                lines.append("\t".join(fields))
    fp.write("\n".join(lines))

class log:
    """Minimal file-backed logger with separate info and error streams."""

    def __init__(self, info_file, error_file):
        # Both files are opened in append mode and stay open for the
        # lifetime of the logger object.
        self.info_stream = open(info_file, 'a')
        self.error_stream = open(error_file, 'a')

    def push(self, stream, msg, tp):
        """Write one tab-separated record: type, timestamp, message."""
        record = '%s\t%s\t%s\n' % (tp, time.ctime(), msg)
        stream.write(record.encode('utf-8'))

    def log_info(self, msg, tp='info'):
        self.push(self.info_stream, msg, tp)

    def log_error(self, msg, tp='error'):
        self.push(self.error_stream, msg, tp)

def init():
    """Prepare global network state for crawling.

    Sets a 5 second default socket timeout, installs an urllib2 opener
    that routes every request through the shared CookieJar, and reloads
    any cookies previously persisted under data/.cookie.
    """
    socket.setdefaulttimeout(5)
    global cj
    cj = cookielib.CookieJar()
    urllib2.install_opener(
        urllib2.build_opener(urllib2.HTTPCookieProcessor(cj)))
    if os.path.exists("data/.cookie"):
        fp = open("data/.cookie")
        load_cookies(fp, cj)
        fp.close()

def clean():
    """Persist the shared CookieJar to data/.cookie; no-op when unset."""
    global cj
    if cj is None:
        return
    fp = open("data/.cookie", "w")
    save_cookies(cj._cookies, fp)
    fp.close()

def _gunzip(data):
    """Decompress a gzip byte string in memory."""
    return gzip.GzipFile(fileobj=StringIO.StringIO(data)).read()

def _detect_and_decode(html):
    """Decode *html* using chardet's guess, defaulting to utf-8.

    Bug fix: chardet.detect() may report encoding=None (e.g. empty or
    binary input); the original code then crashed in str.decode(None).
    """
    guessed = chardet.detect(html)['encoding']
    if guessed is None:
        guessed = 'utf-8'
    return html.decode(guessed, 'ignore')

def decode_content(html, encoding=None):
    """Turn a raw HTTP response body into unicode.

    *encoding* is the Content-Encoding response header value (e.g.
    "gzip"), not a character set.  Gzip bodies are decompressed first;
    the character set is always sniffed with chardet.  When the header
    is absent but chardet cannot identify the raw bytes, the body is
    assumed to be gzip with a missing header (as some servers send) and
    decompressed before a second sniff.
    """
    if encoding is not None and encoding.lower().strip() == "gzip":
        return _detect_and_decode(_gunzip(html))
    if chardet.detect(html)['encoding'] is None:
        # Undetectable charset: assume gzip with a missing header.
        return _detect_and_decode(_gunzip(html))
    return _detect_and_decode(html)
        
def get_page(status, data=None, log=None, headers=None):
    time.sleep(1)
    html = ''
    code = ''
    retry = 0
    url = status["url"]
    if headers is None:
        headers = Headers
    if "referer" in status:
        headers["referer"] = status["referer"]
    if data is not None and type(data) == type({}):
        data = urllib.urlencode(data)
    response = None
    while True:
        try:
            request = urllib2.Request(url, data, headers)
            response = urllib2.urlopen(request)
            code = str(response.getcode())
            if log is not None:
                log.log_info('%s\t%s\n' % (url, code))
            else:
                sys.stdout.write(('%s\t%s\n' % (url, code)).encode('utf-8'))
            if code[0] == '2':
                html = response.read()
                html = decode_content(html, response.headers.getheader("Content-Encoding"))
            else:
                print response.geturl()
                html = ""
            break
        except Exception,e:
            if hasattr(e, "getcode"):
                code = str(e.getcode())
                if code[0] == "3":
                    html = e.geturl()
                    break
            retry += 1
            if retry > 5:
                if log is not None:
                    log.log_error(('%s\t%s\t%s\t%s' % (time.ctime(), url, code, str(e))), 'recovery')
                    log.log_error('\t'.join(traceback.format_stack()))
                else:
                    sys.stderr.write('%s\t%s\t%s\t%s\n' % (time.ctime(), url, code, str(e)))
                    sys.stderr.write('\n'.join(traceback.format_stack()))
                break
            else:
                time.sleep(retry*5)
    return html

def get_node_info(node, keys):
    """Extract one value from an element node.

    keys[0] selects the extraction mode:
      'text'      -- the node's own text, stripped;
      'alltext'   -- all descendant text joined with tabs, then stripped;
      'attribute' -- the attribute named by keys[1].
    Returns None for empty results or an unknown mode.
    """
    mode = keys[0]
    if mode == 'text':
        if node.text:
            return node.text.strip()
        return None
    if mode == 'alltext':
        pieces = list(node.itertext())
        if pieces:
            return '\t'.join(pieces).strip()
        return None
    if mode == 'attribute':
        value = node.get(keys[1])
        if value:
            return value
        return None
    return None

def parse_url(surl, target, base=None):
    """Resolve link *target* found on page *surl* into an absolute URL.

    Returns None when the source URL is not http(s) or the target is a
    javascript: pseudo-link.  Absolute targets pass through unchanged;
    root-relative targets are joined to the scheme+host of *surl*;
    otherwise *base* (if given) or the directory of *surl* is prepended.
    """
    if not surl.startswith('http') or target.startswith("javascript"):
        return None
    if target.startswith('http'):
        return target
    pieces = surl.split('/')
    if target.startswith('/'):
        # pieces[:3] is ['http:', '', 'host'] -> scheme://host
        return '/'.join(pieces[:3]) + target
    if base is not None:
        return base + target
    return '/'.join(pieces[:-1]) + '/' + target

def parse(html, xpath_trees, url):
    """Extract structured data and follow-up links from an HTML page.

    html        -- page source.
    xpath_trees -- list of extraction configs; each has "xpath" and
                   "name", and optionally "target" (field specs) and
                   "children" (nested configs evaluated relative to each
                   matched node).
    url         -- the page URL, used to absolutise extracted links.

    Returns {"data": [...], "link": [...]}: each data item carries
    "name" and optionally "curr" (extracted fields) and "children";
    each link carries "url", "referer" and "name".
    """
    tree = etree.HTML(html)
    result = {"data":[],"link":[]}
    # Work queue of (context node, config, output list).  insert(0) plus
    # pop() from the end makes this FIFO, so configs are processed in
    # breadth-first order over the config/node tree.
    node_list = []
    for xpath_tree in xpath_trees:
        node_list.insert(0, (tree,xpath_tree,result["data"]))

    while len(node_list) > 0:
        item = node_list.pop()
        data = item[2]
        conf = item[1]
        parent_node = item[0]
        nodes = parent_node.xpath(conf["xpath"])
        for node in nodes:
            subdata = {}
            subdata["name"] = conf["name"]
            if "target" in conf:
                subdata["curr"] = {}
                link = {}
                for target in conf["target"]:
                    value = get_node_info(node, target["info"])
                    if value is not None:
                        dest = None
                        tname = None
                        # "ttype" routes the value: "link" fields build a
                        # follow-up request dict; "url" fields are
                        # absolutised into the current record; anything
                        # else is stored verbatim in the current record.
                        if "ttype" in target:
                            if target["ttype"] == "link":
                                value = value.split("#")[0]  # drop fragment
                                dest = link
                            elif target["ttype"] == "url":
                                value = parse_url(url, value)
                                dest = subdata["curr"]
                            else:
                                dest = subdata["curr"]
                        else:
                            dest = subdata["curr"]
                        # "tname" overrides the stored field name.
                        if "tname" in target:
                            tname = target["tname"]
                        else:
                            tname = target["name"]
                        dest[tname] = value
                # Only emit a follow-up link when it actually got a URL.
                if len(link) > 0 and "url" in link:
                    link["url"] = parse_url(url, link["url"])
                    link["referer"] = url.split("#")[0].split("?")[0]
                    result["link"].append(link)
                    if "name" not in link:
                        link["name"] = conf["name"]
            if "children" in conf:
                subdata["children"] = []
                for child in conf["children"]:
                    node_list.insert(0,(node,child,subdata["children"]))
            # Keep the record only if it produced fields or has children
            # still to be filled in ("and" binds tighter than "or" here).
            if "children" in subdata or "curr" in subdata and len(subdata["curr"])>0:
                data.append(subdata)
    return result
        
def work(entry_urls, referer, init_path, post_process, process_data, logger):
    """Crawl loop: fetch pages, parse them, queue new links, emit data.

    entry_urls   -- seed URLs; each is queued with *init_path* as config.
    referer      -- referer header used for the seed requests.
    init_path    -- xpath config handed to parse() for the seeds.
    post_process -- hook(result, status) applied to every non-empty parse.
    process_data -- sink called with each batch of extracted records.
    logger       -- log instance for fetch and error reporting.
    """
    pending = [{"name": "search", "url": u, "referer": referer, "conf": init_path}
               for u in entry_urls]
    while pending:
        status = pending.pop()
        html = get_page(status, None, logger)
        if not html:
            continue
        result = parse(html, status["conf"], status["url"])
        if not result["link"] and not result["data"]:
            logger.log_error('empty result:' + status["url"] + ',' + status["name"], 'recovery')
            continue
        result = post_process(result, status)
        if result["link"]:
            pending.extend(result["link"])
        if result["data"]:
            process_data(result["data"])

def main(func):
    """Run *func* between init() (cookie/opener setup) and clean() (cookie save)."""
    init()
    func()
    clean()
