#! /usr/bin/python
#-*- coding=utf-8 -*-

import util
import sys
from lxml import etree
import re
import json

def paste(lst1, lst2):
    """Zip two columns into rows, padding the shorter side with "".

    Each output row is lst1[i] -- flattened one level if it is already a
    list -- followed by lst2[i] appended as a single trailing element.
    The result has max(len(lst1), len(lst2)) rows; missing positions on
    either side contribute "".  Used by parse() to accumulate the parallel
    xpath columns into per-record rows.
    """
    result = []
    for idx in range(max(len(lst1), len(lst2))):
        left = lst1[idx] if idx < len(lst1) else ""
        if not isinstance(left, list):
            left = [left]
        # NOTE: the right-hand value is always appended as ONE element,
        # even when it is itself a list (intentional asymmetry).
        right = lst2[idx] if idx < len(lst2) else ""
        result.append(left + [right])
    return result

def parse(html, url, next_xpath, data_xpath, pagesize_xpath, post_xpath):
    """Parse one search-result page.

    Parameters:
        html           -- page markup (already newline/comment normalized by the caller)
        url            -- the page's own url, used to resolve relative links
        next_xpath     -- (xpath, node-info spec) locating the pagination form/link
        data_xpath     -- iterable of (xpath, node-info spec) pairs, one per output column
        pagesize_xpath -- (xpath, node-info spec) locating the "next page" onclick handler
        post_xpath     -- xpath selecting the form's <input> elements for the next POST

    Returns (data, (next_url, data_map), pagesize) where data is a list of
    rows (columns pasted together), data_map is the hidden-input dict for
    the next POST (or None if no form inputs were found), and pagesize is
    the page count parsed from the jumppage() handler (or None).
    """
    data = []
    tree = etree.HTML(html)
    for item in data_xpath:
        nodes = tree.xpath(item[0])
        if len(nodes) < 1:
            continue
        tmp = []
        # BUGFIX: the original indexed nodes[i] for i in range(0, 50),
        # which raised IndexError whenever an xpath matched fewer than 50
        # nodes (e.g. the last, partially-filled results page).  Cap at 50
        # without over-indexing.
        for node in nodes[:50]:
            value = util.get_node_info(node, item[1])
            if value is not None:
                if value.startswith("showDialog("):
                    # onclick="showDialog('relative_url', ...)" -- pull out
                    # the first argument and resolve it against this page.
                    value = value[11:-1].split(",")[0].strip("'\" ")
                    value = util.parse_url(url, value)
                else:
                    # collapse runs of whitespace into single spaces
                    value = re.sub("[\t\r\n ]+", " ", value)
            else:
                value = ""
            tmp.append(value)
        data.append(tmp)
    if len(data) > 0:
        # transpose column lists into per-record rows
        data2 = [[]] * len(data[0])
        for column in data:
            data2 = paste(data2, column)
        data = data2
    next_url = None
    url_nodes = tree.xpath(next_xpath[0])
    if len(url_nodes) > 0:
        value = util.get_node_info(url_nodes[0], next_xpath[1])
        next_url = util.parse_url(url, value)
    post_nodes = tree.xpath(post_xpath)
    data_map = None
    pagesize = None
    if len(post_nodes) > 0:
        # collect the form's named inputs as the POST payload for the next page
        data_map = {}
        for node in post_nodes:
            names = set(node.keys())
            if "name" in names:
                name = node.get("name")
                value = node.get("value") if "value" in names else ""
                data_map[name] = value
        pagesize_nodes = tree.xpath(pagesize_xpath[0])
        if len(pagesize_nodes) > 0:
            value = util.get_node_info(pagesize_nodes[0], pagesize_xpath[1])
            # onclick="jumppage('pageNo', ..., 'pagesize', ...)"
            if value is not None and value.startswith("jumppage("):
                params = value[9:].split(",")
                if len(params) > 2:
                    data_map["pageNo"] = params[0].strip("'\" ")
                    pagesize = int(params[2].strip("'\" "))

    next_url_post = (next_url, data_map)
    return (data, next_url_post, pagesize)

def get_text(node):
    """Return the node's own text, stripped; when the node has no direct
    text, fall back to all descendant text fragments (each stripped)
    joined by single spaces."""
    if node.text is not None:
        return node.text.strip()
    fragments = [piece.strip() for piece in node.itertext()]
    return " ".join(fragments)

def parse_detail(html):
    """Parse a record's detail page into a dict.

    The rows of //table[@class="f-lbiao"]/tr hold alternating label/value
    cells; each pair becomes one info_map entry (a later duplicate label
    overwrites an earlier one).
    """
    # Normalize newlines and strip HTML comments before handing the markup
    # to lxml.  Raw strings fix the original's invalid "\-" regex escapes
    # ('-' needs no escaping outside a character class).
    html = re.sub(r"[\r\n]", "\t", html)
    html = re.sub(r"<!--.+?-->", "", html)
    tree = etree.HTML(html)
    nodes = tree.xpath("//table[@class=\"f-lbiao\"]/tr")
    info_map = {}
    for node in nodes:
        key = None
        for child in node.iterchildren():
            value = get_text(child)
            if key is None:
                # odd cell: remember the label
                key = value
            else:
                # even cell: store label -> value and reset
                info_map[key] = value
                key = None
    return info_map

def test():
        """Crawl the Beijing enterprise-credit (qyxy.baic.gov.cn) search results.

        POSTs the hard-coded query, follows pagination through the form's
        action url and hidden inputs, fetches each row's detail page and
        prints one JSON record per row to stdout.  Progress goes to stderr;
        the last fetched page is dumped to t.html for debugging.
        """
        log = util.log("log/log.txt", "log/error.txt")
        # Seed url/payload captured from a browser session; credit_ticket and
        # currentTimeMillis presumably expire -- TODO confirm.
        url = "http://qyxy.baic.gov.cn/lucene/luceneAction!NetCreditLucene.dhtml?currentTimeMillis=1449649223028&credit_ticket=FABBED17D0B923CB580E3302552216DD"
        data = "queryStr=%E4%BF%A1%E6%81%AF%E6%9C%8D%E5%8A%A1&module=&idFlag=qyxy&SelectPageSize=50&EntryPageNo=3&pageNo=4&pageSize=50&clear=true"
        #print util.get_page({"url":url}, data ,log).encode("utf-8")
        # xpath/value-spec tuples consumed by parse(); see parse() for shapes.
        next_xpath = ("//form[@id=\"creditForm\"]", ("attribute","action"))
        post_xpath = "//form[@id=\"creditForm\"]//input"
        data_xpath = (("//form[@id=\"creditForm\"]/div/table/tr/td[@class=\"cx00\"]/font/a",("attribute","onclick")),("//form[@id=\"creditForm\"]/div/table/tr/td[@class=\"cx00\"]/font/a",("alltext",)), ("//form[@id=\"creditForm\"]/div/table/tr/td[@class=\"cx00\"]", ("alltext",)))
        pagesize_xpath = (u"//a[@title=\"下一页\"]",("attribute","onclick"))
        parse_t = lambda h,u:parse(h,u,next_xpath,data_xpath,pagesize_xpath,post_xpath)
        next_url = url
        post_data = data
        while next_url is not None and post_data is not None:
                html = util.get_page({"url":next_url}, post_data ,log)
                #html = open("result.html").read().decode("utf-8")
                sys.stderr.write(("==========url:%s,data:%s\n" % (next_url,str(post_data))))
                # NOTE(review): file handle is never closed; debug dump only.
                open("t.html","w").write(html.encode("utf-8"))
                # Same normalization as parse_detail(): flatten newlines, drop comments.
                html = re.sub("[\r\n]","\t",html)
                html = re.sub("<!\-\-.+?\-\->", "", html)
                (data, next_url_post,pagesize) = parse_t(html,url)
                for item in data:
                        #print item[0],re.sub("[\t ]","",item[1]),item[2]
                        if len(item) > 2:
                                # item[0]=detail url, item[1]=title, item[2]=row text
                                detail_html = util.get_page({"url":item[0]}, post_data ,log)
                                detail_map = parse_detail(detail_html)
                                print json.dumps([item[0],re.sub("[\t ]","",item[1]),item[2],detail_map])
                next_url = next_url_post[0]
                post_data = next_url_post[1]
                # NOTE(review): crashes here if the page had no form inputs
                # (post_data is None) -- the while condition is only checked
                # after these mutations.
                post_data["queryStr"] = post_data["queryStr"].encode("utf-8")
                post_data["SelectPageSize"] = 50
                post_data["pageSize"] = 50
                # Stop once past the last page; pagesize may be None if no
                # jumppage() handler was found -- TODO confirm that case.
                if int(post_data["pageNo"]) > pagesize:
                        break
def test2():
    """Smoke check: fetch one Weibo search page with a GET (no POST body)
    and print the raw markup to stdout."""
    log = util.log("log/log.txt", "log/error.txt")
    url = "http://s.weibo.com/weibo/nba?topnav=1&wvr=6&b=1"
    post_data = None
    print util.get_page({"url":url}, post_data ,log).encode("utf-8")

# Entry point: util.main appears to wrap the crawler with its own
# setup/error handling -- TODO confirm against util module.
if "__main__" == __name__:
        util.main(test)
