#coding: utf8
import functools
import json
import os
import traceback
import urllib
import urllib2
import cPickle as pickle
from copy import deepcopy
from time import time as now

import sina_wordparam

# In-memory cache: normalized sentence text -> raw parser JSON string
# (filled by raw_get_tree_struct; a 'save_time' float key is added on dump).
g_tree_cache = {}

# In-memory cache: raw input text -> segmentation result JSON string
# (filled by segword; a 'save_time' float key is added on dump).
g_segword_cache = {}

# Cache entries saved before this timestamp are considered stale and ignored
# (original comment dates it "2013.4.3").
_CACHE_FRESHNESS_TS = 1365591379.361

def _load_cache_file(path, cache):
    """Load one pickle cache file into *cache* if it is readable and fresh.

    Reads the pickle exactly once (the original loaded each file twice) and
    closes the file handle deterministically.
    """
    if not os.access(path, os.R_OK):
        return
    f = open(path, 'rb')
    try:
        data = pickle.load(f)
    finally:
        f.close()
    if data.get('save_time', 0) > _CACHE_FRESHNESS_TS:
        cache.update(data)

def load_cache():
    """Populate the module-level caches from their on-disk pickle files."""
    _load_cache_file('sentence_tree.cache', g_tree_cache)
    _load_cache_file('segword.cache', g_segword_cache)

def dump_cache():
    """Persist both module-level caches to disk, stamping each with the
    current time so load_cache() can discard stale files.

    Files are opened in binary mode and closed explicitly (the original
    leaked the handles returned by file()).
    """
    for path, cache in (('sentence_tree.cache', g_tree_cache),
                        ('segword.cache', g_segword_cache)):
        cache['save_time'] = now()
        f = open(path, 'wb')
        try:
            pickle.dump(cache, f)
        finally:
            f.close()

def is_dev_env():
    """Return True when running outside a web-server environment.

    NOTE(review): presumably SERVER_SOFTWARE is set by the SAE production
    runtime — confirm against the deployment environment.
    """
    return os.environ.get('SERVER_SOFTWARE') is None

def get_str(obj):
    """Serialize *obj* to a JSON string.

    ensure_ascii=False keeps CJK characters literal instead of \\uXXXX escapes.
    """
    serialized = json.dumps(obj, ensure_ascii=False)
    return serialized

def unique_resp(func):
    """Decorator: deduplicate the sequence returned by *func*, preserving its
    container type (list -> list, tuple -> tuple, ...).

    NOTE: element order after deduplication follows set iteration order and is
    therefore arbitrary — unchanged from the original behavior.
    """
    # functools.wraps preserves the wrapped function's name/docstring, which
    # the original wrapper discarded.
    @functools.wraps(func)
    def _func(*args, **kwargs):
        res = func(*args, **kwargs)
        return type(res)(set(res))
    return _func

def segword(text, text_charset = 'UTF-8'):
    """Segment Chinese *text* into tagged words via a remote service.

    Returns a JSON string (unicode) encoding a list of word dicts.  Results
    are memoized in g_segword_cache keyed on the raw text.
    """
    if text in g_segword_cache:
        return g_segword_cache[text]
    # The endpoint differs per environment: a sinaapp URL during local
    # development, SAE's segmentation service in production.
    if is_dev_env():
        _SEGMENT_BASE_URL = 'http://sleepingforest.sinaapp.com/segword'
    else:
        _SEGMENT_BASE_URL = 'http://segment.sae.sina.com.cn/urlclient.php'
    # POST body carries the text; tagging options go in the query string.
    payload = urllib.urlencode([('context', text.encode('utf8')),])
    args = urllib.urlencode([('word_tag', 1), ('encoding', text_charset),])
    url = _SEGMENT_BASE_URL + '?' + args
    result = urllib2.urlopen(url, payload).read().decode('utf8')
    if not is_dev_env():
        # Production responses carry numeric tag ids; translate them into the
        # symbolic / Chinese / standardized tag names before caching.
        result = json.loads(result)
        for word in result:
            tag_id = word['word_tag']
            word['word_tag'] = sina_wordparam.id2tag[tag_id]
            word['word_cn_tag'] = sina_wordparam.id2cntag[tag_id]
            word['word_std_tag'] = sina_wordparam.id2stdtag.get(tag_id, "")
        result = json.dumps(result, ensure_ascii=False)

    g_segword_cache[text] = result
    #print 'segword result: %s'%result
    return result

# Standardized tags after which a following NP may need the particle 的.
word_tags_to_handle = ('NP', 'DP', 'ADJP')
# BUG FIX: the original ('POSTAG_ID_Q') lacked a trailing comma, so it was a
# plain STRING and the `in` test in get_standard_sentence matched any
# substring of it (e.g. 'D', 'POSTAG_ID').  A 1-tuple gives exact matching.
word_tags_to_skip = ('POSTAG_ID_Q',)
def get_standard_sentence(sentence):
    """Rebuild *sentence* from its segmentation, inserting the particle 的
    before an NP word that directly follows a handled tag (NP/DP/ADJP).

    Words whose tag is in word_tags_to_skip are copied through verbatim and
    do not affect the insertion state.
    """
    words = json.loads(segword(sentence))
    parts = []
    pending_de = False
    for w in words:
        if w['word_tag'] in word_tags_to_skip:
            parts.append(w['word'])
            continue
        if pending_de and w['word_std_tag'] == 'NP':
            parts.append(u"的")
        parts.append(w['word'])
        pending_de = w['word_std_tag'] in word_tags_to_handle
    return ''.join(parts)

def raw_get_tree_struct(text):
    """Fetch the parse tree for *text* from the remote Stanford-parser
    service and wrap it in a SentenceTree.

    Raw JSON responses are memoized in g_tree_cache keyed on the exact text.
    """
    if text in g_tree_cache:
        return SentenceTree(g_tree_cache[text])
    url_text = urllib.urlencode([("str", text.encode('utf8')),])
    url = r"http://myserver.cstfirst.org/parser/stanford_parser_test.aspx?" + url_text
    result = urllib2.urlopen(url).read().decode('utf8')
    g_tree_cache[text] = result
    return SentenceTree(result)

def get_tree_struct(text):
    """Normalize *text* (see get_standard_sentence) and parse it into a
    SentenceTree."""
    return raw_get_tree_struct(get_standard_sentence(text))
    

class SentenceTree(object):
    #just for stanford parse
    grammar_replace = {u'VV': u'VP',
            u'VE': u'VP', 
            u'VC': u'VP', 
            u'NN': u'NP',
            u'NR': u'NP',
            u'DT': u'DP',
            u'VA': u'ADJP',
            #'SP': '~', 
            }

    def __init__(self, tree=None):
        if isinstance(tree, str) or isinstance(tree, unicode):
            self.data = json.loads(tree)
        elif isinstance(tree, dict):
            self.data = tree
        elif isinstance(tree, SentenceTree):
            self.data = deepcopy(tree.data)
        elif tree is None:
            self.data = {}
        else:
            raise ValueError('Wrong type of tree(%s)' % type(tree))

        self.align_nodes = {}

    def __str__(self):
        return unicode(self.data)

    def json(self):
        return json.dumps(self.data, ensure_ascii=False)

    def _get_leaves(self):
        stack = [self.data]
        result = []
        while stack:
            item = stack.pop()
            if item.get('Word', ''):
                result.append(item)
            else:
                stack.extend(reversed(item.get('Child', [])))
        return result 

    def to_sentence(self):
        return ''.join([x.get('Word', '') for x in self._get_leaves()])

    def get_grammar(self):
        return [(x['Word'], self.grammar_replace.get(x['Token'], x['Token'])) 
                for x in self._get_leaves()]

    def mark_leaves_num(self):
        for i, node in enumerate(self._get_leaves()):
            node['Pos'] = i

    def get_child_by_offset(self, offset, check_name = ""):
        try:
            res = SentenceTree(self.data['Child'][offset]);
            if check_name and res.root_token != check_name:
                print res.root_token, '---not match---', check_name
                raise ValueError
            return res
        except Exception, e:
            traceback.print_exc()
            raise ValueError

    def copy_one_level(self): #可以优化性能
        res = deepcopy(self.data)
        if 'Child' not in res: res['Child'] = []
        for child in res['Child']:
            child['Child'] = []
        return SentenceTree(res)

    def update(self, tree):
        self.data.update(tree.data)

    def get_align_nodes(self):
        res = []
        if self.root_align:
            res.append(self)
        for child in self.root_children:
            res.extend(child.get_align_nodes())
        return res

    def get_child_by_align(self, align):
        if align in self.align_nodes:
            return self.align_nodes[align]

        for node in self.get_align_nodes():
            self.align_nodes[node.root_align] = node

        if align not in self.align_nodes:
            print align, 'not in', self.align_nodes
        return self.align_nodes.get(align, None)

    @property
    def root_token(self):
        return self.data.get('Token', "")

    @root_token.setter
    def root_token(self, token):
        self.data['Token'] = token

    @property
    def root_word(self):
        return self.data.get('Word', "")
    
    @property
    def root_children(self):
        return [SentenceTree(child) for child in self.data.get('Child', [])]

    @property
    def root_align(self):
        return self.data.get('Align', "")

    @root_align.setter
    def root_align(self, align):
        self.data['Align'] = align

if __name__ == '__main__':
    s = u'这款手机会容易摔坏吗？'
    print 'standard %s -> %s' % (s, get_standard_sentence(s))
    
