#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 2017/4/27 下午3:11
# @Author  : zhangzhen
# @Site    : 
# @File    : corpus_utils.py
# @Software: PyCharm
import re
from com.utils.ioutils import ioutils
from collections import defaultdict
from xml.etree import ElementTree as ET
from com.syn.syn import syn_utils
from com.corpus import corpus
# Corpus paths and category metadata.
stop_word_path = '../../data/stopwords.txt'
# Load the stop-word list (one UTF-8 word per line).
# Bug fix: use `with` so the file handle is closed (it used to leak).
with open(stop_word_path) as _stop_f:
    stopList = [line.strip().decode('utf-8') for line in _stop_f]
# The seven emotion categories and the sample count per category.
types = ["happiness", "like", "surprise", "sadness", "disgust", "anger", "fear"]
length = [4420, 4345, 373, 1374, 801, 318, 328]
# Source corpora and the output directory for the generated feature files.
ds = ['../../data/test.xml', '../../data/train.xml']
dir_root = '../../data/'


def is_alphabet(uchar):
    """Return True if the single unicode character is an ASCII letter (A-Z or a-z)."""
    # Chained comparisons cover both the upper- and lower-case ASCII ranges;
    # returning the boolean expression directly replaces the if/else True/False.
    return u'\u0041' <= uchar <= u'\u005a' or u'\u0061' <= uchar <= u'\u007a'


def is_contain_alphabet(sent):
    """Return True if any character of *sent* is an ASCII letter."""
    # any() with a generator short-circuits exactly like the original loop.
    return any(is_alphabet(ch) for ch in sent)


class corpus_utils():
    """Static helpers that clean the POS-annotated Tibetan emotion corpus and
    serialize it into per-category feature files under ``dir_root``."""

    @staticmethod
    def process_single(sent):
        """Strip '/pos' annotations and stop words from one segmented sentence.

        :param sent: a '/tag'-annotated sentence such as 'word/n word/v', or None
        :return: the cleaned space-joined sentence; '' for None or bad input
        """
        if sent is None:
            return ''
        try:
            text = sent + ' '  # trailing space so the tag pattern matches the last token
            s_tag = r'/(.*?)[ ]'
            text = re.sub(s_tag, ' ', text)  # drop the '/pos ' annotations
            text = [w for w in text.split() if w not in stopList]
            return ' '.join(text)
        except TypeError as e:  # 'as' form: valid on Python 2.6+ and required on 3
            print('TypeError %s' % e)
            print('TypeError %s' % sent)
            # Bug fix: the original fell through returning None here, which
            # crashed len(text) in init()/init_w2v().
            return ''

    @staticmethod
    def get_3_pos(sent):
        """Build overlapping POS-trigram features for one annotated sentence.

        Each trigram joins three consecutive POS tags with 'o' (e.g. 'nonfov').

        :param sent: a '/tag'-annotated sentence such as 'word/n word/v'
        :return: space-joined trigrams, or 'null' when the sentence has
                 three or fewer POS tags
        """
        text = sent + ' '  # trailing space so the pattern matches the last tag
        pattern = re.compile(r'/(.*?)[ ]')
        pos = pattern.findall(text)
        res = []
        # NOTE(review): '> 3' also skips sentences with exactly 3 tags although
        # one trigram exists for them; kept as-is for feature-set compatibility.
        if len(pos) > 3:
            for i in range(len(pos) - 2):
                res.append('o'.join(pos[i:i + 3]))
        if res:
            return ' '.join(res)
        return 'null'

    @staticmethod
    def init(dirs):
        """Merge all corpora under *dirs* and write per-category feature files.

        For every <element> node the seg/type/emo/syn children are read,
        duplicate cleaned texts are dropped, and five parallel representations
        are grouped by the emotion-type index and written under ``dir_root``:
        .dat (cleaned text), .pos (segmented + POS), .emo (emoticons),
        .tag (POS trigrams) and .syn (syntax-tree features).
        """
        corpus_dict = defaultdict(list)        # cleaned text (tags + stop words removed)
        pos_corpus_dict = defaultdict(list)    # segmented text with POS annotations
        emoit_corpus_dict = defaultdict(list)  # emoticon sequences
        tag_corpus_dict = defaultdict(list)    # POS-trigram sequences
        syn_corpus_dict = defaultdict(list)    # syntax-tree sequences
        pool = set()  # deduplicates cleaned texts across all input files
        for d in dirs:
            tree = ET.parse(d)
            # Example element:
            # <element wid="M_A06Y7Bymy">
            #   <user>...</user>
            #   <text>...raw weibo text with emoticons...</text>
            #   <fine>...text with emoticons removed...</fine>
            #   <seg>word/n word/v ... /xp</seg>
            #   <topic></topic>
            #   <emo>[flower]&amp;[flower]&amp;...</emo>
            #   <syn>[ [ [IP [NP ...] [VP ...]] [PU ...]] ]</syn>
            #   <type>like</type>
            # </element>
            for node in tree.iter('element'):  # .iter() replaces deprecated getiterator()
                # Collect child texts by tag; .get() avoids the original bug where
                # a missing child left a stale value from the previous element.
                fields = dict((child.tag, child.text) for child in node)
                pos = fields.get('seg')     # segmented + POS-annotated text
                label = fields.get('type')  # emotion category (renamed: 'type' shadowed a builtin)
                emoit = fields.get('emo')   # emoticon sequence
                syn = fields.get('syn')     # syntax tree
                if label is None:
                    continue  # unlabeled element: skip instead of reusing the prior label
                text = corpus_utils.process_single(pos)
                if len(text) > 0 and text not in pool:
                    pool.add(text)
                    t = types.index(label)
                    corpus_dict[t].append(text)
                    pos_corpus_dict[t].append(pos)
                    emoit_corpus_dict[t].append(emoit)
                    # POS-trigram serialization
                    tag_corpus_dict[t].append(corpus_utils.get_3_pos(pos))
                    # syntax-tree serialization
                    syn_corpus_dict[t].append(syn_utils.get_syn(syn))
        for k in pos_corpus_dict.keys():
            ioutils.write2loacl(corpus_dict[k], dir_root + str(k) + '.dat')
            ioutils.write2loacl(pos_corpus_dict[k], dir_root + str(k) + '.pos')
            ioutils.write2loacl(emoit_corpus_dict[k], dir_root + str(k) + '.emo')
            ioutils.write2loacl(tag_corpus_dict[k], dir_root + str(k) + '.tag')
            ioutils.write2loacl(syn_corpus_dict[k], dir_root + str(k) + '.syn')

    @staticmethod
    def init_w2v(root, name, target):
        """Clean every line of ``root+name`` and write the non-empty results to
        ``root+target`` (one cleaned sentence per line), e.g. as word2vec input."""
        texts = []
        # Bug fix: use `with` so the input file handle is closed (it used to leak).
        with open(root + name) as f:
            for line in f:
                text = corpus_utils.process_single(line.strip())
                if len(text) > 0:
                    texts.append(text)
        ioutils.write2loacl(texts, root + target)

if __name__ == '__main__':

    # Initialize the corpus: parse the train/test XML files listed in `ds`
    # and write the per-category .dat/.pos/.emo/.tag/.syn files under dir_root.
    corpus_utils.init(ds)

    # To rebuild the word2vec training text instead, run:
    #   corpus_utils.init_w2v('../../data/', 'text.txt', 'w2v.dat')
