#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 2017/5/3 下午11:03
# @Author  : zhangzhen
# @Site    : 
# @File    : corpus.py
# @Software: PyCharm
import os
from gensim.models import word2vec
import sys
# Python 2 only: force UTF-8 as the interpreter's default codec so implicit
# str/unicode coercions on the Chinese corpus text don't raise
# UnicodeDecodeError. `setdefaultencoding` is deleted by `site` at startup,
# hence the reload(sys) to get it back. On Python 3 `reload` is no longer a
# builtin (NameError) and the attribute does not exist (AttributeError), so
# the shim is a no-op there. The original bare `except:` also swallowed
# KeyboardInterrupt/SystemExit; catch only what this code can raise.
try:
    reload(sys)
    sys.setdefaultencoding('utf-8')
except (NameError, AttributeError):
    pass

class corpus():
    """One aligned annotation corpus used for iterative text clustering.

    Loads five parallel, line-aligned files that share a base name under a
    common root directory:

    * ``<name>.pos`` -- POS-tagged texts
    * ``<name>.dat`` -- tokenized texts with stop words removed
    * ``<name>.emo`` -- emoticon annotations ('null' or '&'-separated list)
    * ``<name>.tag`` -- 3-pos representation ('null' for none)
    * ``<name>.syn`` -- syntactic representation ('null' for none)

    ``__cur_index`` holds the indices of texts still awaiting clustering;
    ``refresh``/``refresh_corpus`` shrink it as clusters are extracted, and
    ``__cluster_result`` accumulates the per-cluster index lists.
    """

    def __init__(self, root, name):
        """
        :param root: corpus root directory, e.g. '../../'
        :param name: corpus base file name (extensions are appended here)
        """
        self.__root = root
        self.__name = name
        self.__pos_name = self.__name + '.pos'
        self.__corpus_name = self.__name + '.dat'
        self.__emo_name = self.__name + '.emo'
        self.__tag_name = self.__name + '.tag'
        self.__syn_name = self.__name + '.syn'
        self.__w2v_name = 'w2v.model'

        self.__cluster_result = []  # per-cluster lists of text indices
        self.__model = None         # lazily loaded/trained word2vec model
        self.__index = []           # texts that have already been clustered
        self.__indices = []         # text indices grouped by cluster

        # POS-tagged corpus; initially every loaded line is pending clustering.
        self.__pos_corpus = self.__read_lines(self.__pos_name)
        self.__cur_index = list(range(len(self.__pos_corpus)))

        # Tokenized corpus with stop words removed.
        self.__corpus = self.__read_lines(self.__corpus_name)

        # Emoticon annotations: 'null' means none; otherwise the line is a
        # '&'-separated list with a trailing separator (hence the [:-1]).
        self.__emoit = [[] if line == 'null' else line.split('&')[:-1]
                        for line in self.__read_lines(self.__emo_name)]

        # 3-pos representation: 'null' becomes the empty string.
        self.__tag = ['' if line == 'null' else line
                      for line in self.__read_lines(self.__tag_name)]

        # Syntactic representation: 'null' becomes the empty string.
        self.__syn = ['' if line == 'null' else line
                      for line in self.__read_lines(self.__syn_name)]

        self.__len = len(self.__corpus)

    def __read_lines(self, name):
        """Read root+name and return its lines, trailing newlines stripped.

        Uses a context manager so the file handle is always closed (the
        original left five handles open).
        """
        with open(self.__root + name) as fh:
            return [line.strip('\n') for line in fh]

    def get_corpus(self):
        """Tokenized texts still pending clustering."""
        return [self.__corpus[i] for i in self.__cur_index]

    def get_pos_corpus(self):
        """POS-tagged texts still pending clustering."""
        return [self.__pos_corpus[i] for i in self.__cur_index]

    def get_emoit_corpus(self):
        """Emoticon lists for texts still pending clustering."""
        return [self.__emoit[i] for i in self.__cur_index]

    def get_tag_corpus(self):
        """3-pos representations for texts still pending clustering."""
        return [self.__tag[i] for i in self.__cur_index]

    def get_syn_corpus(self):
        """Syntactic representations for texts still pending clustering."""
        return [self.__syn[i] for i in self.__cur_index]

    def get_root(self):
        """Corpus root directory as given to the constructor."""
        return self.__root

    def get_len(self):
        """Number of texts still pending clustering."""
        return len(self.__cur_index)

    def get_cur_index(self):
        """Indices of texts still pending clustering."""
        return self.__cur_index

    def get_cluster_result(self):
        """Accumulated clusters, each a list of original text indices."""
        return self.__cluster_result

    def get_pos_corpus_by_indies(self, indices):
        """POS-tagged texts at the given original indices.

        NOTE(review): name kept as-is ('indies' for 'indices') so existing
        callers keep working.
        """
        return [self.__pos_corpus[i] for i in indices]

    def refresh_corpus(self, res):
        """Keep only the largest cluster's texts for further clustering.

        :param res: positions (into the current pending list) to retain
        """
        print('选择最大的语料继续聚类')
        self.__cur_index = [val for pos, val in enumerate(self.__cur_index)
                            if pos in res]

    def refresh(self, res):
        """Record clustering results and drop clustered texts from the
        pending set.

        :param res: iterable of clusters, each an iterable of positions
                    into the current pending list
        """
        consumed = []
        for cluster in res:
            self.__cluster_result.append(
                [val for pos, val in enumerate(self.__cur_index)
                 if pos in cluster])
            consumed.extend(cluster)
        # Whatever was not consumed by any cluster stays pending.
        self.__cur_index = [val for pos, val in enumerate(self.__cur_index)
                            if pos not in consumed]

    def get_w2v_model(self):
        """Load a cached word2vec model, or train one from w2v.dat and cache it.

        :return: a gensim ``word2vec.Word2Vec`` instance
        """
        model_path = self.__root + self.__w2v_name
        if os.path.isfile(model_path):
            self.__model = word2vec.Word2Vec.load(model_path)
        else:
            # BUG FIX: the original appended training sentences to the class
            # object `corpus` (AttributeError) instead of the local list.
            with open(self.__root + 'w2v.dat') as fh:
                w2v_corpus = [line.strip().split() for line in fh]
            self.__model = word2vec.Word2Vec(w2v_corpus, size=100, window=5,
                                             min_count=5, workers=4)
            self.__model.save(model_path)
        return self.__model

if __name__ == '__main__':
    sum  = 0
    for i in range(7):
        data = corpus('../../data/', str(i))
        print "精华语料", len(data.get_corpus()), data.get_corpus()[1]
        print "词性标注", len(data.get_pos_corpus()), data.get_pos_corpus()[1]
        print "表情化", len(data.get_emoit_corpus()), data.get_emoit_corpus()[1]
        print "3pos化", len(data.get_tag_corpus()), data.get_tag_corpus()[1]
        print "SYN化", len(data.get_syn_corpus()), data.get_syn_corpus()[1]
        sum += len(data.get_corpus())
    print sum
