#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 2017/5/7 下午8:02
# @Author  : zhangzhen
# @Site    : 
# @File    : sequence.py
# @Software: PyCharm

from com.corpus import corpus
from com.dict.word import sent_dict
from com.utils.ioutils import ioutils
from collections import defaultdict
from sklearn.multiclass import OneVsOneClassifier
from sklearn.multiclass import OneVsRestClassifier
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import chi2
from sklearn.svm import LinearSVC
import matplotlib.pyplot as plt
from time import time
import random
import numpy as np
import itertools
import sys
import re
# Python 2 only: restore sys.setdefaultencoding so UTF-8 corpus text can be
# mixed with byte strings freely; on Python 3 reload() is absent and the
# whole shim is silently skipped.
try:
    reload(sys)
    sys.setdefaultencoding('utf-8')
except:
    pass

# The seven emotion categories; the list index matches the corpus label ids
# 0-6 used throughout this file (e.g. the '#<t>' suffix on sequence rules).
types = ["happiness", "like", "surprise", "sadness", "disgust", "anger", "fear"]

class sequence():
    """A mined sequence rule bundled with its support and confidence."""

    def __init__(self, seq, sup=0.0, conf=0.0):
        # Name-mangled private storage; exposed only through the accessors.
        self.__seq = seq
        self.__sup = sup
        self.__conf = conf

    def get_seq(self):
        """Return the rule string, e.g. 'con-like#3'."""
        return self.__seq

    def set_sup(self, sup):
        """Overwrite the support value of this rule."""
        self.__sup = sup

    def get_sup(self):
        """Return the current support value."""
        return self.__sup

    def set_conf(self, conf):
        """Overwrite the confidence value of this rule."""
        self.__conf = conf

    def get_conf(self):
        """Return the current confidence value."""
        return self.__conf

    def __str__(self):
        # Same layout as '(sequence: %s, %0.8f, %0.8f)'.
        return '(sequence: {}, {:.8f}, {:.8f})'.format(
            self.__seq, self.__sup, self.__conf)


class seq_utils():
    """Static helpers for mining emotion sequence rules from POS-tagged
    corpora: sentence serialization, subsequence expansion, support and
    confidence computation, and feature vectorization."""

    # Pool of "tag1 tag2 ...#label" lines filled by init_seq().
    seq_pool = set()

    @staticmethod
    def max_type(type_dict):
        """Return the overall emotion between conjunction/transition words,
        i.e. the key with the highest count in *type_dict*.

        Returns None when the dict is empty or every count is <= 0; ties
        keep the first maximal key in iteration order."""
        best = None
        best_count = 0
        # dict.items() instead of Py2-only iteritems(): same pairs on
        # Python 2, also works on Python 3.
        for label, count in type_dict.items():
            if count > best_count:
                best_count = count
                best = label
        return best

    @staticmethod
    def get_fine(str):
        """Drop the '/tag' suffix from every 'word/tag' token and return
        the list of bare words.

        (The parameter shadows the builtin ``str``; the name is kept for
        interface compatibility with existing callers.)"""
        kept = []
        copying = True
        for ch in str:
            if ch == '/':
                copying = False  # entering the POS tag: stop copying
            elif ch == ' ':
                copying = True   # a space ends the tag: resume copying
            if copying:
                kept.append(ch)
        return ''.join(kept).split()

    @staticmethod
    def get_type(words, start, end):
        """Judge the dominant emotion of the fragment words[start:end].

        Counts sentiment-dictionary hits per emotion type and returns a
        one-element list [type_name], or None when the fragment contains
        no sentiment word."""
        counts = defaultdict(int)
        res = []
        if start < end:
            for i in range(start, end):
                # BUG FIX: the original read words[start] on every pass,
                # so only the first word of the fragment was inspected;
                # use the loop index instead.
                w = words[i].strip('་')  # strip the Tibetan tsheg delimiter
                if len(w) == 0:
                    continue
                if w in sent_dict.sentiword:  # direct membership, no .keys()
                    t = sent_dict.sentiword[w].get_type()
                    counts[types[t]] += 1
        # aggregate the emotion of the fragment as a whole
        last_type = seq_utils.max_type(counts)
        if last_type is not None:
            res.append(last_type)
        if len(res) > 0:
            return res
        return None

    @staticmethod
    def get_combinations(seq_line):
        """Expand one 'a b c#label' line into every non-empty subsequence.

        Returns (subsequences, label) where subsequences is a single
        space-separated string of '-'-joined combinations, e.g.
        'a b c a-b a-c b-c a-b-c'."""
        ws = seq_line.split('#')
        seq = ws[0].split(' ')
        # generate the subsequence rules, one group per combination size
        grouped = []
        for size in range(1, len(seq) + 1):
            grouped.append(list(itertools.combinations(seq, size)))
        # emit the subsequence rules
        out = []
        for group in grouped:
            out.append(' '.join('-'.join(combo) for combo in group))
        return ' '.join(out), ws[1]

    @staticmethod
    def get_sub_seq_sent(sent):
        """Serialize one corpus sentence into all its subsequence rules:
            a b c a-b a-c b-c a-b-c
        Returns 'seq1 seq2 ... seqn' where each seqn has the form
        [A-B..-Z], or the literal string 'null' when the sentence yields
        no sequence tags."""
        # parse the single sentence into its set of sequence tags
        res = seq_utils.seq_sent(sent)
        if len(res) == 0:
            return 'null'
        grouped = []
        for size in range(1, len(res) + 1):
            grouped.append(list(itertools.combinations(res, size)))
        out = []
        for group in grouped:
            out.append(' '.join('-'.join(combo) for combo in group))
        return ' '.join(out)

    @staticmethod
    def seq_sent(sent):
        """Serialize a POS-tagged sentence into its set of sequence tags.

        Splits the sentence at conjunctions (tag 'c', which also emits the
        literal tag 'con') and punctuation (tag 'xp'), labelling each
        segment with its dominant emotion via get_type().

        :return: a (possibly empty) set such as {'con', 'like', 'fear'}
        """
        seq_tag = []

        # extract the POS tags, each written as '/tag ' after the word;
        # the trailing space guarantees the last tag is matched too
        sent = sent + ' '
        tag_pattern = re.compile(r'/(.*?)[ ]')
        matchs = tag_pattern.findall(sent)

        # extract the bare word texts, space separated
        words = seq_utils.get_fine(sent)

        pre = 0
        for i, v in enumerate(matchs):
            if v == 'xp':
                # punctuation closes the current segment
                tmp_type = seq_utils.get_type(words, pre, i)
                if tmp_type is not None:
                    seq_tag.extend(tmp_type)
                pre = i + 1
            if v == 'c':
                # a conjunction both emits 'con' and closes the segment
                seq_tag.append('con')
                tmp_type = seq_utils.get_type(words, pre, i)
                if tmp_type is not None:
                    seq_tag.extend(tmp_type)
                pre = i + 1

        # emotion of the trailing segment after the last delimiter
        tmp_type = seq_utils.get_type(words, pre, len(matchs))
        if tmp_type is not None:
            seq_tag.extend(tmp_type)
        return set(seq_tag)

    @staticmethod
    def init_seq():
        """Build seq_pool from all 7 emotion corpora and dump it to seq.txt."""
        start = time()
        for t in range(7):
            c = corpus.corpus('../../data/', str(t))
            for sent in c.get_pos_corpus():
                seq = seq_utils.seq_sent(sent)
                if len(seq) > 0:
                    # NOTE: seq is a set, so the joined tag order is not
                    # guaranteed to be stable between runs.
                    seq_utils.seq_pool.add(' '.join(seq) + '#' + str(t))
        ioutils.write2loacl(seq_utils.seq_pool, 'seq.txt')
        # parenthesized single-arg print behaves identically on Py2 and Py3
        print('耗时时间%0.3f ms' % (time() - start))

    @staticmethod
    def init_sub_rule():
        """Compute support and confidence for every sub-rule derived from
        seq.txt and write them to seq_rule.txt as 'seq$sup$conf' lines."""
        seq_dict = defaultdict(int)  # '<sub>#<type>' -> occurrence count
        seq_pool = defaultdict()     # '<sub>#<type>' -> sequence object
        seq_count = 0                # total number of parent sequences
        sub_set = set()              # every distinct sub-sequence prefix
        # the original leaked this file handle; the with-block closes it
        with open('seq.txt') as fin:
            for line in fin:
                seq_count += 1
                # expand into its sub-sequences plus the class label
                sub_line, t = seq_utils.get_combinations(line.strip())
                for s in set(sub_line.split(' ')):
                    sub_set.add(s)
                    seq_dict[s + '#' + t] += 1

        # support = fraction of parent sequences containing the sub-rule
        for key, occurrences in seq_dict.items():
            seq_pool[key] = sequence(key, sup=1.0 * occurrences / seq_count)

        # confidence = per-class share of the sub-rule's total occurrences
        for sub in sub_set:
            count = defaultdict(int)
            tmp_total = 0
            for i in range(7):
                key = sub + '#' + str(i)
                if key in seq_dict:  # direct membership, no .keys() scan
                    count[i] = seq_dict[key]
                    tmp_total += seq_dict[key]

            for i in range(7):
                if i in count:
                    key = sub + '#' + str(i)
                    seq_pool[key].set_conf(1.0 * count[i] / tmp_total)

        # output as seq$sup$conf -> seq_rule.txt
        with open('seq_rule.txt', 'w+') as f:
            for k, v in seq_pool.items():
                f.write(v.get_seq() + '$' + str(v.get_sup()) + '$' + str(v.get_conf()) + '\n')

    @staticmethod
    def get_seq(min_sup=0.0, min_conf=0.0):
        """Load the rules from seq_rule.txt that satisfy both thresholds.

        Returns a dict mapping 'seq#type' to a dense integer feature id.
        NOTE(review): this reads '../../data/seq_rule.txt' while
        init_sub_rule() writes './seq_rule.txt' -- presumably the file is
        moved by hand; confirm the intended location."""
        rules = defaultdict(int)
        cur = 0
        # the original never closed this handle; also iterate the file
        # directly instead of materializing readlines()
        with open('../../data/seq_rule.txt') as fin:
            for line in fin:
                # e.g. sadness-like-con-disgust#1$0.000803212851406$1.0
                ws = line.strip().split('$')
                if float(ws[1]) >= min_sup and float(ws[2]) >= min_conf:
                    rules[ws[0]] = cur
                    cur += 1
        return rules

    @staticmethod
    def load_seq(min_sup=0.0, min_conf=0.0):
        """Load the rules and vectorize every corpus sentence with them.

        Returns (X, y, n_features): the tf-idf feature matrix, the label
        array, and the vocabulary size."""
        features = seq_utils.get_seq(min_sup=min_sup, min_conf=min_conf)
        # CountVectorizer turns each text into a term-frequency row;
        # element a[i][j] is the frequency of term j in text i
        vectorizer = CountVectorizer()
        # TfidfTransformer re-weights each term by its tf-idf value
        transformer = TfidfTransformer()
        texts = []
        y = []
        for i in range(7):
            c = corpus.corpus('../../data/', str(i))
            for line in c.get_seq_corpus():
                # map each rule of the sentence to its numeric feature id
                tmp = []
                for w in line.split(' '):
                    key = w + '#' + str(i)
                    if key in features:  # direct membership, no .keys()
                        tmp.append(str(features[key]))
                # NOTE(review): CountVectorizer's default token pattern
                # requires 2+ word characters, so single-digit feature ids
                # (0-9) are silently dropped here -- confirm if intended.
                texts.append(' '.join(tmp))
                y.append(i)
        tfidf = transformer.fit_transform(vectorizer.fit_transform(texts))
        word = vectorizer.get_feature_names()  # the feature vocabulary
        X = tfidf.toarray()
        return X, np.array(y), len(word)


def plot(macro_p, macro_r, micro_p, micro_r, tops):
    """Draw the macro/micro precision-recall curves over the *tops* axis.

    macro_p/macro_r/micro_p/micro_r are numpy arrays of the same length as
    tops; the macro-F curve is derived from macro_p and macro_r. Shows the
    figure interactively and returns nothing."""
    x = list(range(len(tops)))
    # figsize is (width, height) in inches
    plt.figure(figsize=(8, 4))
    # macro-average curves (the explicit color= overrides the format string)
    plt.plot(x, macro_p, 'ro-', label="$macro-precision$", color="red", linewidth=2)
    plt.plot(x, macro_r, 'gv-', label="$macro-recall$", color="green", linewidth=2)
    macro_f = 2 * macro_p * macro_r / (macro_p + macro_r)
    plt.plot(x, macro_f, 'bs-', label="$macro-F$", color="blue", linewidth=2)
    # micro-average curve
    plt.plot(x, micro_p, 'ch-', label="$micro-average$", color="black", linewidth=2)
    # label each tick with its threshold value
    plt.xticks(x, tops, rotation=0)
    plt.title(u'Classification of Sequence Rules Features with Different Min_conf')
    # axis ranges
    plt.xlim(0, len(tops))
    plt.ylim(0.00, 0.9)
    plt.legend()
    plt.grid()
    plt.show()


def train_test(min_sup=0.0, min_conf=0.0, train_num=400, test_num=100):
    """Train a one-vs-one LinearSVC on the sequence-rule features and
    evaluate it on a random per-class test sample.

    Returns (acc, err, tops): per-class correct/incorrect counters (each
    starting at 1, not 0) and the feature dimensionality."""
    X, y, tops = seq_utils.load_seq(min_sup=min_sup, min_conf=min_conf)

    train_index = []
    test_index = []
    by_label = defaultdict(list)

    # bucket the sample indices by their class label
    for idx, label in enumerate(y):
        by_label[label].append(idx)

    for label, indices in by_label.items():
        if train_num >= len(indices):
            train_index.extend(indices)  # class smaller than quota: take all
        else:
            train_index.extend(random.sample(indices, train_num))
        test_index.extend(random.sample(indices, test_num))  # test indices

    train_index = np.array(train_index)
    X_train = np.array([X[i] for i in train_index])  # training data
    y_train = np.array([y[i] for i in train_index])  # training labels
    # randomly drawn test split
    X_test = np.array([X[i] for i in test_index])
    y_test = np.array([y[i] for i in test_index])
    predictions = OneVsOneClassifier(LinearSVC(random_state=0)).fit(X_train, y_train).predict(X_test)
    # counters start at one per class; the caller's denominators use
    # test_num + 2, presumably to match -- TODO confirm the smoothing intent
    acc = np.ones(7)
    err = np.ones(7)
    for i, guess in enumerate(predictions):
        if guess == y_test[i]:
            acc[guess] += 1
        else:
            err[guess] += 1
    return acc, err, tops

if __name__ == '__main__':
    # Driver: sweep min_sup thresholds, train/evaluate the classifier for
    # each one, print the metrics and finally plot the curves.

    # seq_utils.init_seq()
    #######################################################################################

    # start = time()
    # print '起始时间', start
    # end = time()
    # print '终止时间', end
    # print '耗时时间', end - start
    # sent = 'མགོ་/n སྟེང་/n དུ་/kl ཐོག་/n བརྒྱབ་/v པ་/h ལྟ་/k བུ/k ར་/uf གློ་/k བུར་/k གྱི་/kg གོད་/n ཆགས་/v བྱུང་/v བ/h འི་/kg དཔེ/n །/xp'
    # seq_utils.get_sub_seq_sent(sent)

    # Initialization: compute the association-rule subsequences
    # (only needs to be executed once).
    # seq_utils.init_sub_rule()
    #######################################################################################

    # rules = seq_utils.get_seq(0.05, 0.03)
    # print len(rules)

    #######################################################################################
    print 100*'-'

    macro_p = []
    macro_r = []
    micro_p = []
    micro_r = []
    test_num = 100
    train_num = 400
    # support thresholds to sweep: 0.005, 0.010, ..., 0.055
    min_spans = np.arange(0.005, 0.06, 0.005)
    for min_span in min_spans:
        acc, err, tops = train_test(min_sup=min_span, min_conf=0, test_num=test_num, train_num=train_num)
        # ap = 0.0
        # for i in range(7):
        #     if acc[i] == 0:
        #         continue
        #     ap += acc[i]/(err[i]+acc[i])
        # ap = ap / 7
        # macro precision / recall averaged over the 7 classes; the
        # test_num + 2 denominator presumably compensates for the counters
        # in train_test starting at 1 instead of 0 -- TODO confirm
        ap = np.sum(acc / (acc+err)) / 7
        ar = np.sum(acc / (test_num+2)) / 7
        # micro precision / recall pooled over all classes
        ip = np.sum(acc) / (np.sum(acc) + np.sum(err))
        ir = np.sum(acc) / (7 * (test_num+2))
        print "维度", tops
        print "宏准确", ap
        print "宏召回率", ar
        print "宏F值", 2 * ap * ar / (ap + ar)
        print "微准确率", ip
        print "微召回率", ir
        print "微F值", 2 * ip * ir / (ip + ir)
        macro_p.append(ap)
        macro_r.append(ar)
        micro_p.append(ip)
        micro_r.append(ir)

    # plot the collected curves
    print '宏准确率',macro_p
    print '宏召回率',macro_r
    print '微准率',micro_p

    plot(np.array(macro_p), np.array(macro_r), np.array(micro_p), np.array(micro_r), min_spans)

    pass
