#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 2017/5/10 下午6:29
# @Author  : zhangzhen
# @Site    : 
# @File    : f12.py
# @Software: PyCharm
"""依据情感词典+影响因子+规则特征"""
from com.corpus.corpus import corpus
from com.dict.word import sent_dict
import numpy as np
import string
import sys
from time import time
import random
import numpy as np
from collections import defaultdict
from sklearn.multiclass import OneVsRestClassifier
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import chi2
import pandas as pd
from sklearn.multiclass import OneVsOneClassifier
from sklearn.svm import LinearSVC
from sklearn.svm import SVC
from sklearn.multiclass import OneVsRestClassifier
import matplotlib.pyplot as plt

def load():
    """Featurize the corpus: sentiment-dictionary counts + syn-word TF-IDF.

    Reads the 7 class corpora under ../../data/ and builds, per document:
      * a 9-dim vector of sentiment-word-type counts plus two POS-based
        influence factors (/c conjunctions, /nd direction nouns);
      * a 170-dim chi2-selected TF-IDF vector over the 'syn' word pool.

    Returns:
        X: ndarray of shape (n_samples, 9 + 170), the two feature groups
           concatenated column-wise.
        y: list of int class labels in range(7).
    """
    X1 = []
    y = []
    syn_pool = []
    documents = []
    vectorizer = CountVectorizer()    # tokens -> term-frequency matrix
    transformer = TfidfTransformer()  # term frequencies -> tf-idf weights
    for label in range(7):
        data = corpus('../../data/', str(label))
        poss = data.get_pos_corpus()
        # Hoisted: the original called data.get_syn_corpus() once per
        # document inside the loop below.
        syns = data.get_syn_corpus()
        documents.append(syns)
        for k, c in enumerate(data.get_corpus()):
            # Collect this document's syn words into the global pool.
            syn_line = syns[k]
            if len(syn_line) > 0:
                syn_pool.extend(syn_line.split('&'))

            # 9-dim feature: sentiment word types + influence factors.
            one = np.zeros(9)
            line = poss[k]
            one[6] += line.count('/c')   # conjunction influence factor
            one[7] += line.count('/nd')  # direction-noun influence factor
            for w in c.split(' '):
                t = sent_dict.jude_senti_and_get_type(w)
                if t is not None:
                    one[t] += 1
            X1.append(one)
            y.append(label)

    X1 = np.array(X1)
    #############################################################
    # Deduplicate the feature pool, then map each document's syn words to
    # pool indices. BUG FIX: the original used syn_pool.index(w) per word,
    # an O(pool) scan inside a double loop (quadratic overall); a prebuilt
    # dict makes every lookup O(1).
    syn_pool = list(set(syn_pool))
    syn_index = {w: str(i) for i, w in enumerate(syn_pool)}
    texts = []
    for ds in documents:
        for d in ds:
            if len(d) > 0:
                texts.append(' '.join(syn_index[w] for w in d.split('&')))
            else:
                texts.append('')

    tfidf = transformer.fit_transform(vectorizer.fit_transform(texts))
    X2 = tfidf.toarray()
    # Keep the 170 most class-discriminative syn features (chi-squared).
    X2 = SelectKBest(chi2, k=170).fit_transform(X2, y)
    #############################################################

    print(X1.shape, X2.shape)
    X = np.append(X1, X2, axis=1)
    print(X.shape)
    return X, y


def tt(X, y, train_num, test_num):
    """Run one random train/test split and evaluate a one-vs-one linear SVM.

    Args:
        X: feature matrix (indexable by sample index).
        y: class labels in range(7), parallel to X.
        train_num: per-class training-sample cap.
        test_num: per-class test-sample count.

    Returns:
        (macro_precision, macro_recall, micro_precision, micro_recall)
    """
    train_index = []
    test_index = []
    type_dict = defaultdict(list)
    # Group sample indices by class label.
    for k, v in enumerate(y):
        type_dict[v].append(k)
    for label, idxs in type_dict.items():
        if train_num >= len(idxs):
            chosen = list(idxs)
        else:
            chosen = random.sample(idxs, train_num)
        train_index.extend(chosen)
        # BUG FIX: the original drew the test set from ALL of the class's
        # indices, so test samples could also appear in the training set
        # (data leakage inflating the reported metrics). Sample from the
        # held-out remainder; fall back to the full pool only when the
        # class is too small to provide test_num unseen samples.
        chosen_set = set(chosen)
        held_out = [i for i in idxs if i not in chosen_set]
        pool = held_out if len(held_out) >= test_num else idxs
        test_index.extend(random.sample(pool, test_num))

    X_train = np.array([X[i] for i in train_index])
    y_train = np.array([y[i] for i in train_index])

    X_test = np.array([X[i] for i in test_index])
    y_test = np.array([y[i] for i in test_index])

    print(X_train.shape, y_train.shape)
    print(X_test.shape, y_test.shape)

    # Per-class correct/incorrect prediction counts, initialized to 1
    # (add-one smoothing so no denominator below can be zero).
    acc = np.ones(7)
    err = np.ones(7)
    res = OneVsOneClassifier(LinearSVC(random_state=0)).fit(X_train, y_train).predict(X_test)
    for i, re in enumerate(res):
        if re == y_test[i]:
            acc[re] += 1
        else:
            err[re] += 1
    print(acc)
    print(err)
    # Macro averages; the +2 in the recall denominator mirrors the two
    # smoothing counts added to acc/err above.
    ap = np.sum(acc / (err + acc)) / 7
    ar = np.sum(acc / (test_num + 2)) / 7
    # Micro averages.
    ip = np.sum(acc) / (np.sum(acc) + np.sum(err))
    ir = np.sum(acc) / (7 * (test_num + 2))

    return ap, ar, ip, ir

if __name__ == '__main__':
    print(100 * '-')
    start = time()
    X, y = load()
    print('加载语料', str(time() - start))  # corpus-loading wall time

    train_num = 400
    test_num = 100
    # Number of random re-splits to average the metrics over.
    times = 50
    m_a_p = []
    m_a_r = []
    m_i_p = []
    m_i_r = []
    # BUG FIX: the loop variable was named `time`, shadowing the imported
    # time() function; any later timing call would have crashed.
    for _ in range(times):
        ap, ar, ip, ir = tt(X, y, train_num, test_num)
        m_a_p.append(ap)
        m_a_r.append(ar)
        m_i_p.append(ip)
        m_i_r.append(ir)

    m_a_p = np.mean(m_a_p)
    m_a_r = np.mean(m_a_r)
    m_i_p = np.mean(m_i_p)
    m_i_r = np.mean(m_i_r)

    print("宏准确率:", m_a_p)
    print("宏召回率:", m_a_r)
    print("宏F值:", 2 * m_a_p * m_a_r / (m_a_p + m_a_r))
    print("微准确率", m_i_p)
    print("微召回率", m_i_r)
    print("微F值", 2 * m_i_p * m_i_r / (m_i_p + m_i_r))
