#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 2017/5/10 下午6:29
# @Author  : zhangzhen
# @Site    : 
# @File    : f12.py
# @Software: PyCharm
"""依据情感词典+影响因子"""
from com.corpus.corpus import corpus
from com.dict.word import sent_dict
import numpy as np
import string
import sys
from time import time
import random
import numpy as np
from collections import defaultdict
import pandas as pd
from sklearn.multiclass import OneVsOneClassifier
from sklearn.svm import LinearSVC
from sklearn.svm import SVC
from sklearn.multiclass import OneVsRestClassifier
import matplotlib.pyplot as plt

def load():
    """Featurize the corpus: sentiment-dictionary hits + impact factors.

    Builds one 9-dimensional feature vector per document across the 7
    corpus classes and returns (X, y), where X has shape (n_samples, 9)
    and y holds the integer class label (0-6) of each sample.
    """
    features = []
    labels = []
    for label in range(7):
        data = corpus('../../data/', str(label))
        pos_lines = data.get_pos_corpus()
        for idx, sentence in enumerate(data.get_corpus()):
            feat = np.zeros(9)
            tagged = pos_lines[idx]
            # Impact factors from POS tags: '/c' (conjunction) and '/nd'
            # counts go into fixed slots 6 and 7.
            feat[6] += tagged.count('/c')
            feat[7] += tagged.count('/nd')
            # Sentiment-dictionary lookups, bucketed by returned type index.
            for word in sentence.split(' '):
                senti_type = sent_dict.jude_senti_and_get_type(word)
                if senti_type is not None:
                    feat[senti_type] += 1
            features.append(feat)
            labels.append(label)
    return np.array(features), np.array(labels)


def tt(X, y, train_num, test_num):
    """Run one randomized train/test split and evaluate a one-vs-one SVM.

    Samples up to ``train_num`` training indices and ``test_num`` test
    indices per class, fits a OneVsOneClassifier(LinearSVC) and returns
    (macro_precision, macro_recall, micro_precision, micro_recall).

    NOTE(review): test indices are drawn from the full per-class pool, so
    they may overlap the training set — confirm this leakage is intended.
    """
    train_index = []
    test_index = []
    type_dict = defaultdict(list)
    # Group sample indices by class label.
    for idx, label in enumerate(y):
        type_dict[label].append(idx)
    # FIX: iteritems() is Python-2-only; items() behaves the same on 2 and 3.
    for label, indices in type_dict.items():
        if train_num >= len(indices):
            train_index.extend(indices)
        else:
            train_index.extend(random.sample(indices, train_num))
        test_index.extend(random.sample(indices, test_num))

    X_train = np.array([X[i] for i in train_index])
    y_train = np.array([y[i] for i in train_index])

    X_test = np.array([X[i] for i in test_index])
    y_test = np.array([y[i] for i in test_index])

    # Single-string print() works identically under Python 2 and 3
    # (the old multi-argument print statements were py2-only).
    print("%s %s" % (X_train.shape, y_train.shape))
    print("%s %s" % (X_test.shape, y_test.shape))

    # Per-class correct/incorrect counts, initialized to 1 as smoothing;
    # the recall denominators below use test_num + 2 to match.
    acc = np.ones(7)
    err = np.ones(7)
    res = OneVsOneClassifier(LinearSVC(random_state=0)).fit(X_train, y_train).predict(X_test)
    # res = OneVsRestClassifier(LinearSVC(random_state=0)).fit(X_train, y_train).predict(X_test)
    for i, pred in enumerate(res):
        if pred == y_test[i]:
            acc[pred] += 1
        else:
            err[pred] += 1
    # Macro-averaged precision / recall (mean of per-class scores).
    ap = np.sum(acc / (err + acc)) / 7
    ar = np.sum(acc / (test_num + 2)) / 7
    # Micro-averaged precision / recall (pooled over all classes).
    ip = np.sum(acc) / (np.sum(acc) + np.sum(err))
    ir = np.sum(acc) / ((test_num + 2) * 7)

    return ap, ar, ip, ir

if __name__ == '__main__':
    print(100 * '-')
    start = time()
    X, y = load()

    # Per-class corpus sizes: [4420, 4345, 373, 1374, 801, 318, 328]
    train_num = 400
    test_num = 100
    # Number of random train/test repetitions to average over.
    times = 50
    m_a_p = []
    m_a_r = []
    m_i_p = []
    m_i_r = []
    # BUG FIX: the loop variable was named `time`, shadowing the imported
    # time() function after the first iteration; any later time() call
    # would raise TypeError. Use a throwaway name instead.
    for _ in range(times):
        ap, ar, ip, ir = tt(X, y, train_num, test_num)
        m_a_p.append(ap)
        m_a_r.append(ar)
        m_i_p.append(ip)
        m_i_r.append(ir)

    # Average each metric over the repetitions.
    m_a_p = np.mean(m_a_p)
    m_a_r = np.mean(m_a_r)
    m_i_p = np.mean(m_i_p)
    m_i_r = np.mean(m_i_r)

    # Single-string print() is valid under both Python 2 and 3; the
    # original multi-argument print statements were py2-only.
    print("宏准确率: %s" % m_a_p)
    print("宏召回率: %s" % m_a_r)
    print("宏F值: %s" % (2 * m_a_p * m_a_r / (m_a_p + m_a_r)))
    print("微准确率 %s" % m_i_p)
    print("微召回率 %s" % m_i_r)
    print("微F值 %s" % (2 * m_i_p * m_i_r / (m_i_p + m_i_r)))