# -*- coding: utf-8 -*-
"""
Created on Wed Nov 28 15:44:01 2018

@author: Sz-wyz

E-mail:13071066692@163.com
"""
import numpy as np
import jieba
import pandas as pd
import re
from sklearn.preprocessing import LabelEncoder,OneHotEncoder
from collections import Counter
from sklearn.naive_bayes import MultinomialNB
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics import classification_report
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression

# Matches every character that is NOT a CJK unified ideograph (U+4E00-U+9FA5).
_NON_CHINESE = re.compile(r"[^\u4e00-\u9fa5]")


def translate(text):
    """Return only the Chinese characters of *text*.

    Every non-Chinese character (ASCII letters, digits, punctuation,
    whitespace, emoji, ...) is removed.  The parameter was renamed from
    ``str`` so it no longer shadows the builtin.
    """
    # One substitution pass is equivalent to the original
    # ``''.join(pattern.split(text))`` but clearer and cheaper.
    return _NON_CHINESE.sub('', text)


def load_trainfile(s):
    """Load the training CSV at path *s* and tokenize its text.

    Returns a DataFrame with columns ``sentiment_word``, ``subject``,
    ``sentiment_value`` and ``content``, where ``content`` holds the list
    of jieba tokens of the Chinese characters of each original comment.
    """
    df = pd.read_csv(s)
    df = df[['content', 'subject', 'sentiment_value', 'sentiment_word']]

    # Domain words jieba would otherwise split apart (car model names etc.).
    jieba.add_word('变色龙')
    jieba.add_word('森林人')

    # ``list(jieba.cut(...))`` replaces the fragile join(',')/split(',')
    # round trip and avoids producing a spurious [''] for empty content.
    content_cuted = [list(jieba.cut(translate(item), cut_all=False))
                     for item in df['content']]

    # .copy() so the column assignment below writes to an independent frame
    # instead of a slice view (the original raised SettingWithCopyWarning).
    sentiment_subject_sc = df[['sentiment_word', 'subject',
                               'sentiment_value']].copy()
    sentiment_subject_sc['content'] = content_cuted
    return sentiment_subject_sc


def yuchuli(sentiment_subject_sc):
    """Clean and re-weight the tokenized ``content`` column.

    Steps:
      1. drop stopwords (read from ``stopwords.txt``),
      2. keep only frequent tokens (count > 30) plus a hand-picked list,
      3. repeat every token that also occurs in a known sentiment word
         100 times, boosting its weight for the TF-IDF vectorizer,
      4. join each token list into one space-separated string.

    Returns the same DataFrame with ``content`` replaced by those strings.
    """
    # The stopword file is a single line of space-separated words.
    # NOTE(review): encoding assumed UTF-8 — the original relied on the
    # locale default open(); confirm against the actual file.
    with open('stopwords.txt', encoding='utf-8') as f:
        stopwords = set(f.readlines()[0].split(' '))

    # 1. Remove stopwords (set membership instead of O(n) list scans).
    cleaned = [[w for w in toks if w not in stopwords]
               for toks in sentiment_subject_sc['content']]
    sentiment_subject_sc['content'] = pd.Series(cleaned)

    # 2. Vocabulary: tokens seen more than 30 times, plus manual additions.
    frequence = Counter(w for toks in cleaned for w in toks)
    vocab = {w for w, c in frequence.items() if c > 30}
    vocab.update(['惨不忍睹', '变色龙', '轻松', '没敢', '行情', '通病', '无力',
                  '颜色', '好用', '实惠', '帅气', '社会', '刹车踏板'])
    filtered = [[w for w in toks if w in vocab] for toks in cleaned]
    sentiment_subject_sc['content'] = pd.Series(filtered)

    # 3. Collect the jieba tokens of every distinct non-NaN sentiment word.
    sentiment_words = pd.Series(
        list(sentiment_subject_sc['sentiment_word'].unique())).dropna()
    sentiment_tokens = set()
    for word in sentiment_words:
        sentiment_tokens.update(jieba.cut(translate(word)))

    # Over-weight sentiment-bearing tokens: 100 copies of each matching
    # occurrence, followed by the untouched token list (as the original did).
    weighted = []
    for toks in filtered:
        boosted = [w for w in toks if w in sentiment_tokens
                   for _ in range(100)]
        boosted.extend(toks)
        weighted.append(boosted)

    # 4. Space-separated strings (with a trailing space, matching the
    #    original ``+= item + ' '`` loop) for the vectorizer downstream.
    sentiment_subject_sc['content'] = pd.Series(
        [''.join(w + ' ' for w in toks) for toks in weighted])
    return sentiment_subject_sc


def sfit(sc):
    """Train and evaluate a Naive Bayes classifier for the ``subject`` label.

    Vectorizes ``sc['content']`` with TF-IDF, performs an 80/20 random
    train/test split and returns sklearn's classification report (str).
    """
    vectorizer = TfidfVectorizer(encoding='utf-8', max_df=0.15, min_df=5,
                                 max_features=5000)
    # The vectorizer accepts the Series directly; no generator wrapper needed.
    x = vectorizer.fit_transform(sc['content'])
    y = sc['subject']
    x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2)
    clf = MultinomialNB(alpha=0.001)  # alpha is the Laplace smoothing factor
    clf.fit(x_train, y_train)
    pred = clf.predict(x_test)
    # BUG FIX: classification_report expects (y_true, y_pred); the original
    # passed them reversed, which swaps precision and recall in the report.
    return classification_report(y_test, pred)


def vfit(sc):
    """Train and evaluate a Naive Bayes classifier for ``sentiment_value``.

    Same pipeline as ``sfit`` but with looser TF-IDF pruning thresholds;
    returns sklearn's classification report (str).
    """
    vectorizer = TfidfVectorizer(encoding='utf-8', max_df=0.45, min_df=50,
                                 max_features=5000)
    # The vectorizer accepts the Series directly; no generator wrapper needed.
    x = vectorizer.fit_transform(sc['content'])
    y = sc['sentiment_value']
    x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2)
    clf = MultinomialNB(alpha=0.001)  # alpha is the Laplace smoothing factor
    clf.fit(x_train, y_train)
    pred = clf.predict(x_test)
    # BUG FIX: classification_report expects (y_true, y_pred); the original
    # passed them reversed, which swaps precision and recall in the report.
    return classification_report(y_test, pred)

def main():
    """Script entry point: load, preprocess, train and print both reports."""
    sentiment_subject_sc = load_trainfile('train.csv')
    sentiment_subject_sc2 = yuchuli(sentiment_subject_sc)
    print(sfit(sentiment_subject_sc2))
    print(vfit(sentiment_subject_sc2))


if __name__ == '__main__':
    # Guard so importing this module no longer triggers a full training run.
    main()
