#!/usr/bin/env python
# coding: utf-8

# In[1]:


import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import time
import sys


# ## 1 获取数据集
# ### 1.1 训练集

# In[124]:


# Load the training set: two columns, sentiment label + whitespace-separated words.
# NOTE(review): header=1 combined with names= drops the file's first TWO rows
# (row 1 is consumed as a header and replaced by `names`) — confirm header=0
# or header=None was not intended.
data_train = pd.read_csv('train.csv', names=['result', 'words'], header=1)
# BUGFIX: the result of .apply(lambda x: x.strip()) was discarded, so the
# strip had no effect; assign the stripped column back.
data_train.words = data_train.words.str.strip()
print(type(data_train.result[0]))
print(type(data_train.words[0][0]))
data_train.tail()


# In[123]:


# Split the training frame into features and labels by column position:
# column 0 is 'result' (label), column 1 is 'words' (text).
reviews_train = data_train.iloc[:, 1:]   # text column only
labels_train = data_train.iloc[:, :1]    # label column only
reviews_train.tail()


# ### 1.2 测试集

# In[69]:


# Load the test set with the same layout as the training set.
# NOTE(review): as with train.csv, header=1 plus names= drops the first two
# rows — confirm this is intended.
data_test = pd.read_csv('test.csv', names=['result', 'words'], header=1)
# BUGFIX: the stripped result was discarded before; assign it back.
data_test.words = data_test.words.str.strip()
data_test.tail()


# In[122]:


# Split the test frame the same way as the training frame:
# column 0 is 'result' (label), column 1 is 'words' (text).
reviews_test = data_test.iloc[:, 1:]   # text column only
labels_test = data_test.iloc[:, :1]    # label column only
reviews_test.tail()


# ## 神经网络
# **定义神经网络分类器**

# In[52]:


class SentimentNetwork(object):
    """Two-layer neural network for binary ('positive'/'negative') sentiment
    classification over bag-of-words input.

    The input layer is a one-hot vector over the training vocabulary.
    Because that vector is sparse, the hidden activation is computed by
    summing the weight rows of the words present instead of a full matrix
    multiply (the hidden layer is linear, by design).
    """

    def __init__(self, reviews, labels, hidden_nodes=10, learning_rate=0.1):
        """
        reviews (DataFrame): single text column, space-separated words per row
        labels (DataFrame): single column of 'positive'/'negative' labels
        hidden_nodes (int): number of hidden-layer units
        learning_rate (float): SGD step size
        """
        np.random.seed(1)  # reproducible weight initialisation
        self.pre_process_data(reviews, labels)
        self.init_network(len(self.review_vocab), hidden_nodes, 1, learning_rate)

    def pre_process_data(self, reviews, labels):
        """Build the vocabulary and the word -> input-index (one-hot) mapping.

        BUGFIX: words are lowercased here. train()/run() lowercase each word
        before the word2index lookup, so the previous mixed-case vocabulary
        silently dropped every word containing an upper-case letter.
        """
        # Collect every distinct (lowercased) word appearing in the reviews.
        review_vocab = set()
        for review in reviews.values:
            review_vocab.update(w.lower() for w in review[0].split(' '))
        self.review_vocab = list(review_vocab)

        # Collect every distinct label value.
        label_vocab = set()
        for label in labels.values:
            label_vocab.add(label[0])
        self.label_vocab = list(label_vocab)

        # word -> input-unit index
        self.word2index = {word: idx for idx, word in enumerate(self.review_vocab)}

    def init_network(self, input_nodes, hidden_nodes, output_nodes, learning_rate):
        """Allocate the weight matrices and the reusable hidden-layer buffer."""
        self.learning_rate = learning_rate
        self.input_nodes = input_nodes
        self.hidden_nodes = hidden_nodes
        self.output_nodes = output_nodes

        # Scaled normal initialisation (std = 1/sqrt(fan_in)).
        self.weights_0_1 = np.random.normal(0.0, self.input_nodes ** -0.5,
                                            (self.input_nodes, self.hidden_nodes))
        self.weights_1_2 = np.random.normal(0.0, self.hidden_nodes ** -0.5,
                                            (self.hidden_nodes, self.output_nodes))
        self.layer_1 = np.zeros((1, self.hidden_nodes))

    def sigmoid(self, x):
        """Logistic activation."""
        return 1 / (1 + np.exp(-x))

    def sigmoid_output_2_derivative(self, output):
        """Derivative of the sigmoid expressed in terms of its output."""
        return output * (1 - output)

    def get_target_for_label(self, label):
        """Map a text label to the network's numeric target (1 or 0)."""
        return 1 if label == 'positive' else 0

    def train(self, training_reviews_raw, training_labels):
        """Run one SGD pass over the raw-text training data.

        training_reviews_raw (DataFrame): raw text reviews
        training_labels (DataFrame): matching 'positive'/'negative' labels
        """
        assert len(training_reviews_raw) == len(training_labels)

        # Convert each review to the set of input-unit indices of its words,
        # e.g. "这个 红色的 小龙虾 很 好吃" -> [20, 150, 300, 312, 500].
        training_reviews = []
        for review in training_reviews_raw.values:
            indices = set()
            for word in review[0].split(' '):
                word = word.lower()
                if word in self.word2index:  # skip out-of-vocabulary words
                    indices.add(self.word2index[word])
            training_reviews.append(list(indices))

        correct_so_far = 0
        start = time.time()

        for i in range(len(training_reviews)):
            review = training_reviews[i]
            label = training_labels.iloc[i, 0]

            # Forward pass. The one-hot input is sparse, so the hidden layer
            # is the sum of the weight rows of the words present.
            self.layer_1 *= 0
            for index in review:
                self.layer_1 += self.weights_0_1[index]

            layer_1_o = self.layer_1
            layer_2_o = self.sigmoid(np.dot(layer_1_o, self.weights_1_2))

            # Backward pass (hidden layer is linear, so its delta is just the
            # back-propagated error).
            layer_2_error = layer_2_o - self.get_target_for_label(label)
            layer_2_delta = layer_2_error * self.sigmoid_output_2_derivative(layer_2_o)
            layer_1_delta = np.dot(layer_2_delta, self.weights_1_2.T)

            # Weight updates: full hidden->output, then only the active rows
            # of input->hidden (sparsity again).
            self.weights_1_2 -= np.dot(layer_1_o.T, layer_2_delta) * self.learning_rate
            for index in review:
                self.weights_0_1[index] -= layer_1_delta[0] * self.learning_rate

            # Running training accuracy, thresholding the output at 0.5.
            if layer_2_o >= 0.5 and label == 'positive':
                correct_so_far += 1
            elif layer_2_o < 0.5 and label == 'negative':
                correct_so_far += 1

            elapsed_time = float(time.time() - start)
            reviews_per_second = i / elapsed_time if elapsed_time > 0 else 0
            if i % 2000 == 0:
                sys.stdout.write(
                    "\rProgress:" + str(100 * i/float(len(training_reviews)))[:4]
                    + "% Speed(reviews/sec):" + str(reviews_per_second)[0:5]
                    + " #Correct:" + str(correct_so_far) + " #Trained:" + str(i+1)
                    + " Training Accuracy:" + str(correct_so_far * 100 / float(i+1))[:4] + "%")
            if i % 2500 == 0:
                print("")

    def test(self, testing_reviews, testing_labels):
        """Evaluate on held-out data and return accuracy as a percentage."""
        assert len(testing_reviews) == len(testing_labels)

        correct = 0
        start = time.time()

        for i in range(len(testing_reviews)):
            review = testing_reviews.iloc[i, 0]
            label = testing_labels.iloc[i, 0]

            if self.run(review) == label:
                correct += 1

            elapsed_time = float(time.time() - start)
            reviews_per_second = i / elapsed_time if elapsed_time > 0 else 0
            sys.stdout.write(
                "\rProgress:" + str(100 * i/float(len(testing_reviews)))[:4]
                + "% Speed(reviews/sec):" + str(reviews_per_second)[0:5]
                + " #Correct:" + str(correct) + " #Tested:" + str(i+1)
                + " Testing Accuracy:" + str(correct * 100 / float(i+1))[:4] + "%")
        return correct * 100 / float(len(testing_reviews))

    def run(self, review):
        """Classify a single raw-text review; returns 'positive' or 'negative'."""
        indices = set()
        for word in review.lower().split(' '):
            if word in self.word2index:
                indices.add(self.word2index[word])

        # Same sparse forward pass as in train().
        self.layer_1 *= 0
        for idx in indices:
            self.layer_1 += self.weights_0_1[idx]

        layer_2_o = self.sigmoid(np.dot(self.layer_1, self.weights_1_2))
        return 'positive' if layer_2_o >= 0.5 else 'negative'


# **实例化测试**

# In[125]:


# Build the classifier with the tuned hyper-parameters (see grid search below).
myclf = SentimentNetwork(reviews_train, labels_train,
                         learning_rate=0.04, hidden_nodes=15)


# In[121]:


# Train one pass, then evaluate on the held-out test set.
myclf.train(training_labels=labels_train, training_reviews_raw=reviews_train)
# BUGFIX: message typo "Tesing" -> "Testing".
print('\nTrain complete!\nTesting begins!')
rate_right = myclf.test(testing_labels=labels_test, testing_reviews=reviews_test)
rate_right  # roughly 78.2%


# ## 寻找最佳参数
# **栅格搜索法**

# In[54]:


def find_best_params(reviews_train, labels_train, reviews_test, labels_test):
    """Grid-search learning rate and hidden-node count.

    Keep the ranges small — training is slow and large grids can hang the
    machine. (Previously measured: alpha ~0.04 and ~15 hidden nodes are best.)

    Returns:
        (alpha, nodes, accuracy) for the best run, or None if every run
        scored 0 (the original raised UnboundLocalError in that case).
    """
    alphas = [0.04]
    node_counts = list(range(13, 14))
    max_correct = 0
    best_params = None
    for alpha in alphas:
        for nodes in node_counts:
            # BUGFIX: the grid values were previously ignored — the loop
            # always passed hidden_nodes=15, learning_rate=0.04, so the
            # search was a no-op.
            clf = SentimentNetwork(reviews_train, labels_train,
                                   hidden_nodes=nodes, learning_rate=alpha)
            clf.train(training_labels=labels_train, training_reviews_raw=reviews_train)
            rate_right = clf.test(testing_labels=labels_test, testing_reviews=reviews_test)
            if rate_right > max_correct:
                max_correct = rate_right
                best_params = (alpha, nodes, rate_right)
    return best_params
# Run the grid search and display the winning (alpha, nodes, accuracy) triple.
best_params = find_best_params(reviews_train, labels_train, reviews_test, labels_test)
best_params


# `运行后删除输出结果!!否则输出信息太大,以后打不开`

# In[ ]:





# In[ ]:





# In[ ]:





# In[ ]:


### TODO: future optimization — e.g. inspect myclf.word2index['这']


# In[112]:


# Build a shuffled training set from the stop-word-filtered corpora.
dd = pd.read_csv('data/neg_stopped.txt', encoding='gbk')
# BUGFIX: label was misspelled 'negtive', which never matched the
# 'negative' comparisons in SentimentNetwork.train()/test(), so negative
# predictions were never counted as correct.
dd.insert(0, 'result', 'negative')
cc = pd.read_csv('data/pos_stopped.txt', encoding='gbk')
cc.insert(0, 'result', 'positive')
ddd = pd.concat([cc, dd], axis=0)
# Shuffle with a fixed seed so runs are reproducible.
data_train = ddd.sample(frac=1, random_state=1).reset_index(drop=True)
data_train.tail()

