import numpy as np
import pandas as pd
import math
import tensorflow as tf
from sklearn.decomposition import PCA
from sklearn.feature_extraction.text import TfidfTransformer, CountVectorizer
from sklearn.model_selection import train_test_split
from sklearn import svm
import latticex.rosetta as rtt

# Activate the Rosetta "SecureNN" secure multi-party computation protocol;
# must run before any rtt.* dataset/graph operations below.
rtt.activate("SecureNN")

def load_file(path, start=7000, end=14000):
    """Load a labeled review slice from a CSV file.

    Args:
        path: CSV file with at least a "remark" (text) and an "id" (label)
            column, readable as UTF-8.
        start, end: row range to load; defaults keep the original
            hard-coded [7000:14000] window for backward compatibility.

    Returns:
        (array, label): `array` is the pandas Series of review texts,
        `label` is an (n, 1) numpy column vector of the labels.
        Label semantics: 1 = positive review, 0 = negative review.
    """
    data = pd.read_csv(path, encoding="utf-8")
    array = data["remark"][start:end]
    # Reshape labels to a column vector so they match the (batch, 1)
    # placeholder used by the training graph below.
    label = np.array(list(data["id"][start:end]))
    label = label.reshape(len(label), 1)
    return array, label


def data_processing(array):
    """Turn raw review texts into a dense 5-dimensional feature matrix.

    Pipeline: bag-of-words counts -> TF-IDF weighting -> PCA down to
    5 components (matching the 5-feature placeholder in the TF graph).

    Args:
        array: pandas Series of document strings (cast to unicode before
            vectorizing, so NaN/float cells don't break CountVectorizer).

    Returns:
        numpy array of shape (n_documents, 5).
    """
    vectorized = CountVectorizer()
    transformer = TfidfTransformer()
    count = vectorized.fit_transform(array.values.astype('U'))

    tfidf_matrix = transformer.fit_transform(count)
    matrix = tfidf_matrix.toarray()
    pca = PCA(n_components=5)
    # fit_transform both fits the PCA and projects the data in one pass;
    # the original code called pca.fit(matrix) first, which just repeated
    # the same (expensive) decomposition a second time.
    new_matrix = pca.fit_transform(matrix)
    print(new_matrix)

    return new_matrix


def svm_function(x_train, x_test, y_train, y_test):
    """Fit a default-parameter SVC on the training split and report accuracy.

    Prints the predictions for the test split as a side effect, then
    returns the classifier's mean accuracy on (x_test, y_test).
    """
    classifier = svm.SVC()
    classifier.fit(X=x_train, y=y_train, sample_weight=None)
    predictions = classifier.predict(x_test)
    print(predictions)
    return classifier.score(x_test, y_test)



path = "/home/test/PycharmProjects/pythonProject/tutorials/travelsky/data/svmNlp.csv"
data, label = load_file(path)
data_processed = data_processing(data)
# Secret-share the plaintext features among parties (0, 1, 2); party 1
# contributes the labels. Result is a (features, labels) pair of shares.
data_processed = rtt.PrivateDataset(data_owner=(0, 1, 2), label_owner=1).load_data(data_processed, label)
print(data_processed)
x_train, x_test, y_train, y_test = train_test_split(data_processed[0], data_processed[1], test_size=0.3)

# Logistic-regression graph: 5 input features -> single sigmoid output.
learning_rate = 0.001
x = tf.placeholder(tf.float64, [None, 5])
y = tf.placeholder(tf.float64, [None, 1])
w = tf.Variable(tf.zeros([5, 1], dtype=tf.float64))
b = tf.Variable(tf.zeros([1], dtype=tf.float64))

# Loss: numerically-stable sigmoid cross-entropy on the raw logits;
# pred_Y is the sigmoid output used only for prediction at the end.
pred_Y = tf.sigmoid(tf.matmul(x, w) + b)
logistics = tf.matmul(x, w) + b
loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=y, logits=logistics)
loss = tf.reduce_mean(loss)
train = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)
init = tf.global_variables_initializer()

EPOCHES = 10
BATCH_SIZE = 64
with tf.Session() as sess:
    sess.run(init)
    xw, xb = sess.run([w, b])
    print("--------------------------start---------------------------")
    print("init weight:{} \nbias:{}".format(xw, xb))

    # Mini-batch SGD over the training shares.
    BATCHES = math.ceil(len(x_train) / BATCH_SIZE)
    for e in range(EPOCHES):
        for i in range(BATCHES):
            bX = x_train[(i * BATCH_SIZE): (i + 1) * BATCH_SIZE]
            bY = y_train[(i * BATCH_SIZE): (i + 1) * BATCH_SIZE]
            # BUG FIX: feed_dict previously was {x: bX, x: bY} — the
            # duplicate key meant the labels overwrote the features and
            # the y placeholder was never fed. Features go to x, labels to y.
            sess.run(train, feed_dict={x: bX, y: bY})

            # Log weights every 50 steps, plus once at the very last step.
            j = e * BATCHES + i
            if j % 50 == 0 or (j == EPOCHES * BATCHES - 1 and j % 50 != 0):
                xW, xb = sess.run([w, b])
                print("I,E,B:{:0>4d},{:0>4d},{:0>4d} weight:{} \nbias:{}".format(
                    j, e, i, xW, xb))

    # Predict sigmoid probabilities for the held-out split.
    y_predict = sess.run(pred_Y, feed_dict={x: x_test, y: y_test})
    print("Y_predict:", y_predict)