#coding:utf8
"""QPSK channel-equalization experiments: RLS, a Theano MLP, and a tflearn CNN."""
# BUG FIX: `division` was listed twice in the __future__ import.
from __future__ import division, print_function, absolute_import

import numpy as np
import matplotlib.pyplot as plt
from scipy import signal
import time
import theano
import theano.tensor as T

# Fixed seed so every run draws the same symbols and noise.
np.random.seed(123)

#generateData
# Experiment-wide constants:
#   M  - number of transmitted symbols per run
#   TT - number of samples consumed by the RLS training loop
#   dB - reference SNR (dB) for the final RLS run
#   L  - channel/equalizer memory; the equalizer has L+1 taps
M,TT,dB,L = 30000, 20000, 25, 12
EqD = int(round((L+10)/2))  # equalizer decision delay (symbols)
SNR = range(-10, 20)  # SNR sweep values in dB

def generateData(M, T, dB, L):
    """Generate a noisy QPSK transmission and its regressor matrix.

    Parameters
    ----------
    M : int
        Number of transmitted symbols.
    T : int
        Unused (kept for call compatibility).
    dB : number
        Signal-to-noise ratio in dB used to scale the additive noise.
    L : int
        Channel/equalizer memory; each regressor column has L+1 samples.

    Returns
    -------
    (Y, TxS, x) where Y is the (L+1, M-L-6) training regressor matrix,
    TxS the trimmed transmitted symbol sequence (target), and x the raw
    noisy received sequence (kept for plotting).
    """
    # QPSK symbols: independent random +/-1 for real and imaginary parts.
    # Two separate rand(M) draws, in this order, to keep the RNG stream
    # identical to the original implementation.
    tx_symbols = np.sign(np.random.rand(M) * 2 - 1) \
        + 1j * np.sign(np.random.rand(M) * 2 - 1)

    # Fixed complex channel impulse response, normalised to unit energy.
    channel = np.array([0.0410+0.0109j, 0.0495+0.0123j, 0.0672+0.0170j,
                        0.0919+0.0235j, 0.7920+0.1281j, 0.3960+0.0871j,
                        0.2715+0.0498j, 0.2291+0.0414j, 0.1287+0.0154j,
                        0.1032+0.0119j])
    channel = channel / np.linalg.norm(channel)

    # Channel convolution, truncated to M samples.
    rx = signal.fftconvolve(channel, tx_symbols)[:M]

    # Complex white noise scaled so that signal/noise power matches dB.
    noise = np.random.randn(1, M) + 1j * np.random.randn(1, M)
    noise = noise / np.linalg.norm(noise) * 10 ** (-dB / 20) * np.linalg.norm(rx)
    rx = (rx + noise).ravel()

    # Sliding reversed windows of L+1 received samples, one per column.
    n_windows = M - L - 1
    regressors = np.array([rx[i + L + 1:i:-1] for i in range(n_windows)]).T

    # Y: training regressors (first 5 columns dropped); target symbols are
    # trimmed to align with the equalizer delay; rx kept for plotting.
    return regressors[:, 5:], tx_symbols[L:M - 6].T, rx

def score(pdvalue, Tx, offset=None):
    """Fraction of decisions whose signs match the target symbols.

    Compares the real and imaginary signs of the equalized samples against
    the transmitted symbols, skipping 10 samples at each end.

    Parameters
    ----------
    pdvalue : sequence of complex
        Equalizer output samples.
    Tx : sequence of complex
        Transmitted (target) symbols.
    offset : int, optional
        Index offset between pdvalue and Tx.  Defaults to the module-level
        decision delay L - EqD (the original hard-coded behaviour).

    Returns
    -------
    float : hit rate in [0, 1]; 0.0 when fewer than 21 samples are given.
    """
    if offset is None:
        offset = L - EqD  # original implicit dependency on module globals
    total = len(pdvalue) - 20
    if total <= 0:
        return 0.0  # avoid division by zero on short inputs
    count = 0
    for i in range(total):
        j = i + 10
        if (pdvalue[j].imag * Tx[j + offset].imag >= 0
                and pdvalue[j].real * Tx[j + offset].real >= 0):
            count += 1
    return count / total

def score2(pdvalue, Tx):
    """Hit rate for the MLP output format.

    Each row of ``pdvalue`` holds (real, imag) estimates; a sample counts
    as a hit when both components agree in sign with the corresponding
    complex target in ``Tx``.
    """
    hits = sum(
        1
        for est, ref in zip(pdvalue, Tx)
        if est[0] * ref.real >= 0 and est[1] * ref.imag >= 0
    )
    return hits / len(pdvalue)



###### RLS ######
def RLS(X, Tx):
    """Recursive-least-squares linear equalizer.

    Parameters
    ----------
    X : complex ndarray, shape (taps, n_samples)
        Regressor matrix from generateData (taps == L + 1).
    Tx : complex ndarray
        Transmitted (target) symbol sequence.

    Returns
    -------
    c : ndarray, shape (1, taps)
        Final equalizer tap weights.
    sb : ndarray, shape (1, n_samples)
        Equalized output c . X.
    accuracy : float
        Sign-agreement score of the equalized output (see score()).

    Relies on the module-level constants TT (training length) and L, EqD
    (decision delay) via score().
    """
    taps = X.shape[0]  # == L + 1; replaces the previous hard-coded 13
    c = np.zeros((1, taps))
    # Large initial inverse correlation matrix (i.e. small initial correlation).
    R_inverse = 100 * np.eye(taps)

    for k in range(TT - 10):
        # A-priori error between the delayed target and the filter output.
        e = Tx[k + 10 + L - EqD] - c.dot(X[:, k + 10])
        filtered_infrmn_vect = R_inverse.dot(X[:, k + 10])  # shape (taps,)
        norm_error_power = np.conj(X[:, k + 10].T).dot(filtered_infrmn_vect)
        gain_constant = 1 / (1 + norm_error_power)
        norm_filtered_infrmn_vect = gain_constant * np.conj(filtered_infrmn_vect.T)
        c = c + e * norm_filtered_infrmn_vect
        # Rank-1 downdate of the inverse correlation matrix.
        R_inverse = R_inverse - np.conj(
            norm_filtered_infrmn_vect.reshape((taps, 1))
        ).dot(norm_filtered_infrmn_vect.reshape((1, taps)))

    sb = np.dot(c, X)  # equalize the whole record with the final weights
    pdvalue = sb.ravel()
    accuracy = score(pdvalue, Tx)
    return c, sb, accuracy

# Sweep the SNR range and record the RLS equalizer accuracy at each point.
SER_RLS = []
for db in SNR:
    X, Tx, x = generateData(30000, 20000, db, L)
    rls_weights, sb, accuracy = RLS(X, Tx)
    print('RLS accuracy: {}'.format(accuracy))
    SER_RLS.append(accuracy)

# One extra run at the reference SNR so Tx/x/sb hold data for the plots below.
X, Tx, x = generateData(30000, 20000, dB, L)
rls_weights, sb, accuracy = RLS(X, Tx)
print('RLS accuracy: {}'.format(accuracy))

################# mlp ###################
class Layer(object):
    """A single fully-connected Theano layer (note: no bias term).

    Exposes ``W`` (shared weights), ``Wx_plus_b`` (pre-activation),
    ``activation_function`` and ``outputs`` (post-activation symbolic node).
    """

    def __init__(self, inputs, in_size, out_size, activation_function=None):
        # Standard-normal initialised (in_size, out_size) weight matrix.
        self.W = theano.shared(np.random.normal(0, 1, (in_size, out_size)))
        self.Wx_plus_b = T.dot(inputs, self.W)
        self.activation_function = activation_function
        # Linear output when no activation is supplied.
        self.outputs = (
            self.activation_function(self.Wx_plus_b)
            if activation_function
            else self.Wx_plus_b
        )

# determine the inputs
# Symbolic Theano placeholders shared by MLP(): y carries the
# (samples, features) input matrix, z the (samples, 2) real/imag targets.
z = T.dmatrix('z')
y = T.dmatrix('y')


def MLP(X, Tx, test_db=None):
    """Train a single linear Theano layer as an equalizer and evaluate it.

    Parameters
    ----------
    X : complex ndarray, shape (2L+2 taps stacked later, n_samples)
        Regressor matrix from generateData.
    Tx : complex ndarray
        Target symbol sequence.
    test_db : number, optional
        SNR (dB) used to generate the held-out test set.  Defaults to the
        module-level ``db`` current at call time, which preserves the
        original implicit-global behaviour.

    Returns
    -------
    (predictValue, accuracy) : the (n, 2) real/imag predictions on the test
    set and their score2() hit rate.
    """
    if test_db is None:
        test_db = db  # original code read the global `db` implicitly here

    # Stack real and imaginary parts side by side: (n_samples, 2L+2) features.
    Y = X.T
    Y = np.hstack((np.real(Y), np.imag(Y)))
    # Targets as (n_samples, 2) real matrices.
    Txlist = np.vstack((np.real(Tx), np.imag(Tx))).T
    l1 = Layer(y, 2 * L + 2, 2, None)  # linear layer: 2L+2 in, 2 out

    # Mean squared error between layer output and targets.
    cost = T.mean(T.square(l1.outputs - z))

    # Plain gradient descent on the single weight matrix.
    gW1 = T.grad(cost, l1.W)
    learning_rate = 0.05
    train = theano.function(
        inputs=[y, z],
        outputs=cost,
        updates=[(l1.W, l1.W - learning_rate * gW1)])

    predict = theano.function(inputs=[y], outputs=l1.outputs)

    # 201 full-batch gradient steps.
    for i in range(201):
        train(Y, Txlist)

    # Evaluate on a freshly generated data set at test_db.
    testY, testTx, testx = generateData(30000, 20000, test_db, L)
    testY = testY.T
    testY = np.hstack((np.real(testY), np.imag(testY)))
    predictValue = predict(testY)

    return predictValue, score2(predictValue, testTx)

# Sweep the SNR range and record the MLP equalizer accuracy at each point.
# (The loop variable must stay named `db`: MLP() reads it as a global.)
SER_MLP = []
for db in SNR:
    X, Tx, x = generateData(30000, 20000, db, L)
    predictValue, accuracy = MLP(X, Tx)
    print("MLP ACCURACY: {}".format(accuracy))
    SER_MLP.append(accuracy)

################# PLOT #################
# Constellations: transmitted symbols, raw received samples, RLS output,
# and MLP output.
fig = plt.figure()
ax1 = fig.add_subplot(2, 2, 1)
ax1.scatter(Tx.real, Tx.imag)   # transmitted QPSK constellation
ax2 = fig.add_subplot(2, 2, 2)
ax2.scatter(x.real, x.imag)     # received signal (channel + noise)
ax3 = fig.add_subplot(2, 2, 3)
ax3.scatter(sb.real, sb.imag)   # RLS-equalized output
ax4 = fig.add_subplot(2, 2, 4)
# BUG FIX: predictValue is (n, 2) with columns (real, imag) — see score2().
# The original sliced alternate rows ([::2] vs [1::2]), pairing unrelated
# samples instead of plotting real vs imaginary parts.
ax4.scatter(predictValue[:, 0], predictValue[:, 1])
plt.show()

# Accuracy-vs-SNR curves for both equalizers.
fig = plt.figure()
plt.plot(SNR, SER_RLS)
plt.plot(SNR, SER_MLP)
plt.show()

print(SER_RLS)
print(SER_MLP)



######################## cnn ##########################
# epoch = 10, db = 10

import tensorflow as tf
import tflearn
from tflearn.data_utils import shuffle, to_categorical
from tflearn.layers.core import input_data, dropout, fully_connected
from tflearn.layers.conv import conv_2d, max_pool_2d
from tflearn.layers.estimator import regression
from common.load import *
from common.data_generator import *

# NOTE(review): the wildcard imports above (common.load / common.data_generator)
# shadow the local generateData defined earlier — this 6-argument call resolves
# to the imported version.  It also rebinds the Theano placeholder `y`.
db = 20
print(db)
smoothingLen = 127
X, Y, x, y = generateData(30000, 20000, db, smoothingLen, 10, 'cnn') # X (19990,2); Y (19990, 4)
labels = ['1','2','3','4']

def shuffle_in_unison_inplace(a, b):
    """Shuffle two equal-length arrays with one shared permutation.

    Despite the name, the inputs are not modified; permuted copies are
    returned, with the pairing between a[i] and b[i] preserved.
    """
    assert len(a) == len(b)
    order = np.random.permutation(len(a))
    return a[order], b[order]

# X, Y = shuffle_in_unison_inplace(np.array(X), np.array(Y))

# Let TensorFlow fall back to CPU when an op has no GPU kernel.
config = tf.ConfigProto(allow_soft_placement=True)

# Toggle for the convolutional front-end; False leaves only the softmax head.
CNN1 = True

with tf.Session(config=config) as sess:

    # Input: 2x128 window, single channel — presumably real/imag rows of 128
    # received samples (TODO confirm against common.data_generator).
    network = input_data(shape=[2,128,1], name="inp")

    if CNN1:
        network = conv_2d(network, 64, [1,3], activation="relu", name="conv1")
        network = conv_2d(network, 16, [2,3], activation="relu", name="conv2")
        network = fully_connected(network, 128, activation='relu', name="fully")
        network = dropout(network, 0.5, name="drop1")

    # 4-way softmax over the symbol classes in `labels`.
    network = fully_connected(network, 4, activation='softmax', name="out")
    outputs = tf.constant(labels, name="outputs1")
    # NOTE(review): tf.argmax(network, 0) reduces over the batch dimension;
    # axis 1 (per-sample class) looks intended — confirm before using strout.
    strout = tf.gather(outputs, tf.argmax(network, 0), name="outputs2")

    network = regression(network, optimizer='adam',
						loss='categorical_crossentropy',
						learning_rate=0.001)

    model = tflearn.DNN(network, session=sess, tensorboard_verbose=0)

    # NOTE(review): tf.initialize_all_variables is deprecated in favour of
    # tf.global_variables_initializer (TF >= 0.12).
    ops = tf.initialize_all_variables()
    sess.run([ops, outputs])

    model.fit(X, Y, n_epoch=10, validation_set=0.0, shuffle=True, show_metric=True, batch_size=1024, run_id='equalizer_cnn')

    sess.run(outputs)

    # Accuracy on the training data: fraction of windows whose predicted
    # class (argmax) matches the one-hot label.
    gd = 0
    for v, w in zip(X, Y):
        if np.argmax(model.predict([v])[0]) == np.argmax(w):
            gd += 1
    print("Accuracy:", gd/len(Y))