#coding:utf8
import numpy as np
import theano
import theano.tensor as T
import matplotlib.pyplot as plt
from scipy import signal
np.random.seed(123)

# Load the received samples (a flat vector, 32768 values per the original
# comment) and the transmitted symbols (2 x 4096: presumably row 0 = real,
# row 1 = imaginary, judging by the axis labels below — TODO confirm).
test = np.loadtxt('../test.txt')     # received signal
send = np.loadtxt('../send.txt')     # transmitted symbols, 2 * 4096

# Normalize the received signal so its mean absolute value is 1.
# Equivalent to the original `test /= sum(abs(test)) / 32768`, but without
# hard-coding the sample count and using vectorized NumPy ops instead of
# the builtin sum/abs over the array.
test /= np.abs(test).mean()

# for i in range(2):
#     for j in range(4096):
#         if send[i][j] < -0.5:
#             send[i][j] = -1
#         elif send[i][j] > 0.5:
#             send[i][j] = 1
#         else:
#             send[i][j] = 0

# Visualize both constellations side by side: transmitted symbols on the
# left, received symbols on the right. The received vector interleaves
# components, so odd indices go on the x-axis and even on the y-axis,
# matching the original plot.
fig = plt.figure()
panels = [
    (send[0], send[1], "Transmitted Symbols"),
    (test[1:16000:2], test[:16000:2], "Received Symbols"),
]
for pos, (re_vals, im_vals, title) in enumerate(panels, start=1):
    panel = fig.add_subplot(1, 2, pos)
    panel.scatter(re_vals, im_vals)
    panel.set_xlabel("Real", fontsize=16)
    panel.set_ylabel("Imaginary", fontsize=16)
    panel.set_title(title, fontsize=20)

# x = np.vstack((test[1::2], test[::2])).T   # data seen at the receiver, 16384 * 2
# s = sum([np.linalg.norm(xn) for xn in x]) /16384
# x /= s
# TxS = np.hstack((send,send,send,send)).T   # data sent by the transmitter, 16384 * 2
#
# L = 12
# chL = 5
# EqD = int(round((L+chL)/2))   #8
# X = []
# for i in range(16000):
#     X.append(x[i:i+L+1])
# X = np.array(X).T
# Y = X[:,:,:16000]   #(2,13,16000)
# Y_real = Y[0].T
# Y_imag = Y[1].T
# Y = np.hstack((Y_real, Y_imag))  #(16000, 26)
# TxS = TxS[L:L+16000]  #(16000, 2)
# print(TxS[:10])
#
# class Layer(object):
#     def __init__(self, inputs, in_size, out_size, activation_function=None):
#         self.W = theano.shared(np.random.normal(0,1,(in_size, out_size)))
#         self.Wx_plus_b = T.dot(inputs, self.W)
#         self.activation_function = activation_function
#         if activation_function:
#             self.outputs = self.activation_function(self.Wx_plus_b)
#         else:
#             self.outputs = self.Wx_plus_b
#
# # determine the inputs
# z = T.dmatrix('z')  # transmitted sequence
# y = T.dmatrix('y')  # received sequence
#
# # add layers
# l1 = Layer(y, 2*L+2, 2, None)  #26入2出
#
# # loss function
# cost = T.mean(T.square(l1.outputs - z))
#
# # compute the gradients
# gW1 = T.grad(cost, l1.W)
# # apply the gradient descent
# learning_rate = 0.05
# train = theano.function(
#     inputs = [y,z],
#     outputs = cost,
#     updates = [(l1.W, l1.W - learning_rate * gW1)])
#
# # predict
# predict = theano.function(inputs=[y], outputs=l1.outputs)
#
# # train
# for i in range(500):
#     err = train(Y, TxS)
#     if i % 50 == 0:
#         print(err)
#
# predictValue = predict(Y)
# ax3 = fig.add_subplot(3,1,3)
# ax3.scatter(predictValue[::2],predictValue[1::2])
# Persist the figure to an EPS file, then display it interactively.
fig.savefig('test.eps')
plt.show()
