# """
# @name : 局域网存活IP检测
# @author : qhy
# @projectname: tiannongbabu
# """
#
# #subprocess模块，可以调用Linux里的部分命令,ping和管道符号
# # import subprocess
# # #IPY模块里含有大量的IP地址，包括网段、网络掩码、广播地址、子网数、IP类型
# # import IPy
# # # network segment是网段
# # segment = input("请输入要检查的网段、子网掩码：")
# # from IPy import IP
# # # 得到局域网的ip网
# # ip = IP(segment)
# #Popen可使用ping命令
# # from subprocess import Popen
# # file1 = "ip存活的文件"
# # file2 = "ip不存活的文件"
# # for x in ip:
# #     # 运行ping命令，并定义管道的输出为标准输出
# #     result1 = subprocess.Popen('ping %s'%x,stdout=subprocess.PIPE)
# #     # 借助read()方法，读取管道的输出
# #     result = result1.stdout.read().decode('gbk')
# #     # print(result)一直显示Ping 请求找不到主机 x？？
# #
# #     # 如果ping成功会有"回复"两字，放到ip存活的文件去
# #
# #     if '回复' in result:
# #         with open(file1, "w+", encoding="gbk")as f1:
# #             f1.write(str(x)+'\n')
# #     # # 相反就写到ip不存活的文件中去
# #     else:
# #         with open(file2, "w+", encoding="gbk")as f2:
# #             f2.write(str(x)+'\n')
#             # print(f"{x}地址异常哦！")
#
#
# import torch
# # prepare dataset
# # x,y是矩阵，3行1列 也就是说总共有3个数据，每个数据只有1个特征
# x_data = torch.tensor([[1.0], [2.0], [3.0]])
# y_data = torch.tensor([[2.0], [4.0], [6.0]])
#
# # design model using class
# """
# our model class should be inherit from nn.Module, which is base class for all neural network modules.
# member methods __init__() and forward() have to be implemented
# class nn.linear contain two member Tensors: weight and bias
# class nn.Linear has implemented the magic method __call__(),which enable the instance of the class can
# be called just like a function.Normally the forward() will be called
# """
# class LinearModel(torch.nn.Module):
#     def __init__(self):
#         super(LinearModel, self).__init__()
#         # (1,1)是指输入x和输出y的特征维度，这里数据集中的x和y的特征都是1维的
#         # 该线性层需要学习的参数是w和b  获取w/b的方式分别是~linear.weightnear.bias
#         self.linear = torch.nn.Linear(1, 1)
#
#     def forward(self, x):
#         y_pred = self.linear(x)
#         return y_pred
#
# model = LinearModel()
#
# # construct loss and optimizer
# # criterion = torch.nn.MSELoss(size_average = False)
# criterion = torch.nn.MSELoss(reduction='sum')
# optimizer = torch.optim.SGD(model.parameters(), lr=0.01)  # model.parameters()自动完成参数的初始化操作
#
# # training cycle forward, backward, update
# for epoch in range(100):
#     y_pred = model(x_data)  # forward:predict
#     loss = criterion(y_pred, y_data)  # forward: loss
#     print(epoch, loss.item())
#
#     optimizer.zero_grad()  # the grad computer by .backward() will be accumulated. so before backward, remember set the grad to zero
#     loss.backward()  # backward: autograd，自动计算梯度
#     optimizer.step()  # update 参数，即更新w和b的值
#
# print('w = ', model.linear.weight.item())
# print('b = ', model.linear.bias.item())
#
# x_test = torch.tensor([[4.0]])
# y_test = model(x_test)
# print('y_pred = ', y_test.data)
#
#
#





# -*- coding: utf-8 -*-

# 拉格朗日插值
# import pandas as pd
# from scipy.interpolate import lagrange
# inputfile = 'eeeee/chapter6/test/data/missing_data.xls'
# outputfile = 'eeeee/chapter6/test/data/missing_data_processed.xls'
# data = pd.read_excel(inputfile, header=None)
# print (data)
# # 自定义插值函数
# # s为列向量，n为被插值的位置，k为取前后的数据个数，默认5
# def ployinterp_column(s, n, k=5):
#     y = s[list(range(n-k, n)) + list(range(n+1, n+1+k))]
#     y = y[y.notnull()]
#     return lagrange(y.index, list(y))(n)
#
# # 逐个元素判断是否需要插值
# for i in data.columns:
#     for j in range(len(data)):
#         if (data[i].isnull())[j]:
#             data[i][j] = ployinterp_column(data[i], j)
#
# data.to_excel(outputfile, header=None, index=False)
# print (data)

"""
原始数据
           0         1         2
0   235.8333  324.0343  478.3231
1   236.2708  325.6379  515.4564
2   238.0521  328.0897  517.0909
3   235.9063       NaN  514.8900
4   236.7604  268.8324       NaN
5        NaN  404.0480  486.0912
6   237.4167  391.2652  516.2330
7   238.6563  380.8241       NaN
8   237.6042  388.0230  435.3508
9   238.0313  206.4349  487.6750
10  235.0729       NaN       NaN
11  235.5313  400.0787  660.2347
12       NaN  411.2069  621.2346
13  234.4688  395.2343  611.3408
14  235.5000  344.8221  643.0863
15  235.6354  385.6432  642.3482
16  234.5521  401.6234       NaN
17  236.0000  409.6489  602.9347
18  235.2396  416.8795  589.3457
19  235.4896       NaN  556.3452
20  236.9688       NaN  538.3470
插值后
             0           1           2
0   235.833300  324.034300  478.323100
1   236.270800  325.637900  515.456400
2   238.052100  328.089700  517.090900
3   235.906300  203.462116  514.890000
4   236.760400  268.832400  493.352591
5   237.151181  404.048000  486.091200
6   237.416700  391.265200  516.233000
7   238.656300  380.824100  493.342382
8   237.604200  388.023000  435.350800
9   238.031300  206.434900  487.675000
10  235.072900  237.348072  609.193564
11  235.531300  400.078700  660.234700
12  235.314951  411.206900  621.234600
13  234.468800  395.234300  611.340800
14  235.500000  344.822100  643.086300
15  235.635400  385.643200  642.348200
16  234.552100  401.623400  618.197198
17  236.000000  409.648900  602.934700
18  235.239600  416.879500  589.345700
19  235.489600  420.748600  556.345200
20  236.968800  408.963200  538.347000
"""
# 数据划分
import numpy as np
import pandas as pd  # fix: pandas was never imported (the import at the top is commented out)

datafile = 'eeeee/chapter6/test/data/model.xls'

# .as_matrix() was removed in pandas 1.0 — .values returns the same ndarray.
data = pd.read_excel(datafile).values

# Shuffle the rows in place before splitting.
# fix: random.shuffle on a 2-D numpy array corrupts it (the tuple swap goes
# through views and duplicates rows); np.random.shuffle permutes rows safely.
np.random.shuffle(data)

p = 0.8  # fraction of rows used for training
split = int(len(data) * p)
train = data[:split, :]
# fix: the original read data[split, :] (missing ':'), which yielded a single
# 1-D row instead of the remaining 20% of the data.
test = data[split:, :]

from keras.models import Sequential
from keras.layers import Dense, Activation  # keras.layers.core was removed in Keras 2

netfile = 'eeeee/chapter6/test/data/net.model'

# Same architecture as before: 3 inputs -> 10 relu units -> 1 sigmoid output.
# fix: Dense(3, 10) is the removed Keras 0.x signature (input_dim, output_dim);
# modern Keras takes the number of units first and the input size via input_dim.
net = Sequential()
net.add(Dense(10, input_dim=3))
net.add(Activation('relu'))
net.add(Dense(1))
net.add(Activation('sigmoid'))
# fix: the class_mode= argument was removed from compile() in Keras 1.0.
net.compile(loss='binary_crossentropy', optimizer='adam')

# fix: nb_epoch= was renamed to epochs= in Keras 2.
net.fit(train[:, :3], train[:, 3], epochs=1000, batch_size=1)
net.save_weights(netfile)

# fix: Sequential.predict_classes was removed; threshold the sigmoid output
# at 0.5 to recover the same 0/1 class labels.
predict_result = (net.predict(train[:, :3]) > 0.5).astype('int32').reshape(len(train))

def cm_plot(y, yp):
  """Draw the confusion matrix of true labels *y* vs predictions *yp*.

  Returns the matplotlib.pyplot module so the caller can chain .show()
  or .savefig().
  """
  from sklearn.metrics import confusion_matrix  # confusion matrix helper
  import matplotlib.pyplot as plt  # plotting library

  cm = confusion_matrix(y, yp)  # rows = true classes, columns = predicted classes

  plt.matshow(cm, cmap=plt.cm.Greens)  # Greens colormap; see matplotlib docs for others
  plt.colorbar()  # colour scale legend

  # Annotate every cell with its count.
  # fix: matshow draws cm[i, j] at x = column j, y = row i, so the annotation
  # point must be (j, i); the original xy=(x, y) transposed the labels for
  # any non-symmetric confusion matrix.
  for i in range(len(cm)):
    for j in range(len(cm)):
      plt.annotate(cm[i, j], xy=(j, i),
                   horizontalalignment='center', verticalalignment='center')

  plt.ylabel('True label')
  plt.xlabel('Predicted label')
  return plt

# Confusion matrix of the neural-net predictions on the training set.
cm_plot(train[:, 3], predict_result).show()

from sklearn.tree import DecisionTreeClassifier

# Build a CART decision tree and train it on the same three features.
tree = DecisionTreeClassifier()
tree.fit(train[:, :3], train[:, 3])

# Confusion matrix of the tree's predictions on the held-out test set.
tree_pred = tree.predict(test[:, :3])
cm_plot(test[:, 3], tree_pred).show()

from sklearn.metrics import roc_curve
import matplotlib.pyplot as plt

# fpr/tpr are the false/true positive rates at each probability threshold;
# predict_proba(...)[:, 1] is the predicted probability of the positive class.
# fix: the original referenced an undefined name `data_test` (NameError);
# the held-out split created above is called `test`.
fpr, tpr, thresholds = roc_curve(test[:, 3],
                                 tree.predict_proba(test[:, :3])[:, 1],
                                 pos_label=1)

# Plot the ROC curve of the decision tree (CART).
plt.plot(fpr, tpr, linewidth=2, label='ROC of CART', color='blue')
plt.xlabel('False Positive Rate')  # x-axis label
plt.ylabel('True Positive Rate')  # y-axis label
plt.ylim(0, 1.05)  # y-axis range
plt.xlim(0, 1.05)  # x-axis range
plt.legend(loc=4)  # legend in the lower-right corner
plt.show()