import sys
import os
from sklearn import svm
from sklearn.decomposition import PCA
import numpy as np

def read4txt(txt):
    """Parse a circuit description file into one flattened integer vector.

    Every line of the file is one circuit element.  Gate lines become rows
    whose width equals their 1-based line number, with operand positions
    marked by a gate code ('xor' -> 1, 'and' -> 2, 'ref' -> 3); all other
    lines only advance the width counter.  Rows are right-padded with zeros
    to the width of the last (widest) row and concatenated.

    Returns (vector_length, flat_vector, gate_count).
    """
    # op -> (mark value written into the row, number of operand tokens read)
    OP_MARKS = {'xor': (1, 2), 'and': (2, 2), 'ref': (3, 1)}
    rows = []
    gate_count = 0
    with open(txt, 'r') as fh:
        # The row width for a gate equals its 1-based line number.
        for width, raw in enumerate(fh.readlines(), start=1):
            tokens = raw.split()
            spec = OP_MARKS.get(tokens[0])
            if spec is None:
                continue
            mark, n_operands = spec
            row = [0] * width
            for k in range(1, 1 + n_operands):
                row[int(tokens[k])] = mark
            rows.append(row)
            gate_count += 1
    # The final row is the widest; pad everything to its length and flatten.
    flat = []
    if rows:
        final_width = len(rows[-1])
        for row in rows:
            row.extend([0] * (final_width - len(row)))
            flat.extend(row)
    return len(flat), flat, gate_count

def loaddataset(folder, result):
    """Load every circuit listed in the index file *result* under *folder*.

    Each line of the index file except the last names a circuit file in its
    first token; lines with more than four tokens are labelled 1, otherwise 0.
    All circuit vectors are right-padded with zeros to the length of the
    longest one so they can form a rectangular matrix.

    Returns (labels, padded_vectors, gate_counts).
    """
    index_path = os.path.join(folder, result)
    labels = []
    vectors = []
    lengths = []
    gate_counts = []
    with open(index_path) as index_file:
        # The final line of the index is not a sample entry; skip it.
        for entry in index_file.readlines()[:-1]:
            tokens = entry.split()
            labels.append(1 if len(tokens) > 4 else 0)
            circuit_path = os.path.join(folder, tokens[0])
            print(circuit_path)
            length, vector, gates = read4txt(circuit_path)
            vectors.append(vector)
            lengths.append(length)
            gate_counts.append(gates)
    if vectors:
        widest = max(lengths)
        for idx, vector in enumerate(vectors):
            vectors[idx] = vector + [0] * (widest - len(vector))
    return labels, vectors, gate_counts

# --- Load the dataset, project it to 2-D with PCA, and split train/test ---
arg1, arg2 = sys.argv[1], sys.argv[2]  # arg1: data folder, arg2: index/result file
labels, re, ops = loaddataset(arg1, arg2)
label = np.asarray(labels)
data = np.asarray(re)

pca = PCA(n_components=2)
new_data = pca.fit_transform(data)
with open(f'{arg1}_pca.txt', 'w') as f:
    # Header matches the three columns actually written (the old header
    # still advertised a 'zcol' left over from a 3-component projection).
    f.write('xcol ycol color\n')
    for point, color in zip(new_data, label):
        f.write(f'{point[0]:.5f} {point[1]:.5f} {color}\n')

# Fail fast on a label/data length mismatch; previously a mismatch left
# test_n undefined and the script crashed later with a NameError.
if label.shape[0] != new_data.shape[0]:
    raise ValueError(
        f'label/data length mismatch: {label.shape[0]} vs {new_data.shape[0]}')
length = label.shape[0]
# Hold out the last 10% for testing; keep at least one test sample so the
# train slice is never emptied by test_n == 0 on tiny datasets.
test_n = max(1, length // 10)
data_train = new_data[:-test_n]
data_test = new_data[-test_n:]
label_train = label[:-test_n]
label_test = label[-test_n:]
ops_train = ops[:-test_n]
ops_test = ops[-test_n:]

# --- Train an SVM on the 2-D PCA projection and export its decision surface ---
clf = svm.SVC(gamma='scale')
clf.fit(data_train, label_train)

# Sample the decision function on a 100x100 grid spanning the training data.
# The grid axes must span the two PCA components of the data: the old code
# ranged the y-axis over the *labels* (0..1) and the x-axis over the global
# min/max of both columns, so the surface did not cover the feature space.
xx, yy = np.meshgrid(
    np.linspace(data_train[:, 0].min(), data_train[:, 0].max(), 100),
    np.linspace(data_train[:, 1].min(), data_train[:, 1].max(), 100))

Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)

np.savetxt(f'{arg1}_SVM_visual.dat',
           np.column_stack((xx.ravel(), yy.ravel(), Z.ravel())),
           fmt='%.6f', delimiter=' ', header='x y z', comments='')

# NOTE: a leftover debugging `raise Exception(...)` used to sit here and
# aborted the script before any evaluation could run; it has been removed.

# --- Evaluate the SVM on the held-out set, timing each prediction ---
n = 0    # number of correctly classified samples
TP = 0
TN = 0
FP = 0
FN = 0
path = f'{arg1}_{arg2}_result.txt'
import time
import json
time_ops = []  # [prediction_seconds, gate_count] per test sample
s_time = time.time()
for i in range(label_test.shape[0]):
    test = data_test[i].reshape(1, -1)
    ss_time = time.time()
    cate_pre = clf.predict(test)
    ee_time = time.time()
    time_ops.append([ee_time - ss_time, ops_test[i]])
    pred, truth = cate_pre[0], label_test[i]
    if pred == truth:
        n += 1
    # Exactly one confusion-matrix bucket applies per sample.
    if pred == 1 and truth == 1:
        TP += 1
    elif pred == 1 and truth == 0:
        FP += 1
    elif pred == 0 and truth == 0:
        TN += 1
    elif pred == 0 and truth == 1:
        FN += 1
e_time = time.time()
cost = e_time - s_time

def _ratio(num, den):
    """Return num/den, or 0.0 when the denominator is zero."""
    return num / den if den else 0.0

total = label_test.shape[0]
# Guard every denominator: with a degenerate split TP+FP, TP+FN, TN+FP (or n)
# can be zero.  The per-sample time must divide by the number of samples,
# not by the number of *correct* predictions (the old `cost/n` bug).
result = (f'准确分类个数:{n}\n总样本数{total}\n'
          f'准确率:{_ratio(n, total)}\n'
          f'平均每个样本用时:{_ratio(cost, total)}\n'
          f'TP:{TP}\nTN:{TN}\nFP:{FP}\nFN:{FN}\n'
          f'精度:{_ratio(TP, TP + FP)}\n'
          f'召回率:{_ratio(TP, TP + FN)}\n'
          f'假阴:{_ratio(FP, TN + FP)}')
with open('_s_time.json', 'w') as f:
    json.dump(time_ops, f)
with open(path, 'w') as f:
    f.write(result)


import tensorflow as tf 
from tensorflow import keras as tk
from tensorflow.keras import layers
from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten, Conv1D, BatchNormalization
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam, RMSprop,SGD
from tensorflow.keras.utils import to_categorical
from tensorflow.keras import backend as K
from tensorflow.keras.models import Sequential

def mlp_random(classes, number_of_samples, activation, neurons, layers, learning_rate):
    """Build and compile a fully-connected binary classifier.

    Parameters
    ----------
    classes : int
        Number of output units (1 for a single-sigmoid binary output).
    number_of_samples : int
        Input feature dimension.
    activation : str
        Activation function for the hidden layers (e.g. 'relu').
    neurons : int
        Width of each hidden layer.
    layers : int
        Number of hidden layers.
    learning_rate : float
        Learning rate for the RMSprop optimizer.

    Returns
    -------
    A compiled `Sequential` Keras model.
    """
    model = Sequential()
    # Normalize the raw inputs so the deep stack trains stably.
    model.add(BatchNormalization(input_shape=(number_of_samples,)))
    for _ in range(layers):
        model.add(Dense(neurons, activation=activation,
                        kernel_initializer='he_uniform', bias_initializer='zeros'))
    # Sigmoid output pairs with the binary cross-entropy loss below.
    model.add(Dense(classes, activation='sigmoid'))
    model.compile(loss='binary_crossentropy',
                  optimizer=RMSprop(learning_rate=learning_rate),
                  metrics=['acc'])
    # Print the architecture once (the original printed it twice).
    model.summary()
    return model

def run_mlp(X_profiling, Y_profiling, X_validation, Y_validation, classes):
    """Train the MLP on the profiling set and return the Keras History object.

    Parameters
    ----------
    X_profiling, Y_profiling
        Training inputs and labels.
    X_validation, Y_validation
        Validation inputs and labels, evaluated after every epoch.
    classes : int
        Number of output units passed through to `mlp_random`.

    Returns
    -------
    The `History` returned by `model.fit` (per-epoch acc/loss curves).
    """
    # Hyper-parameters fixed by a previous random search.
    mini_batch = 50
    learning_rate = 0.000180006094109679
    activation = 'relu'
    # Renamed from `layers` so the imported `tensorflow.keras.layers`
    # module is not shadowed inside this function.
    n_layers = 6
    neurons = 200

    model = mlp_random(classes, len(X_profiling[0]), activation, neurons,
                       n_layers, learning_rate)
    history = model.fit(
        x=X_profiling,
        y=Y_profiling,
        batch_size=mini_batch,
        verbose=2,
        epochs=50,
        shuffle=True,
        validation_data=(X_validation, Y_validation))
    return history

# --- Train the MLP and dump the per-epoch learning curves to a text file ---
history = run_mlp(data_train, label_train, data_test, label_test, 1)

acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']
with open(f'{arg1}_result', 'w') as f:
    f.write("epoch acc val_acc loss val_loss\n")
    # enumerate from 1 so the epoch column is 1-based, matching the header.
    # (The old loop also bound a local named `str`, shadowing the builtin.)
    for epoch, row in enumerate(zip(acc, val_acc, loss, val_loss), start=1):
        f.write("{} {} {} {} {}\n".format(epoch, *row))