import pandas as pd
import numpy as np
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from random import shuffle
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import precision_score, recall_score, f1_score
from tensorflow.python.keras.callbacks import EarlyStopping, ModelCheckpoint
from tensorflow.python.keras.layers import Dense, Input
from tensorflow.python.keras.models import Model

# ---------------------------------------------------------------------------
# Load the ad-plan dataset and build a class-balanced frame by downsampling
# the majority class (label == 0) to the size of the minority class (label == 1).
# ---------------------------------------------------------------------------
data = pd.read_csv('anomaly_detection/data/ad_plan_data.csv')

# Impute missing numeric features with the column median (robust to outliers).
# Direct re-assignment avoids the deprecated chained inplace-fillna pattern.
for col in ['rcost', 'impr', 'click']:
    data[col] = data[col].fillna(data[col].median())

# Downsample negatives to match the positive count. random_state pinned for
# reproducibility, consistent with the train/test split below.
positives = data[data['label'] == 1]
negatives = data[data['label'] == 0].sample(n=len(positives), random_state=7)

# pd.concat replaces DataFrame.append, which was removed in pandas 2.0.
new_data = pd.concat([negatives, positives])

# Hold out 30% of the balanced data for evaluation.
feature_cols = ['rcost', 'impr', 'click', 'dnd', 'dnu', 'dnr', 'dnpd', 'dnud', 'dnrd']
x_train, x_test, y_train, y_test = train_test_split(
    new_data[feature_cols],
    new_data['label'],
    test_size=0.3,
    random_state=7,
)

# Standardize features: fit on the training split only, then apply the same
# transform to the test split to avoid leakage.
data_std = StandardScaler()
x_train = data_std.fit_transform(x_train)
x_test = data_std.transform(x_test)


# Callbacks: stop once validation loss stalls for 5 epochs (keeping the best
# weights) and checkpoint the best model to disk.
early_stopping_cb = EarlyStopping(patience=5, restore_best_weights=True)
checkpoint_cb = ModelCheckpoint("datas.h5", save_best_only=True)

# Simple MLP over the 9 standardized features.
# BUG FIX: the hidden Dense layers previously had no activation, so the
# 128->64->32 stack composed to a single linear map (an over-parameterized
# logistic regression). 'relu' restores the intended non-linearity.
data_input = Input(shape=(9,))
data_layer1 = Dense(128, activation='relu')(data_input)
data_layer2 = Dense(64, activation='relu')(data_layer1)
data_layer3 = Dense(32, activation='relu')(data_layer2)
data_layer4 = Dense(1, activation='sigmoid')(data_layer3)  # binary probability

data_model = Model(data_input, data_layer4)
data_model.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['accuracy'])

# x_train is already an ndarray (output of StandardScaler.transform), so the
# former np.array(list(...)) round-trip is dropped; the label Series is
# converted once with np.asarray.
data_history = data_model.fit(x_train, np.asarray(y_train),
                              epochs=100, validation_split=0.2,
                              batch_size=128,
                              callbacks=[early_stopping_cb, checkpoint_cb])

# Predicted probabilities for the held-out test set.
test_probabilities = data_model.predict(x_test)

# Threshold at 0.5 and squeeze the (n, 1) output down to a 1-D label vector.
y_pred = (test_probabilities.ravel() > 0.5).astype(int)

# Report the standard binary-classification metrics.
precision = precision_score(y_test, y_pred)
recall = recall_score(y_test, y_pred)
f1 = f1_score(y_test, y_pred)

print(f"Precision: {precision}")
print(f"Recall: {recall}")
print(f"F1 Score: {f1}")


# NOTE(review): removed a commented-out LogisticRegression baseline that was
# dead code — it never called tree.predict, so the y_pred it scored was
# undefined within that snippet. Re-add as a working baseline if a
# linear-model comparison is needed.