import pandas as pd
import numpy as np

# Tab-separated question-pair file: two question columns plus a binary label.
data = pd.read_csv("data.tsv", sep="\t", encoding="utf-8", names=["q1", "q2", "label"])

def to_int(value):
    """Coerce a label cell to a built-in ``int``.

    Used via ``Series.apply`` so each label (which may arrive as a string
    or numeric type from the CSV parser) becomes a plain Python int.
    """
    # Parameter renamed from `input`, which shadowed the builtin of the
    # same name; the only caller passes it positionally via .apply().
    return int(value)
# Ground-truth labels as ints, restricted to the evaluation slice
# (rows 90000 through 92899 of the label column).
train_y_pd_split = data["label"].apply(to_int).tolist()[90000:92900]

# Model output file: one id and one score per row, tab-separated.
predict_data = pd.read_csv("out.txt", sep="\t", encoding="utf-8", names=["id", "score"])

score_series = predict_data["score"]
# Hard decisions at a fixed 0.2 cut-off: True/False mapped to 1/0.
valid_predict_binary = score_series.ge(0.2).astype(int)
# Raw scores as a plain list for the score-based metrics below.
train_predict = score_series.tolist()
from sklearn import metrics  # NOTE(review): move this import to the top of the file

# Score-based metrics computed from the raw prediction scores.
print('LogLoss: %.4f' % metrics.log_loss(train_y_pd_split, train_predict))
print('AUC: %.4f' % metrics.roc_auc_score(train_y_pd_split, train_predict))
# Decision-based metrics computed from the 0.2-thresholded predictions.
print('Recall: %.4f' % metrics.recall_score(train_y_pd_split, valid_predict_binary))
print('F1-score: %.4f' % metrics.f1_score(train_y_pd_split, valid_predict_binary))
# Fixed typo in the printed label: was "Precesion".
print('Precision: %.4f' % metrics.precision_score(train_y_pd_split, valid_predict_binary))
# Sweep every 10th threshold from the precision-recall curve and report the
# threshold achieving the best F1. The F1 values and their thresholds are
# kept in parallel lists: the original code indexed the argmax over the
# SUBSAMPLED f1 list back into the FULL thresholds array, which reported a
# wrong "best" threshold whenever the argmax was nonzero.
precision, recall, thresholds = metrics.precision_recall_curve(train_y_pd_split, train_predict)
all_f1 = []
sampled_thresholds = []
for i in range(0, len(thresholds), 10):
    denom = precision[i] + recall[i]
    # Guard the 0/0 case (both precision and recall zero): define F1 as 0.
    f1 = 2 * precision[i] * recall[i] / denom if denom > 0 else 0.0
    all_f1.append(f1)
    sampled_thresholds.append(thresholds[i])
    # print(f1, sampled_thresholds[-1])
best_idx = int(np.argmax(all_f1))
print("Best F1:" + str(all_f1[best_idx]))
print("Best Thresholds:" + str(sampled_thresholds[best_idx]))
