import math

from sentence_transformers import InputExample, CrossEncoder
from sentence_transformers.cross_encoder.evaluation import CEBinaryClassificationEvaluator, CESoftmaxAccuracyEvaluator
from datetime import datetime
import numpy as np
import pandas as pd

from torch.utils.data import DataLoader
from sklearn.model_selection import train_test_split


# Load the full labelled triple dataset (subject / predicate / object rows).
df = pd.read_pickle('/home/yx/project/P_prediction/ccks_1_sbert/data/data.pkl')

# Training hyper-parameters.
train_batch_size = 512
max_seq_length = 48
num_epochs = 40

# Chinese MacBERT base checkpoint from HuggingFace.
model_name = 'hfl/chinese-macbert-base'

# Timestamped output directory so repeated runs never overwrite each other.
model_save_path = (
    f"output/multi/{model_name.replace('/', '-')}"
    f"-{datetime.now().strftime('%Y-%m-%d_%H-%M-%S')}"
)

# Mapping from predicate string to class id (4-way classification).
label2int = {"品类_适用_场景": 0, "品类_搭配_品类": 1, "品类_适用_人物": 2, '人物_蕴含_场景': 3}

# 90/10 train/dev split with a fixed seed for reproducibility.
df_train, df_dev = train_test_split(df, train_size=0.9, random_state=725)

# Wrap each (subject, object) pair as an InputExample whose label is the
# integer class id of its predicate.
train_samples = [
    InputExample(texts=[row['subject'], row['object']],
                 label=label2int[row['predicate']])
    for _, row in df_train.iterrows()
]

dev_samples = [
    InputExample(texts=[row['subject'], row['object']],
                 label=label2int[row['predicate']])
    for _, row in df_dev.iterrows()
]

train_dataloader = DataLoader(
    train_samples, shuffle=True, batch_size=train_batch_size)

# Softmax-accuracy evaluator over the held-out dev split.
evaluator = CESoftmaxAccuracyEvaluator.from_input_examples(
    dev_samples, name='sts-dev')

# Linear warmup over the first 10% of all training steps.
warmup_steps = math.ceil(len(train_dataloader) * num_epochs * 0.1)

# 4-way cross-encoder classifier over (subject, object) pairs.
model = CrossEncoder(model_name, num_labels=4)

# BUG FIX: epochs was hard-coded to 30 while warmup_steps was computed from
# num_epochs (40), so warmup covered 13.3% of the actual run instead of the
# intended 10%. Use num_epochs everywhere so the two stay consistent.
model.fit(train_dataloader=train_dataloader,
          evaluator=evaluator,
          epochs=num_epochs,
          warmup_steps=warmup_steps,
          evaluation_steps=200,
          output_path=model_save_path,
          use_amp=True,
          )
# Load the unlabelled evaluation split and score every (subject, object)
# pair with the freshly trained cross-encoder.
df_dev = pd.read_pickle(
    '/home/yx/project/P_prediction/ccks_1_sbert/data/dev_data.pkl')

test_samples = [[row['subject'], row['object']]
                for _, row in df_dev.iterrows()]

# Reload the best checkpoint saved during training.
model = CrossEncoder(model_save_path, num_labels=4)

# results: one row of 4 class logits/probabilities per test pair.
results = model.predict(test_samples)


a = (results == results.max(axis=1, keepdims=1)).astype(float)
r_ids_1 = []
for i in a:
    r_ids_1.append(np.argwhere(i == 1)[0][0])

r_ids_2 = []
for s in df_dev.predicate.values:
    id = label2int[s]
    r_ids_2.append(id)

df_dev['id1'] = r_ids_1
df_dev['id2'] = r_ids_2

labels = []
ids = []
for index, row in df_dev.iterrows():
    if row['id1'] == row['id2']:
        labels.append(1)
    else:
        labels.append(0)
    ids.append(row['triple_id'])

pd.DataFrame({'salience': labels, 'triple_id': ids}).to_json(
    '/home/yx/project/P_prediction/ccks_1_sbert/data/result_multi.jsonl', orient='records', lines=True)
pd.DataFrame({'salience': labels, 'triple_id': ids}).to_pickle(
    '/home/yx/project/P_prediction/ccks_1_sbert/output/multi/multi.pkl')
