from model import *

# --- Plotting / reproducibility / data-loading setup ----------------------
sns.set(style='whitegrid', palette='muted', font_scale=1.2)

HAPPY_COLORS_PALETTE = ["#01BEFE", "#FFDD00", "#FF7D00", "#FF006D", "#ADFF02", "#8F00FF"]

sns.set_palette(sns.color_palette(HAPPY_COLORS_PALETTE))

rcParams['figure.figsize'] = 12, 8

# Fix numpy/torch seeds so runs are reproducible.
RANDOM_SEED = 10
np.random.seed(RANDOM_SEED)
torch.manual_seed(RANDOM_SEED)

# Use the first CUDA device when available, otherwise fall back to CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

PRE_TRAINED_MODEL_NAME = "hfl/chinese-roberta-wwm-ext"

tokenizer = BertTokenizer.from_pretrained(PRE_TRAINED_MODEL_NAME, do_lower_case=True)
# NOTE(review): the variable is immediately repurposed to point at a local
# checkpoint (used when the model is loaded further down) — confusing reuse
# of the same name for two different things; consider a separate constant.
PRE_TRAINED_MODEL_NAME = '../ROBERT_4_model.bin'

# Load the train/validation splits (tab-separated files).
df_train = pd.read_csv('../data/train.csv', sep='\t')
df_val = pd.read_csv('../data/val.csv', sep='\t')

# Drop rows whose 'verbA0A1' annotation is missing ...
df_train = df_train.drop(df_train.loc[df_train.verbA0A1.isna()].index)
df_val = df_val.drop(df_val.loc[df_val.verbA0A1.isna()].index)

# ... or is the empty-list literal '[]'.
df_train = df_train.drop(df_train.loc[df_train.verbA0A1 == '[]'].index)
df_val = df_val.drop(df_val.loc[df_val.verbA0A1 == '[]'].index)



# Generating the masks of verb, A0, A1
def string_to_tuples_list(text):
    """Parse a span-annotation string into a list of tuples.

    Examples: "[(0, 1), (2, 3)]" -> [(0, 1), (2, 3)];
    a single span "[(0, 1)]" is normalized to [(0, 1)] as well.
    Missing values (NaN, or any non-string) and '[]' yield [].
    """
    # NaN from pandas is a float; treat any non-string as "no annotation".
    # (More robust than the original identity check against np.nan.)
    if not isinstance(text, str) or text == '[]':
        return []
    # Fuse adjacent bracketed groups such as "[(0, 1)], [(2, 3)]".
    # NOTE(review): the fused form is not valid literal syntax, so presumably
    # multi-group inputs never actually occur — confirm against the data.
    text = ''.join(text.split('], ['))
    # BUG FIX: use ast.literal_eval instead of eval — this column comes
    # straight out of a CSV file, and eval would execute arbitrary code.
    parsed = ast.literal_eval(text.strip('[').strip(']'))
    if not parsed:
        return []  # e.g. "[()]": nothing to report
    # A single tuple evaluates to (a, b); wrap it so callers always get a list
    # of tuples.
    if not isinstance(parsed[0], tuple):
        return [parsed]
    return list(parsed)


# Convert the stringified span annotations into real lists of tuples.
for col in ['verb', 'A0', 'A1']:
    df_train[col] = df_train[col].apply(string_to_tuples_list)
    df_val[col] = df_val[col].apply(string_to_tuples_list)

# These columns are well-formed Python literals, so literal_eval suffices.
for col in ['stock_factors', 'verbA0A1']:
    df_train[col] = df_train[col].apply(ast.literal_eval)
    df_val[col] = df_val[col].apply(ast.literal_eval)



# Build the verb/A0/A1 mask columns (mask() comes from the model module).
df_train = mask(df_train)
df_val = mask(df_val)

# Quick look at the label distribution of the training set.
sns.countplot(df_train.label)
plt.xlabel('review score')

total = pd.concat([df_train, df_val])

# NOTE(review): sort_values is not in-place and its result is discarded;
# 'total' is never used below — confirm whether this line was meant to be
# total = total.sort_values(by='DATE').
total.sort_values(by='DATE')

# Create DataLoaders
max_len = 300  # maximum tokenized sequence length per example
class_names = ['negative', 'neutral', 'positive']  # label index -> class name





# Re-index after the row drops above so positional indexing works downstream.
df_train = df_train.reset_index(drop=True)
df_val = df_val.reset_index(drop=True)

BATCH_SIZE = 8  # TODO: tune batch size

# create_data_loader comes from the model module.
train_data_loader = create_data_loader(df_train, tokenizer, max_len, BATCH_SIZE)
val_data_loader = create_data_loader(df_val, tokenizer, max_len, BATCH_SIZE)

# Model

NUMBER_FACTOR = 24  # number of stock factors fed to the classifier head

# Build the 3-class sentiment classifier and restore pretrained weights.
model = SentimentClassifier(3)
# BUG FIX: the original called torch.load() with no path at all, which raises
# TypeError at runtime. Load the checkpoint staged in PRE_TRAINED_MODEL_NAME
# ('../ROBERT_4_model.bin', reassigned above); map_location keeps the load
# working on CPU-only machines.
checkpoint = torch.load(PRE_TRAINED_MODEL_NAME, map_location=device)
# The training loop below saves state_dicts, so the checkpoint is presumably
# one too; fall back to a fully-pickled model otherwise.
# TODO(review): confirm the actual checkpoint format.
if isinstance(checkpoint, dict):
    model.load_state_dict(checkpoint)
else:
    model = checkpoint
model = model.to(device)

EPOCHS = 10  # TODO: tune number of epochs

# AdamW without bias correction, as in the original BERT implementation.
optimizer = AdamW(model.parameters(), lr=1e-5, correct_bias=False)
total_steps = len(train_data_loader) * EPOCHS

# Linear decay schedule. NOTE(review): num_warmup_steps=2 is unusually small
# relative to total_steps — confirm this is intentional.
scheduler = get_linear_schedule_with_warmup(
    optimizer,
    num_warmup_steps=2,
    num_training_steps=total_steps
)

loss_fn = nn.CrossEntropyLoss().to(device)


# Per-epoch metric history and the best accuracies seen so far.
history = defaultdict(list)
best_accuracy = 0
best_train_accuracy = 0

# Main training loop: train one epoch, checkpoint on best train accuracy,
# then evaluate and checkpoint (separately) on best validation accuracy.
for epoch in range(EPOCHS):

  print(f'Epoch {epoch + 1}/{EPOCHS}')
  print('-' * 10)

  # train_epoch comes from the model module.
  train_acc, train_loss, cl_acc = train_epoch(
    model,
    train_data_loader,
    loss_fn,
    optimizer,
    device,
    scheduler,
    len(df_train)
  )

  print(f'Train loss {train_loss} accuracy {train_acc} cl_accuracy{cl_acc}')

  # Keep the weights that scored best on the *training* set.
  if train_acc > best_train_accuracy:
    torch.save(model.state_dict(), 'best_train_model_state.bin')
    best_train_accuracy = train_acc
    print("save to best train model")

  # eval_model comes from the model module.
  val_acc, val_loss = eval_model(
    model,
    val_data_loader,
    loss_fn,
    device,
    len(df_val)
  )

  print(f'Val   loss {val_loss} accuracy {val_acc}')
  print()

  # Accumulate metrics for the history plots below.
  history['train_acc'].append(train_acc)
  history['train_loss'].append(train_loss)
  history['val_acc'].append(val_acc)
  history['val_loss'].append(val_loss)

  # Keep the weights that scored best on the *validation* set.
  if val_acc > best_accuracy:
    torch.save(model.state_dict(), 'Factor24_Pretrained_RoBert_SRL(FC)_A0A1.bin')
    best_accuracy = val_acc
    print("save to best model")

# Plot per-epoch train/validation accuracy. The accuracies are torch tensors
# (possibly on the GPU), so move each to the CPU before matplotlib sees them.
for key, label in (('train_acc', 'train accuracy'),
                   ('val_acc', 'validation accuracy')):
    plt.plot([acc.cpu() for acc in history[key]], label=label)

plt.title('Training history')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend()
plt.ylim([0, 1])
plt.show()
