#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @File  : PromptPaddle.py
# @Author: Richard Chiming Xu
# @Date  : 2023/3/14
# @Desc  : Prompt-learning text classification built on Paddle/PaddleNLP
import json
import os

import pandas as pd
import numpy as np

from sklearn.model_selection import train_test_split

import paddle
from paddlenlp.transformers import AutoTokenizer, AutoModelForMaskedLM
from paddlenlp.datasets import MapDataset
from paddlenlp.prompt import AutoTemplate, SoftVerbalizer, PromptTuningArguments, PromptModelForSequenceClassification,PromptTrainer
from paddle.metric import Accuracy
from paddlenlp.trainer import EarlyStoppingCallback, PdArgumentParser
import warnings

warnings.filterwarnings("ignore")


def set_random_seed(config):
    """Seed the numpy and paddle RNGs from ``config.random_seed`` for reproducibility."""
    seed = config.random_seed
    np.random.seed(seed)
    paddle.seed(seed)


class Config():
    """Static configuration for prompt-tuning training and inference."""
    mode = 'train'  # 'train' or 'predict'

    train_data = 'data/train.csv'
    test_data = 'data/test.csv'
    result_data_save = 'result/prompt.csv'
    model_save = './model/prompt'
    max_seq_size = 24  # maximum sequence length
    # label name -> id vocabulary
    label_k_v_vocab = {'HomeAppliance-Control': 0, 'Calendar-Query': 1, 'Audio-Play': 2, 'Music-Play': 3,
                       'TVProgram-Play': 4, 'Video-Play': 5, 'Weather-Query': 6, 'Other': 7, 'Alarm-Update': 8,
                       'Travel-Query': 9, 'FilmTele-Play': 10, 'Radio-Listen': 11}
    # reverse mapping: id -> label name
    label_v_k_vocab = dict(zip(label_k_v_vocab.values(), label_k_v_vocab.keys()))
    # label name -> Chinese verbalizer words fed to SoftVerbalizer
    label_prompt_verbalizer = {'HomeAppliance-Control': '控制家电', 'Calendar-Query': '查询日历',
                               'Audio-Play': '播放音频', 'Music-Play': '播放音乐',
                               'TVProgram-Play': '播放电视节目', 'Video-Play': '播放视频', 'Weather-Query': '查询天气',
                               'Other': '干其他事', 'Alarm-Update': '更新行程',
                               'Travel-Query': '查询差旅细节', 'FilmTele-Play': '播放剧集', 'Radio-Listen': '听电台'}
    # NOTE(review): 0.8 holds out 80% of the data for validation, leaving only
    # 20% for training — confirm this is intentional and not meant to be 0.2.
    test_size = 0.8
    random_seed = 42  # RNG seed
    # pretrained backbone model name
    model = 'ernie-3.0-base-zh'

    # prompt template (text + mask slot)
    prompt = "{'text': 'text'} 这句话的类别是{'mask'}"
    # CLI-style arguments parsed into PromptTuningArguments.
    # (duplicate "--load_best_model_at_end" entry removed)
    train_args = ["--output_dir", model_save,
                  "--learning_rate", "1e-5",
                  "--ppt_learning_rate", "1e-4",
                  "--num_train_epochs", "15",
                  "--logging_steps", "20",
                  "--per_device_train_batch_size", "64",
                  "--per_device_eval_batch_size", "32",
                  "--metric_for_best_model", "accuracy",
                  "--load_best_model_at_end", "True",
                  "--evaluation_strategy", "epoch",
                  "--save_strategy", "epoch",
                  ]


'''
    Data loading
'''


def convert_labels_to_ids(data_ds, verbalizer):
    '''
    Replace each sample's string label with its integer id.
    :param data_ds: iterable of dict samples with a 'labels' key
    :param verbalizer: verbalizer exposing a labels_to_ids mapping
    :return: a new MapDataset with integer labels
    '''
    mapping = verbalizer.labels_to_ids
    converted = []
    for example in data_ds:
        example['labels'] = mapping[example['labels']]
        converted.append(example)
    return MapDataset(converted)


def create_dataset(config: Config):
    """Build dataset(s) from the CSV files referenced by ``config``.

    :param config: run configuration (file paths, mode, split parameters).
    :return: ``(train_dataset, val_dataset)`` when ``config.mode == 'train'``;
        a single test ``MapDataset`` when ``config.mode == 'predict'``.
    :raises ValueError: on an unknown mode (the original silently returned None).
    """
    if config.mode == 'train':
        # Tab-separated file with no header: column 0 is text, column 1 is label.
        data_df = pd.read_csv(config.train_data, sep='\t', header=None)
        data_df = data_df.rename(columns={0: 'text', 1: 'labels'})
        # Split into train/validation sets.
        X_train, X_val, y_train, y_val = train_test_split(data_df['text'].tolist(), data_df['labels'].tolist(),
                                                          test_size=config.test_size,
                                                          random_state=config.random_seed)
        # Wrap as MapDataset of {'text', 'labels'} dicts.
        train_dataset = MapDataset([{'text': t, 'labels': l} for t, l in zip(X_train, y_train)])
        val_dataset = MapDataset([{'text': t, 'labels': l} for t, l in zip(X_val, y_val)])
        return train_dataset, val_dataset
    if config.mode == 'predict':
        # Test file has only a text column.
        data_df = pd.read_csv(config.test_data, sep='\t', header=None)
        data_df = data_df.rename(columns={0: 'text'})
        return MapDataset([{'text': text} for text in data_df['text']])
    raise ValueError(f"unknown mode: {config.mode!r} (expected 'train' or 'predict')")


'''
    Training / prediction
'''


# Evaluation function: accuracy over the trainer's predictions.
def compute_metrics(eval_preds):
    """Return ``{'accuracy': acc}`` computed from an EvalPrediction."""
    metric = Accuracy()
    preds = paddle.to_tensor(eval_preds.predictions)
    labels = paddle.to_tensor(eval_preds.label_ids)
    metric.update(metric.compute(preds, labels))
    return {'accuracy': metric.accumulate()}

def train(config: Config, train_dataset, val_dataset, model, template, verbalizer, tokenizer=None):
    """Fine-tune the prompt model and save it to ``config.model_save``.

    :param config: run configuration (training args, save path).
    :param train_dataset: training MapDataset with integer labels.
    :param val_dataset: validation MapDataset with integer labels.
    :param model: pretrained masked-LM backbone.
    :param template: prompt template.
    :param verbalizer: label-word verbalizer.
    :param tokenizer: tokenizer for the PromptTrainer; defaults to the
        module-level ``tokenizer`` global for backward compatibility.
    :return: the fitted PromptTrainer.
    """
    # BUG FIX: the original silently depended on a module-level `tokenizer`
    # global created in __main__; accept it explicitly, keeping the old
    # fallback so existing callers are unaffected.
    if tokenizer is None:
        tokenizer = globals().get('tokenizer')

    # Parse CLI-style training arguments into PromptTuningArguments.
    parser = PdArgumentParser((PromptTuningArguments,))
    training_args = parser.parse_args_into_dataclasses(args=config.train_args, look_for_args_file=False)[0]

    # Wrap backbone + template + verbalizer; full fine-tuning (PLM not frozen).
    prompt_model = PromptModelForSequenceClassification(model, template, verbalizer, freeze_plm=False,
                                                        freeze_dropout=False)

    # Loss function
    criterion = paddle.nn.CrossEntropyLoss()

    # NOTE(review): patience (16) exceeds num_train_epochs (15) in the default
    # Config, so early stopping can never trigger — confirm intended.
    callbacks = [EarlyStoppingCallback(early_stopping_patience=16, early_stopping_threshold=0.)]

    trainer = PromptTrainer(model=prompt_model,
                            tokenizer=tokenizer,
                            args=training_args,
                            criterion=criterion,
                            train_dataset=train_dataset,
                            eval_dataset=val_dataset,
                            callbacks=callbacks,
                            compute_metrics=compute_metrics)

    # Train, log, and persist model/metrics/trainer state.
    train_result = trainer.train(resume_from_checkpoint=None)
    metrics = train_result.metrics
    trainer.log_metrics("train", metrics)
    trainer.save_model()
    trainer.save_metrics("train", metrics)
    trainer.save_state()
    model.save_pretrained(config.model_save)
    return trainer

def predict(config: Config, test_dataset, model, template, verbalizer, tokenizer=None):
    """Load the saved checkpoint, predict on ``test_dataset``, write CSV results.

    :param config: run configuration (checkpoint and output paths).
    :param test_dataset: MapDataset of {'text': ...} samples.
    :param model: pretrained masked-LM backbone.
    :param template: prompt template.
    :param verbalizer: label-word verbalizer.
    :param tokenizer: tokenizer for the PromptTrainer; defaults to the
        module-level ``tokenizer`` global for backward compatibility.
    """
    # BUG FIX: same implicit-global dependency as train(); made explicit.
    if tokenizer is None:
        tokenizer = globals().get('tokenizer')

    # Inference only: freeze the PLM and dropout.
    prompt_model = PromptModelForSequenceClassification(model, template, verbalizer, freeze_plm=True,
                                                        freeze_dropout=True)
    trainer = PromptTrainer(model=prompt_model, tokenizer=tokenizer)
    trainer.load_state_dict_from_checkpoint(resume_from_checkpoint=config.model_save)

    # Predict and map ids back to label names.
    predict_result = trainer.predict(test_dataset=test_dataset)
    pred_ids = np.argmax(predict_result.predictions[0], axis=1)
    labels_to_ids = verbalizer.labels_to_ids
    ids_to_labels = dict(zip(labels_to_ids.values(), labels_to_ids.keys()))
    result_df = pd.DataFrame()
    result_df['Target'] = [ids_to_labels[pred] for pred in pred_ids]
    result_df['ID'] = [i + 1 for i in range(len(result_df))]  # 1-based ids
    result_df[['ID', 'Target']].to_csv(config.result_data_save, index=False)
    print('done.')





if __name__ == '__main__':
    config = Config()
    set_random_seed(config)
    paddle.device.set_device('gpu:0')

    config.mode = 'train'
    if config.mode == 'train':
        # Load data and the base model.
        train_dataset, val_dataset = create_dataset(config)
        model = AutoModelForMaskedLM.from_pretrained(config.model)
        tokenizer = AutoTokenizer.from_pretrained(config.model)
        # Build the prompt template.
        template = AutoTemplate.create_from(config.prompt, tokenizer, max_length=config.max_seq_size, model=model)
        # Label-word mapping.
        verbalizer = SoftVerbalizer(tokenizer=tokenizer, model=model,
                                    labels=list(config.label_prompt_verbalizer.keys()),
                                    label_words=config.label_prompt_verbalizer)
        # Convert string labels to integer ids.
        train_dataset = convert_labels_to_ids(train_dataset, verbalizer)
        val_dataset = convert_labels_to_ids(val_dataset, verbalizer)
        # Train.
        trainer = train(config, train_dataset, val_dataset, model, template, verbalizer)

        # Predict right after training: per the original note, paddle cannot
        # reload the saved static json directly on Windows, so the in-memory
        # trainer is reused instead of going through predict().
        config.mode = 'predict'
        test_dataset = create_dataset(config)
        predict_result = trainer.predict(test_dataset=test_dataset)
        pred_ids = np.argmax(predict_result.predictions[0], axis=1)
        labels_to_ids = verbalizer.labels_to_ids
        ids_to_labels = dict(zip(labels_to_ids.values(), labels_to_ids.keys()))
        result_df = pd.DataFrame()
        result_df['Target'] = [ids_to_labels[pred] for pred in pred_ids]
        result_df['ID'] = [i + 1 for i in range(len(result_df))]
        result_df[['ID', 'Target']].to_csv(config.result_data_save, index=False)
        print('done.')

    else:
        # Predict-only path (original note: unfinished / under development).
        # Load data and the fine-tuned model from disk.
        test_dataset = create_dataset(config)
        model = AutoModelForMaskedLM.from_pretrained(config.model_save)
        tokenizer = AutoTokenizer.from_pretrained(config.model_save)
        # Rebuild the template from the saved template config. (The original
        # also called AutoTemplate.load_from first and immediately overwrote
        # the result with create_from — the redundant call is removed.)
        with open(config.model_save + '/template_config.json', "r", encoding='utf-8') as fp:
            prompt = json.load(fp)
        template = AutoTemplate.create_from(prompt=prompt, tokenizer=tokenizer, max_length=config.max_seq_size, model=model)
        # BUG FIX: state files are saved under model_save, but the original
        # joined against config.mode (the string 'train'/'predict').
        template_param_file = os.path.join(config.model_save, 'template_state.pdparams')
        if os.path.isfile(template_param_file):
            template.set_state_dict(paddle.load(template_param_file))
        # Rebuild the verbalizer from the saved label words.
        with open(config.model_save + '/verbalizer_config.json', "r", encoding='utf-8') as fp:
            label_words = json.load(fp)
        verbalizer = SoftVerbalizer(label_words=label_words, tokenizer=tokenizer, model=model)
        # BUG FIX: same wrong path base as the template state file above.
        verb_state_file = os.path.join(config.model_save, 'verbalizer_state.pdparams')
        if os.path.isfile(verb_state_file):
            verbalizer.set_state_dict(paddle.load(verb_state_file))

        # Run prediction and write results.
        predict(config, test_dataset, model, template, verbalizer)
