
import argparse
import time
import mindspore as ms
import numpy as np
from mindformers import MindFormerConfig
from mindspore_gs.common import logger
from mindspore_gs.datasets import create_boolq_dataset
from mindspore_gs.ptq.network_helpers.mf_net_helpers import MFParallelLlama2Helper
from LRD1 import SVDCompression
from mindspore import context
import pdb
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
from mindspore_gs.ptq.ptq import PTQ
from mindspore_gs.common import BackendTarget
from mindspore_gs.ptq import PTQMode, PTQConfig
from mindspore_gs.ptq import PTQConfig, OutliersSuppressionType
from mindspore_gs.datasets import get_datasets
def evaluate(net, dataset_path, network_helper, n_samples):
    """Evaluate a generative network on the BoolQ dataset and print accuracy.

    For each batch, generates up to 8 new tokens after the prompt and counts a
    sample as correct when the decoded label substring appears (case-insensitive)
    in the decoded prediction of the batch's first sample.

    Args:
        net: network with a ``generate(input_ids, ...)`` method (mindformers model).
        dataset_path (str): path to the BoolQ dataset file.
        network_helper: helper exposing ``get_spec`` and ``create_tokenizer``.
        n_samples (int): number of samples to evaluate; forwarded to the dataset
            factory (presumably non-positive means "all" — TODO confirm).
    """
    top_k = network_helper.get_spec("top_k")
    top_p = network_helper.get_spec("top_p")
    do_sample = network_helper.get_spec("do_sample")
    batch_size = network_helper.get_spec("batch_size")
    seq_length = network_helper.get_spec("seq_length")
    ignore_token_id = network_helper.get_spec("ignore_token_id")
    pad_token_id = network_helper.get_spec("pad_token_id")
    tokenizer = network_helper.create_tokenizer()

    ds = create_boolq_dataset(dataset_path, "eval", batch_size, seq_length, tokenizer, ignore_token_id,
                              n_samples=n_samples)

    correct = 0
    data_count = 0
    total_count = ds.get_dataset_size()
    for ds_item in ds.create_dict_iterator():
        data_count += 1
        print(f"Dataset count: {data_count}/{total_count}", flush=True)
        input_ids = ds_item['input_ids'].asnumpy()
        labels = ds_item['labels'].asnumpy()

        # Valid prompt length per row: index of the last non-pad token + 1.
        batch_valid_length = np.array(
            [np.max(np.argwhere(row != pad_token_id)) + 1 for row in input_ids])

        outputs = net.generate(input_ids, do_sample=do_sample, max_length=seq_length, top_k=top_k, top_p=top_p,
                               max_new_tokens=8)
        # Keep only the newly generated tokens (strip the prompt prefix).
        output_ids = [outputs[j][int(batch_valid_length[j]):] for j in range(input_ids.shape[0])]

        question = tokenizer.decode(input_ids, skip_special_tokens=True)
        pres_str = tokenizer.decode(output_ids, skip_special_tokens=True)
        labels_str = tokenizer.decode(labels, skip_special_tokens=True)

        # Only the first sample of the batch decides correctness (original behavior).
        if labels_str[0].lower() in pres_str[0].lower():
            correct += 1
            print(f"question: {question}\n predict: {pres_str} answer: {labels_str}. correct!", flush=True)
        else:
            print(f"question: {question}\n predict: {pres_str} answer: {labels_str}. not correct!", flush=True)
        if data_count % 100 == 0:
            print(f"acc: {correct / data_count}", flush=True)
    # Guard against an empty dataset to avoid ZeroDivisionError.
    if data_count == 0:
        print("No evaluation samples found.", flush=True)
        return
    print(f"total acc: {correct / data_count}", flush=True)
    print('Evaluate Over!', flush=True)

def get_args():
    """Parse and log the command-line arguments for BoolQ evaluation.

    Returns:
        argparse.Namespace: parsed arguments (config_path, dataset_path,
        calib_dataset_path, ratio, n_samples).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--config_path', '-c', type=str, required=True,
                        help='path to the MindFormers YAML config file')
    parser.add_argument('--dataset_path', '-s', type=str, required=True,
                        help='path to the BoolQ evaluation dataset')
    parser.add_argument('--calib_dataset_path', '-calib', type=str, required=True,
                        help='path to the BoolQ calibration dataset for SVD profiling')
    parser.add_argument('--ratio', '-r', type=float, default=0.5,
                        help='SVD rank-retention ratio for low-rank compression')
    parser.add_argument('--n_samples', '-n', type=int, default=-1,
                        help='number of evaluation samples (-1 keeps the default of 2000)')
    args = parser.parse_args()
    logger.info(f"evaluate args: {args}")
    return args

if __name__ == "__main__":
    start = time.time()
    uargs = get_args()

    logger.info('Creating network...')
    config = MindFormerConfig(uargs.config_path)

    helper = MFParallelLlama2Helper(config)
    tokenizer = helper.create_tokenizer()

    # Calibration data for SVD profiling: a fixed 100-sample slice of BoolQ.
    calib_loader = create_boolq_dataset(
        uargs.calib_dataset_path, "eval", helper.get_spec("batch_size"),
        helper.get_spec("seq_length"), tokenizer,
        helper.get_spec("ignore_token_id"), n_samples=100
    )

    network = helper.create_network()

    # Compress the model with whitening-based SVD low-rank decomposition.
    whitening_svd = SVDCompression(network, helper, rank_ratio=uargs.ratio)
    whitening_svd.profile(calib_loader)
    whitening_svd.apply()

    network.set_train(False)
    network.phase = 'predict'

    # Deploy-mode PTQ (A8W8 + int8 KV-cache); "w2" and "lm_head" layers are
    # excluded from quantization via the opname blacklist.
    ptq_config = PTQConfig(mode=PTQMode.DEPLOY, backend=BackendTarget.ASCEND, opname_blacklist=["w2", "lm_head"],
                           weight_quant_dtype=ms.dtype.int8, act_quant_dtype=ms.dtype.int8,
                           kvcache_quant_dtype=ms.dtype.int8)
    ptq = PTQ(config=ptq_config)
    ptq.apply(network)
    ptq.convert(network)

    # NOTE(review): hard-coded absolute checkpoint path — consider promoting it
    # to a CLI argument so the script is portable across machines.
    ms.load_checkpoint('/home/workspace/sjy/EFLM_2025/golden-stick/mindspore_gs/experimental/LRD/lr_a8w8c8_8_2.ckpt', network)

    # Honor --n_samples when explicitly positive; keep the historical default of 2000.
    eval_samples = uargs.n_samples if uargs.n_samples > 0 else 2000
    evaluate(network, uargs.dataset_path, helper, eval_samples)
    logger.info(f"Total elapsed time: {time.time() - start:.2f}s")















