# coding: utf-8
"""One-shot smoke test: load a GraphGPT model + graph encoder and run a single
round of inference on the first example of an instruction file."""
import sys
import os

# Make the repository root importable when this file is run directly.
root = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
print(f'root is [{root}]')
sys.path.append(root)

# FIX: the CUDA env var is CUDA_VISIBLE_DEVICES (plural). The misspelled
# 'CUDA_VISIBLE_DEVICE' is silently ignored by the CUDA runtime, so the GPU
# mask never took effect. Must be set before CUDA is first initialized
# (torch is imported below, after this line).
os.environ['CUDA_VISIBLE_DEVICES'] = '0'

import warnings
warnings.filterwarnings('ignore')

import json
import time

import torch
import tqdm  # noqa: F401 -- unused in this view; kept for code outside it

from graphgpt.model import GraphLlamaForCausalLM
from graphgpt.conversation import conv_templates, SeparatorStyle
from graphgpt.model.utils import KeywordsStoppingCriteria

# Site-specific locations of weights and instruction data.
graph_data_model_dir = '/home/wangsibo/datas'
graph_model_dir = os.path.join(graph_data_model_dir, 'llms/GraphGPT-7B-mix-all')
gnn_dir = os.path.join(graph_data_model_dir, 'graph-transformer')
instruct_file = os.path.join(graph_data_model_dir, 'eval_np_lp/arxiv_pub_node_st_cot_link_mix.json')

# Context manager so the file handle is closed deterministically
# (the original `json.load(open(...))` leaked it).
with open(instruct_file, 'r', encoding='utf-8') as f:
    eval_data = json.load(f)

from graphgpt.model.GraphLlama import load_model_pretrained
from graphgpt.eval.run_graphgpt import load_graph
from transformers import AutoTokenizer

DEFAULT_GRAPH_TOKEN = "<graph>"
DEFAULT_GRAPH_PATCH_TOKEN = "<g_patch>"
DEFAULT_G_START_TOKEN = "<g_start>"
DEFAULT_G_END_TOKEN = "<g_end>"

# Star import kept last, as in the original: it supplies CLIP used below.
from graphgpt.model import *

# Load the pretrained graph encoder (the project-level `CLIP` class from the
# star import above) together with its saved config.
gnn, graph_args = load_model_pretrained(CLIP, gnn_dir)

# Load the GraphGPT language model in fp16 straight onto GPU 0.
_load_kwargs = dict(
    torch_dtype=torch.float16,
    use_cache=True,
    low_cpu_mem_usage=True,
    device_map='cuda:0',
)
model = GraphLlamaForCausalLM.from_pretrained(graph_model_dir, **_load_kwargs)

print('>>>>> load model done!')
import copy  # NOTE(review): unused in this view; kept in case code outside this chunk needs it

# Load the graph payload for the first eval example.
graph_data_path = os.path.join(graph_data_model_dir, 'graph_data/graph_data_all.pt')
graph_dict = load_graph(eval_data[0], graph_data_path)
print('>>>>> load graph data done!')  # FIX: log typo 'grad' -> 'graph'
graph_data = graph_dict['graph_data']            # graph object later fed to model.generate()
graph_token_len = graph_dict['graph_token_len']  # number of <g_patch> placeholders to insert

tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path=graph_model_dir)
print('>>>>>> tokenizer made!')

# Tell the graph tower which vocabulary id stands for a graph patch token.
model.get_model().get_graph_tower().config.graph_patch_token = tokenizer.convert_tokens_to_ids([DEFAULT_GRAPH_PATCH_TOKEN])[0]

# The first human turn of the first conversation is the raw prompt text.
qs = eval_data[0]["conversations"][0]["value"]
use_graph_start_end = True
print(f'>>>>>>[ORI QS]= {qs}')
"""
if use_graph_start_end:
    qs = qs + '\n' + DEFAULT_G_START_TOKEN + DEFAULT_GRAPH_PATCH_TOKEN * graph_token_len + DEFAULT_G_END_TOKEN
else:
    qs = qs + '\n' + DEFAULT_GRAPH_PATCH_TOKEN * graph_token_len
"""
replace_token = DEFAULT_GRAPH_PATCH_TOKEN * graph_token_len
replace_token = DEFAULT_G_START_TOKEN + replace_token + DEFAULT_G_END_TOKEN
qs = qs.replace(DEFAULT_GRAPH_TOKEN, replace_token)
print(f">>>>>> [qs]:\n {qs}")
conv_template = conv_templates["graphchat_v1"].copy()
conv_template.append_message(conv_template.roles[0], qs)
prompt = conv_template.get_prompt()
# Tokenize the rendered prompt and move the ids to the GPU.
encoded = tokenizer([prompt])
input_ids = torch.as_tensor(encoded.input_ids).cuda()

# Choose the stop string according to the template's separator style.
if conv_template.sep_style == SeparatorStyle.TWO:
    stop_str = conv_template.sep2
else:
    stop_str = conv_template.sep
stopping_criteria = KeywordsStoppingCriteria([stop_str], tokenizer, input_ids)

# Cast node features to fp16 to match the model weights loaded above.
graph_data.graph_node = graph_data.graph_node.to(torch.float16)
# Time and run one sampled generation pass. Everything stays inside
# torch.inference_mode() so no autograd state is recorded.
s0 = time.time()
with torch.inference_mode():
    output_ids = model.generate(
        input_ids.cuda(),  # NOTE(review): input_ids is already on GPU (L74), so this .cuda() is a no-op
        graph_data=graph_data.cuda(),  # graph payload consumed by the project's GraphLlama generate
        do_sample=True,     # sampling at temperature 0.2 -> output is nondeterministic across runs
        temperature=0.2,
        max_new_tokens=1024,
        stopping_criteria=[stopping_criteria])
print(f'>>>>> inference done! cost {time.time() - s0}s')

# The model echoes the prompt tokens; everything past the prompt length is the
# newly generated reply.
input_token_len = input_ids.shape[1]
n_diff_input_output = (input_ids != output_ids[:, :input_token_len]).sum().item()
if n_diff_input_output > 0:
    print(f'[Warning] {n_diff_input_output} output_ids are not the same as the input_ids')

outputs = tokenizer.batch_decode(output_ids[:, input_token_len:], skip_special_tokens=True)[0]
outputs = outputs.strip()
# FIX: guard against a falsy stop string. Every string ends with '', so the
# old unconditional check would execute outputs[:-0] and wipe the entire reply
# when stop_str == '' (and endswith(None) raises TypeError when sep2 is unset).
if stop_str and outputs.endswith(stop_str):
    outputs = outputs[:-len(stop_str)]
outputs = outputs.strip()

print(outputs)

