# coding: utf-8
"""Standalone smoke-test script: run GraphGPT link-prediction inference on one sample."""
import os
import sys
import warnings

# BUGFIX: the variable is CUDA_VISIBLE_DEVICES (plural) — the singular form is
# silently ignored by the CUDA runtime. It must also be set before torch/CUDA
# initialisation to take effect.
os.environ['CUDA_VISIBLE_DEVICES'] = '0'

# Make the repository root importable (script lives three levels below it).
root = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
print(f'root is [{root}]')
sys.path.append(root)

warnings.filterwarnings('ignore')

import json
from traceback import print_exc

import torch
import tqdm

from graphgpt.conversation import conv_templates, SeparatorStyle
from graphgpt.eval.run_graphgpt_LP import load_graph_LP
from graphgpt.model import GraphLlamaForCausalLM
from graphgpt.model.utils import KeywordsStoppingCriteria
# Base directory holding LLM weights, the graph encoder, and graph data.
graph_data_model_dir = '/home/wangsibo/datas'

graph_model_dir = os.path.join(graph_data_model_dir, 'llms/GraphGPT-7B-mix-all')
gnn_dir = os.path.join(graph_data_model_dir, 'graph-transformer')

# Link-prediction instruction file (list of conversation items).
instruct_file = './arxiv_lp.json'
# Use a context manager so the file handle is closed deterministically
# (the original left the handle open for the lifetime of the process).
with open(instruct_file, 'r', encoding='utf-8') as f:
    eval_data = json.load(f)

from graphgpt.model.GraphLlama import load_model_pretrained
from graphgpt.eval.run_graphgpt import load_graph
from transformers import AutoTokenizer

# Special tokens used by GraphGPT to splice graph embeddings into the prompt.
DEFAULT_GRAPH_TOKEN = "<graph>"  # placeholder in the raw instruction text; expanded below
DEFAULT_GRAPH_PATCH_TOKEN = "<g_patch>"  # one token per graph patch embedding
DEFAULT_G_START_TOKEN = "<g_start>"  # marks the start of a graph token span
DEFAULT_G_END_TOKEN = "<g_end>"  # marks the end of a graph token span

# NOTE(review): wildcard import — presumably this provides the CLIP class used
# by load_model_pretrained below; confirm against graphgpt.model.__init__.
from graphgpt.model import *

# Load the pretrained graph encoder. CLIP is assumed to come from the wildcard
# `graphgpt.model` import above — TODO confirm.
gnn, graph_args = load_model_pretrained(CLIP, gnn_dir)

# model = None
# Load the GraphGPT LLM in fp16; low_cpu_mem_usage avoids materialising a full
# fp32 copy on the host while the shards are loaded.
model = GraphLlamaForCausalLM.from_pretrained(pretrained_model_name_or_path=graph_model_dir,
                                              torch_dtype=torch.float16,
                                              use_cache=True,
                                              low_cpu_mem_usage=True,
                                              # device_map='cuda:0'
                                              )
model = model.cuda()

tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path=graph_model_dir)

# Serialized graph data for all evaluation samples.
graph_data_path = os.path.join(graph_data_model_dir, 'graph_data/graph_data_all.pt')

# Tell the graph tower which vocabulary id represents a graph patch, and that
# the patch span is wrapped in <g_start>/<g_end> markers.
model.get_model().get_graph_tower().config.graph_patch_token = tokenizer.convert_tokens_to_ids([DEFAULT_GRAPH_PATCH_TOKEN])[0]
use_graph_start_end = True
model.get_model().get_graph_tower().use_graph_start_end = use_graph_start_end
'''
if use_graph_start_end:
    graph_config.graph_start_token, graph_config.graph_end_token = tokenizer.convert_tokens_to_ids([DEFAULT_G_START_TOKEN, DEFAULT_G_END_TOKEN])
    # TODO: add graph token len
'''
# Evaluate only the first instruction item as a smoke test.
instruct_item = eval_data[0]

# Load the two subgraphs (source/target of the candidate link) and their token lengths.
graph_dict = load_graph_LP(instruct_item, graph_data_path)
graph_token_len_1 = graph_dict['graph_token_len_1']
graph_token_len_2 = graph_dict['graph_token_len_2']
graph_data = graph_dict['graph_data']
qs = instruct_item["conversations"][0]["value"]

# Expand each <graph> placeholder into <g_start><g_patch>*len<g_end> so the
# tokenizer reserves one position per graph patch embedding.
replace_token_1 = DEFAULT_G_START_TOKEN + DEFAULT_GRAPH_PATCH_TOKEN * graph_token_len_1 + DEFAULT_G_END_TOKEN
replace_token_2 = DEFAULT_G_START_TOKEN + DEFAULT_GRAPH_PATCH_TOKEN * graph_token_len_2 + DEFAULT_G_END_TOKEN

# str.replace with count=1 substitutes only the first occurrence and is a safe
# no-op when a placeholder is absent. The previous find/slice version corrupted
# the prompt when fewer than two <graph> tokens were present (find() returns -1,
# producing wrong slice boundaries).
qs = qs.replace(DEFAULT_GRAPH_TOKEN, replace_token_1, 1)
qs = qs.replace(DEFAULT_GRAPH_TOKEN, replace_token_2, 1)
conv_mode = "graphchat_v1"

# Build a single-turn conversation: the user question, then an empty
# assistant slot that generate() will fill in.
conv = conv_templates[conv_mode].copy()
user_role, assistant_role = conv.roles
conv.append_message(user_role, qs)
conv.append_message(assistant_role, None)
prompt = conv.get_prompt()

inputs = tokenizer([prompt])
input_ids = torch.as_tensor(inputs.input_ids).cuda()

# Templates with two separators terminate the assistant turn with sep2;
# everything else uses the single separator.
if conv.sep_style == SeparatorStyle.TWO:
    stop_str = conv.sep2
else:
    stop_str = conv.sep
keywords = [stop_str]
stopping_criteria = KeywordsStoppingCriteria(keywords, tokenizer, input_ids)

# Move both subgraphs to half precision on the GPU so their node features
# match the fp16 model weights.
for graph_key in ('graph_1', 'graph_2'):
    g = graph_data[graph_key]
    g.graph_node = g.graph_node.to(torch.float16)
    graph_data[graph_key] = g.cuda()

with torch.inference_mode():
    output_ids = model.generate(
        input_ids,
        graph_data=graph_data,
        do_sample=True,
        temperature=0.2,
        max_new_tokens=1024,
        stopping_criteria=[stopping_criteria],
    )

# generate() echoes the prompt tokens; strip them to keep only the continuation.
input_token_len = input_ids.shape[1]
n_diff_input_output = (input_ids != output_ids[:, :input_token_len]).sum().item()
if n_diff_input_output > 0:
    print(f'[Warning] {n_diff_input_output} output_ids are not the same as the input_ids')
outputs = tokenizer.batch_decode(output_ids[:, input_token_len:], skip_special_tokens=True)[0]
outputs = outputs.strip()
# BUGFIX: guard against an empty stop string — ''.endswith('') is True and
# outputs[:-0] == outputs[:0] would wipe the entire answer.
if stop_str and outputs.endswith(stop_str):
    outputs = outputs[:-len(stop_str)].strip()

print(outputs)



