import gc
import torch
import os
import asyncio
import json
from vllm.engine.arg_utils import AsyncEngineArgs
from vllm.inputs import TokensPrompt
from uuid import uuid4
from agentic_system.environments.prompts.swe_bench_memory import *
from jinja2 import Template
from dataclasses import dataclass, asdict

os.environ["VLLM_USE_V1"]="1"

from vllm import LLM, SamplingParams
from vllm.v1.engine.async_llm import AsyncLLM
from vllm.distributed.parallel_state import (destroy_distributed_environment,
                                             destroy_model_parallel)
from tokenizers import Tokenizer
from transformers import AutoTokenizer


def clean_up():
    """Tear down vLLM's distributed state and release accelerator memory.

    Intended to run after the `llm` object has been deleted. The order
    matters: model-parallel groups are destroyed first, then the
    distributed environment, then Python garbage is collected before the
    device cache is emptied.
    """
    destroy_model_parallel()
    destroy_distributed_environment()
    gc.collect()
    # NOTE(review): targets the Ascend NPU backend (`torch.npu`), not CUDA —
    # this line fails on CUDA-only torch builds; confirm the target device.
    torch.npu.empty_cache()

# Simple smoke-test prompts (currently unused by gen(), which replays a
# hard-coded token-id prompt instead).
prompts = [
    "Hello, my name is",
    "The future of AI is",
]
# Shared sampling configuration for every generate() call in this script.
sampling_params = SamplingParams(temperature=0.8, top_p=0.95)

# Full AsyncEngineArgs dump (captured from a server launch) reused to build
# an offline LLM below; most values are vLLM defaults. Note
# `enable_sleep_mode=True`, which free() relies on.
engine_args = AsyncEngineArgs(
            model='/mnt/nas/huggingface_hub/models--Qwen--Qwen2.5-32B-Instruct', served_model_name=None, tokenizer=None, hf_config_path=None, task='auto', skip_tokenizer_init=False, enable_prompt_embeds=False, tokenizer_mode='auto', trust_remote_code=False, allowed_local_media_path='', download_dir=None, load_format='auto', config_format='auto', dtype='bfloat16', kv_cache_dtype='auto', seed=0, max_model_len=10048, cuda_graph_sizes=[512], 
            pipeline_parallel_size=1, tensor_parallel_size=8, data_parallel_size=1, data_parallel_size_local=None, data_parallel_address=None, data_parallel_rpc_port=None, data_parallel_backend='mp', enable_expert_parallel=False, max_parallel_loading_workers=None, block_size=None, enable_prefix_caching=False, prefix_caching_hash_algo='builtin', disable_sliding_window=False, disable_cascade_attn=False, use_v2_block_manager=True, swap_space=4, cpu_offload_gb=0, gpu_memory_utilization=0.75, max_num_batched_tokens=8192, max_num_partial_prefills=1, max_long_partial_prefills=1, long_prefill_token_threshold=0, max_num_seqs=1024, max_logprobs=20, disable_log_stats=True, revision=None, code_revision=None, rope_scaling={}, rope_theta=None, hf_token=None, hf_overrides={}, tokenizer_revision=None, quantization=None, enforce_eager=False, max_seq_len_to_capture=8192, disable_custom_all_reduce=True, tokenizer_pool_size=0, tokenizer_pool_type='ray', tokenizer_pool_extra_config={}, limit_mm_per_prompt={}, mm_processor_kwargs=None, disable_mm_preprocessor_cache=False, enable_lora=False, enable_lora_bias=False, max_loras=1, max_lora_rank=16, fully_sharded_loras=False, max_cpu_loras=None, lora_dtype='auto', lora_extra_vocab_size=256, long_lora_scaling_factors=None, enable_prompt_adapter=False, max_prompt_adapters=1, max_prompt_adapter_token=0, device='auto', num_scheduler_steps=1, multi_step_stream_outputs=True, ray_workers_use_nsight=False, num_gpu_blocks_override=None, num_lookahead_slots=0, model_loader_extra_config={}, ignore_patterns=None, preemption_mode=None, scheduler_delay_factor=0.0, enable_chunked_prefill=True, disable_chunked_mm_input=False, disable_hybrid_kv_cache_manager=False, guided_decoding_backend='auto', guided_decoding_disable_fallback=False, guided_decoding_disable_any_whitespace=False, guided_decoding_disable_additional_properties=False, logits_processor_pattern=None, speculative_config=None, qlora_adapter_name_or_path=None, show_hidden_metrics_for_version=None, 
otlp_traces_endpoint=None, collect_detailed_traces=None, disable_async_output_proc=False, scheduling_policy='fcfs', scheduler_cls='vllm.core.scheduler.Scheduler', override_neuron_config={}, override_pooler_config=None, 
                                          worker_cls='auto', worker_extension_cls='', kv_transfer_config=None, kv_events_config=None, generation_config='auto', enable_sleep_mode=True, override_generation_config={'n': 1, 'logprobs': 0, 'repetition_penalty': 1.0, 'max_new_tokens': 2048, 'temperature': 1.0, 'top_k': -1, 'top_p': 1, 'ignore_eos': False}, model_impl='auto', calculate_kv_scales=False, additional_config={}, enable_reasoning=None, reasoning_parser='', use_tqdm_on_load=True, pt_load_map_location='cpu', enable_multimodal_encoder_data_parallel=False, disable_log_requests=False
        )

tokenizer = AutoTokenizer.from_pretrained("/mnt/nas/huggingface_hub/models--Qwen--Qwen2.5-32B-Instruct")

# LLM.__init__ does not accept the async-only `disable_log_requests` flag,
# so strip it from the dataclass dump before passing the rest through.
# pop() with a default avoids a KeyError if a vLLM upgrade drops the field.
args = asdict(engine_args)
args.pop("disable_log_requests", None)

llm = LLM(**args)

task = """viewcode creates pages for epub even if `viewcode_enable_epub=False` on `make html epub`
**Describe the bug**
viewcode creates pages for epub even if `viewcode_enable_epub=False` on `make html epub`

**To Reproduce**
```
$ make html epub
```

**Expected behavior**
module pages should not be created for epub by default.

**Your project**
No

**Screenshots**
No

**Environment info**
- OS: Mac
- Python version: 3.9.1
- Sphinx version: HEAD of 3.x
- Sphinx extensions:  sphinx.ext.viewcode
- Extra tools: No

**Additional context**
No


"""
memory = "尚无记忆内容"
observation = "本轮为第一轮，无执行结果"

messages = []
messages.append({"role":"system","content":SYSTEM_TEMPLATE})
messages.append({"role":"user","content":Template(instance_mem_template).render(task=task,
                                                                            memory=memory,
                                                                            obvervation=observation,
                                                                            **{},
                                                                            **os.environ)})

def gen():
    """Run one generation against a fixed, replayed prompt.

    Returns a 2-tuple: (generated text for the prompt, the sentinel string
    "dqqq").
    """
    # NOTE(review): these ids appear to be a captured chat-template
    # tokenization replayed verbatim so every call uses the exact same
    # prompt. The original code also re-tokenized `messages` here and then
    # immediately overwrote the result with this list — that dead
    # computation has been removed.
    prompt_ids = [151644, 8948, 198, 2610, 525, 264, 10950, 17847, 429, 646, 653, 4113, 13, 151645, 198, 151644, 872, 271, 2610, 525, 458, 15235, 17847, 25530, 304, 14719, 1495, 6351, 9079, 323, 23163, 29402, 11293, 13, 5209, 25470, 1795, 279, 2701, 7354, 323, 3561, 8502, 311, 1882, 1196, 9079, 13, 15692, 33621, 87896, 6262, 9608, 87896, 198, 87896, 1474, 5430, 25, 87896, 1651, 1851, 11450, 6816, 369, 63949, 1496, 421, 1565, 1050, 1851, 18988, 12476, 392, 5608, 63, 389, 1565, 6927, 5272, 63949, 3989, 334, 74785, 279, 9876, 59479, 1050, 1851, 11450, 6816, 369, 63949, 1496, 421, 1565, 1050, 1851, 18988, 12476, 392, 5608, 63, 389, 1565, 6927, 5272, 63949, 63, 871, 334, 1249, 3321, 47845, 59479, 73594, 319, 3, 1281, 5272, 63949, 319, 73594, 871, 334, 18896, 7709, 59479, 4352, 6816, 1265, 537, 387, 3465, 369, 63949, 553, 1638, 17825, 334, 7771, 2390, 59479, 2753, 871, 334, 91996, 34413, 59479, 2753, 871, 334, 12723, 3546, 59479, 12, 10085, 25, 7401, 319, 12, 13027, 2319, 25, 220, 18, 13, 24, 13, 16, 319, 12, 94193, 2319, 25, 33080, 315, 220, 18, 1993, 319, 12, 94193, 19721, 25, 220, 41722, 20014, 9220, 3792, 1851, 319, 12, 24992, 7375, 25, 2308, 871, 334, 29019, 2266, 59479, 2753, 201, 1022, 87896, 21291, 17097, 30928, 5714, 25, 87896, 4710, 87896, 20294, 13850, 25, 87896, 58230, 248, 42192, 101376, 43815, 271, 87896, 28892, 22452, 87896, 198, 16, 13, 87896, 38687, 25, 87896, 37427, 2986, 279, 1482, 3383, 25398, 11, 2500, 1995, 320, 16169, 3681, 16275, 323, 13850, 701, 323, 1128, 3880, 311, 387, 2814, 1790, 13, 9645, 697, 7274, 1882, 2878, 279, 366, 26865, 29, 14082, 624, 17, 13, 87896, 2512, 25, 87896, 1416, 279, 1482, 3019, 7460, 30220, 264, 29402, 3210, 11, 1992, 72363, 3243, 87896, 429, 3210, 2878, 23725, 1182, 35078, 4766, 279, 366, 1311, 29, 14082, 13, 576, 4453, 3383, 374, 6509, 5499, 4583, 979, 279, 1156, 1555, 315, 279, 3210, 594, 2550, 374, 364, 81969, 26315, 21767, 17282, 18330, 75872, 23981, 23569, 18, 13, 87896, 4289, 13850, 25, 87896, 29901, 892, 1376, 
1995, 3880, 311, 387, 34263, 369, 279, 1790, 4778, 320, 68, 1302, 2572, 1482, 5098, 11, 7907, 3059, 11, 4573, 1584, 568, 8116, 5612, 551, 432, 1667, 63594, 1376, 19083, 13530, 476, 5810, 4128, 11, 323, 1992, 432, 2878, 279, 366, 17269, 29, 14082, 13, 72363, 9112, 25, 87896, 1096, 374, 279, 1172, 6094, 4938, 1948, 498, 323, 279, 1790, 16230, 26, 4486, 21129, 6200, 1995, 382, 87896, 5097, 15042, 25, 87896, 198, 7771, 2033, 1969, 387, 72363, 6627, 398, 323, 21063, 87896, 304, 279, 2701, 3561, 13, 55547, 902, 1008, 2213, 476, 40841, 525, 5420, 1447, 13708, 766, 397, 7771, 7274, 1882, 1112, 320, 68, 1302, 2572, 1128, 3019, 279, 3383, 702, 61816, 311, 26, 279, 5795, 315, 279, 1482, 3019, 26, 3170, 3151, 2213, 304, 13850, 374, 1660, 6049, 26, 3170, 419, 3151, 3210, 374, 11882, 340, 522, 26865, 397, 27, 1311, 397, 73594, 21384, 198, 785, 3175, 12528, 3210, 311, 9026, 5023, 198, 13874, 3989, 522, 1311, 397, 13748, 4731, 397, 16196, 4938, 2213, 320, 68, 1302, 2572, 1537, 11946, 25, 15233, 1034, 362, 11, 1482, 11123, 25, 1882, 1034, 425, 11, 7907, 10931, 25, 508, 3006, 1566, 3909, 2546, 522, 17269, 397, 87896, 12925, 87896, 198, 13159, 421, 279, 3383, 4977, 4285, 11, 498, 72363, 24812, 87896, 728, 1526, 279, 7274, 3019, 624, 785, 2213, 304, 13850, 1265, 387, 72363, 40446, 1064, 11, 32930, 11, 323, 5390, 87896, 369, 17420, 7354, 624, 4854, 2033, 4778, 646, 1172, 6923, 72363, 603, 87896, 29402, 3210, 624, 7308, 979, 279, 3383, 374, 72363, 874, 49168, 27956, 87896, 1265, 279, 3210, 2550, 364, 81969, 26315, 21767, 17282, 18330, 75872, 23981, 6, 389, 1181, 1156, 1555, 624, 5404, 537, 1401, 1182, 518, 13656, 21276, 26, 1172, 8300, 311, 279, 3681, 4778, 594, 2550, 323, 279, 1482, 13850, 13, 151645, 198, 151644, 77091, 198]
    prompt_obj = TokensPrompt(
        prompt_token_ids=prompt_ids,
        multi_modal_data=None
    )

    print(f"prompt ids = {prompt_ids}")

    final_res = llm.generate(prompts=prompt_obj, sampling_params=sampling_params)

    print(f"@@@@@@@@@@@@@ \n Prompt: Hello, my name is, Generated text:\n{final_res[0].outputs[0].text}\n@@@@@@@@@@@@@@@@@")
    return (final_res[0].outputs[0].text,
            "dqqq"
            )

def free():
    """Cycle the engine through vLLM's sleep/wake round trip.

    Relies on `enable_sleep_mode=True` in the engine args above. The script
    calls gen() before and after this to check that generation still works
    once the engine has slept and woken up.
    """
    # llm.reset_prefix_cache()
    llm.sleep()
    llm.wake_up()
    # llm.reset_prefix_cache()

# Drive the experiment: generate once, cycle the engine through sleep/wake,
# generate again, then tear everything down before printing the second run's
# output. The first result is intentionally discarded.
warmup_output = gen()
free()
final_output = gen()

del llm
clean_up()

print("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~`\n")
print(final_output[0])
print("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~`\n")
print(final_output[1])
