import os
import time
import tempfile
import yaml
from multiprocessing import Event, Process

import logging


# Configure LMCache logging; these must be set before LMCache is imported below.
os.environ["LMCACHE_LOG_LEVEL"] = "DEBUG"
os.environ["LMCACHE_ENABLE_DETAILED_LOGGING"] = "True"

# Set environment variables before importing vLLM to avoid model config conflicts
os.environ["TRANSFORMERS_OFFLINE"] = "1"
os.environ["HF_HUB_OFFLINE"] = "1"

from lmcache.integration.vllm.utils import ENGINE_NAME
from lmcache.v1.cache_engine import LMCacheEngineBuilder

from vllm import LLM, SamplingParams
from vllm.config import KVTransferConfig

# P2P LMCache configuration
# Redis lookup server port (must match your Redis server)
redis_port = 8100
# Distributed server ports for each instance
distributed_port_1 = 8200
distributed_port_2 = 8201

# Use experimental features in LMCache
os.environ["LMCACHE_USE_EXPERIMENTAL"] = "True"
# LMCache is set to use 256 tokens per chunk
os.environ["LMCACHE_CHUNK_SIZE"] = "256"
# Enable local CPU backend in LMCache for P2P
os.environ["LMCACHE_LOCAL_CPU"] = "True"
# Set local CPU memory buffer limit to 5.0 GB
os.environ["LMCACHE_MAX_LOCAL_CPU_SIZE"] = "5.0"
# Enable P2P sharing
os.environ["LMCACHE_ENABLE_P2P"] = "True"
# Set Redis lookup server URL
os.environ["LMCACHE_LOOKUP_URL"] = f"localhost:{redis_port}"

# Fix hashing so cache keys are identical across the store/retrieve processes.
os.environ["PYTHONHASHSEED"] = "0"

# Long repeated prompts so the KV cache is large enough to measure transfer cost.
prompts = [
    "Hello, how are you?" * 1000,
    "Nice to meet you. What's your name?" * 500
]

# Local path to the model weights used by both vLLM instances.
model_path = "/data1/DeepSeek-R1-Distill-Qwen-32B"

# GPU pairs assigned to the two instances (tensor_parallel_size=2 each).
available_gpus = ["0,1", "2,3"]

def create_lmcache_config(instance_id, distributed_port, enable_disk=False, disk_path=None, force_disk=False):
    """Create a temporary LMCache YAML configuration file for P2P sharing.

    Args:
        instance_id: Label for the instance (not written into the config;
            kept for call-site clarity).
        distributed_port: Port for this instance's distributed P2P server.
        enable_disk: If True (and disk_path is given), enable the local disk backend.
        disk_path: Directory backing the local disk cache.
        force_disk: If True, shrink the CPU cache to 1 GB so entries spill to disk.

    Returns:
        Path to the generated temporary YAML file. The caller is responsible
        for deleting it (see os.unlink in the run_* functions).
    """
    config = {
        "chunk_size": 256,
        "local_cpu": True,
        "max_local_cpu_size": 5.0,
        # P2P configuration
        "enable_p2p": True,
        "lookup_url": f"localhost:{redis_port}",
        "distributed_url": f"localhost:{distributed_port}",
    }

    if enable_disk and disk_path:
        config["local_disk"] = disk_path
        config["max_local_disk_size"] = 10.0

        if force_disk:
            # Starve the CPU cache so data is forced onto the disk backend.
            config["max_local_cpu_size"] = 1
            print(f"Force disk mode enabled: CPU cache limited to {config['max_local_cpu_size']} GB")

    # delete=False so the file survives close() and can be handed to the
    # engine via the LMCACHE_CONFIG_FILE environment variable; the context
    # manager guarantees the handle is closed even if yaml.dump raises.
    with tempfile.NamedTemporaryFile(mode='w', suffix='.yaml', delete=False) as config_file:
        yaml.dump(config, config_file)
    return config_file.name


def run_store_with_performance_monitoring(store_done, prompts, enable_disk=False, disk_path=None, force_disk=False):
    """Run the first vLLM instance that stores KV cache with performance monitoring.

    Args:
        store_done: multiprocessing.Event signalled when this instance is done,
            releasing the retrieve process.
        prompts: List of prompt strings to generate from.
        enable_disk, disk_path, force_disk: Forwarded to create_lmcache_config().
    """
    os.environ["CUDA_VISIBLE_DEVICES"] = available_gpus[0]

    # Create P2P config for instance 1
    config_file = create_lmcache_config("instance1", distributed_port_1, enable_disk, disk_path, force_disk)
    os.environ["LMCACHE_CONFIG_FILE"] = config_file

    try:
        sampling_params = SamplingParams(temperature=0, top_p=0.95, max_tokens=10)

        ktc = KVTransferConfig(kv_connector="LMCacheConnectorV1", kv_role="kv_both")
        llm = LLM(
            model=model_path,
            kv_transfer_config=ktc,
            max_model_len=13000,
            gpu_memory_utilization=0.78,
            enforce_eager=True,
            tensor_parallel_size=2,
            enable_prefix_caching=False
        )

        start_time = time.perf_counter()
        # NOTE(review): generate() is called twice back to back; presumably the
        # first call populates the KV cache and the second exercises the stored
        # entries — confirm the duplication is intentional.
        outputs = llm.generate(prompts, sampling_params)
        outputs = llm.generate(prompts, sampling_params)
        end_time = time.perf_counter()

        for output in outputs:
            generated_text = output.outputs[0].text
            print(f"Generated text: {generated_text!r}")

        print(f"KV cache store completed in {end_time - start_time:.3f} seconds")
        print("KV cache store is finished.")
    finally:
        # Always signal the retrieve process — even on failure — so it does not
        # block forever on store_done.wait(); likewise always release the
        # LMCache backend and the temporary config file.
        store_done.set()
        LMCacheEngineBuilder.destroy(ENGINE_NAME)
        os.unlink(config_file)


def run_retrieve_with_performance_monitoring(store_done, prompts, timeout=2, enable_disk=False, disk_path=None, force_disk=False):
    """Run the second vLLM instance that retrieves KV cache via P2P with performance monitoring.

    Args:
        store_done: multiprocessing.Event set by the store process when its
            KV cache is ready.
        prompts: List of prompt strings (same as the store instance, so the
            lookup hits the shared cache).
        timeout: Extra seconds to sleep after store_done fires, giving the
            store side time to finish publishing.
        enable_disk, disk_path, force_disk: Forwarded to create_lmcache_config().
    """
    os.environ["CUDA_VISIBLE_DEVICES"] = available_gpus[1]

    # Create P2P config for instance 2
    config_file = create_lmcache_config("instance2", distributed_port_2, enable_disk, disk_path, force_disk)
    os.environ["LMCACHE_CONFIG_FILE"] = config_file

    try:
        sampling_params = SamplingParams(temperature=0, top_p=0.95, max_tokens=10)

        ktc = KVTransferConfig(kv_connector="LMCacheConnectorV1", kv_role="kv_both")
        llm = LLM(
            model=model_path,
            kv_transfer_config=ktc,
            max_model_len=13000,
            gpu_memory_utilization=0.78,
            enforce_eager=True,
            tensor_parallel_size=2,
            enable_prefix_caching=False
        )

        print("Waiting for KV cache store to finish...")
        store_done.wait()
        time.sleep(timeout)

        start_time = time.perf_counter()
        outputs = llm.generate(prompts, sampling_params)
        end_time = time.perf_counter()

        for output in outputs:
            generated_text = output.outputs[0].text
            print(f"Generated text: {generated_text!r}")

        print(f"KV cache retrieve completed in {end_time - start_time:.3f} seconds")
    finally:
        # Release the LMCache backend and remove the temporary config file
        # even if model construction or generation raises.
        LMCacheEngineBuilder.destroy(ENGINE_NAME)
        os.unlink(config_file)
    
def test_forced_disk_cache_performance():
    """Run the store/retrieve pair with force_disk=True and clean up afterwards.

    Each instance gets its own temporary disk-cache directory; the tiny
    (1 GB) CPU cache forces KV entries onto disk so the disk backend is
    actually exercised.
    """
    disk_cache_path1 = tempfile.mkdtemp(prefix="lmcache_forced_disk1_")
    print(f"Created forced disk cache directory: {disk_cache_path1}")
    disk_cache_path2 = tempfile.mkdtemp(prefix="lmcache_forced_disk2_")
    print(f"Created forced disk cache directory: {disk_cache_path2}")

    store_done = Event()

    # Create processes with force_disk=True
    store_process = Process(target=run_store_with_performance_monitoring,
                            args=(store_done, prompts, True, disk_cache_path1, True))
    retrieve_process = Process(target=run_retrieve_with_performance_monitoring,
                               args=(store_done, prompts, 2, True, disk_cache_path2, True))

    try:
        store_process.start()
        retrieve_process.start()

        store_process.join()
        retrieve_process.join()

    except KeyboardInterrupt:
        print("\nInterrupted by user. Cleaning up...")
    except Exception as e:
        print(f"Error occurred: {e}")
    finally:
        # Terminate any still-running worker and reap it so no zombie is left.
        for proc in (store_process, retrieve_process):
            if proc.is_alive():
                proc.terminate()
                proc.join()

        import shutil
        # Clean each directory independently so a failure on the first one
        # does not skip cleanup of the second.
        for cache_dir in (disk_cache_path1, disk_cache_path2):
            try:
                shutil.rmtree(cache_dir)
                print(f"Cleaned up forced disk cache directory: {cache_dir}")
            except Exception as e:
                print(f"Failed to clean up forced disk cache directory: {e}")


def check_redis_connection():
    """Return True when a Redis server answers PING on `redis_port`, else False."""
    try:
        # Import lazily so the script can still report a helpful failure
        # message when the redis package itself is missing.
        import redis
        client = redis.Redis(host='localhost', port=redis_port, socket_connect_timeout=1)
        client.ping()
    except Exception as e:
        print(f"✗ Redis server is not running on port {redis_port}")
        print(f"Error: {e}")
        return False
    print(f"✓ Redis server is running on port {redis_port}")
    return True


def main():
    """Entry point: verify Redis is up, flush it, then run the forced-disk test."""
    if not check_redis_connection():
        print("\nPlease start Redis server first:")
        return

    # Start from an empty lookup table so stale keys from previous runs
    # cannot satisfy P2P lookups; best-effort only.
    try:
        import redis
        redis.Redis(host='localhost', port=redis_port, socket_connect_timeout=5).flushall()
    except Exception as e:
        print(f"✗ Fail to clear Redis: {e}")

    test_forced_disk_cache_performance()
    


# Guard is required: multiprocessing workers re-import this module on spawn.
if __name__ == "__main__":
    main() 