use std::collections::{HashMap, VecDeque};
use std::sync::{Arc, Mutex};  

/// A thread-safe LRU (least-recently-used) cache from `String` keys to
/// `String` values.
///
/// `cache` holds the key/value pairs and `order` tracks recency:
/// front = least recently used, back = most recently used. Every method
/// locks `cache` before `order`, so the two locks cannot deadlock against
/// each other.
///
/// NOTE(review): instances are shared via `Arc<LRUCache>` by the callers in
/// this file, so the inner `Arc` wrappers on the fields look redundant —
/// plain `Mutex<…>` would suffice. Kept as-is in case unseen code clones a
/// field's `Arc` directly; confirm before simplifying.
struct LRUCache {
    /// Maximum number of entries held before the LRU entry is evicted.
    capacity: usize,
    /// Key/value storage.
    cache: Arc<Mutex<HashMap<String, String>>>,
    /// Access-order queue (this field resolves the original TODO about
    /// tracking access order).
    order: Arc<Mutex<VecDeque<String>>>,
}

impl LRUCache {
    /// Creates an empty cache holding at most `capacity` entries.
    fn new(capacity: usize) -> Self {
        LRUCache {
            capacity,
            cache: Arc::new(Mutex::new(HashMap::with_capacity(capacity))),
            order: Arc::new(Mutex::new(VecDeque::with_capacity(capacity))),
        }
    }

    /// Returns a clone of the value for `key` (or `None` if absent) and
    /// marks the key as most recently used.
    fn get(&self, key: &str) -> Option<String> {
        let cache = self.cache.lock().unwrap();
        let mut order = self.order.lock().unwrap();

        let value = cache.get(key)?.clone();

        // Move the key to the most-recently-used end of the queue.
        // (O(n) scan — fine for small capacities; a position index would be
        // needed for large caches.)
        if let Some(pos) = order.iter().position(|k| k == key) {
            order.remove(pos);
        }
        order.push_back(key.to_string());

        Some(value)
    }

    /// Inserts or updates `key`, marking it most recently used. When a NEW
    /// key would exceed `capacity`, the least-recently-used entry is evicted.
    ///
    /// Bug fix vs. the original: the old code evicted BEFORE checking whether
    /// `key` already existed, so overwriting an existing key in a full cache
    /// could evict an unrelated entry and shrink the cache below capacity.
    fn put(&self, key: String, value: String) {
        // A zero-capacity cache stores nothing (the original would still
        // insert one entry in this case).
        if self.capacity == 0 {
            return;
        }

        let mut cache = self.cache.lock().unwrap();
        let mut order = self.order.lock().unwrap();

        if cache.contains_key(&key) {
            // Updating an existing key: only refresh its recency position.
            if let Some(pos) = order.iter().position(|k| k == &key) {
                order.remove(pos);
            }
        } else if cache.len() >= self.capacity {
            // Inserting a new key into a full cache: evict the LRU entry.
            if let Some(oldest) = order.pop_front() {
                cache.remove(&oldest);
            }
        }

        cache.insert(key.clone(), value);
        order.push_back(key);
    }
}

#[test]
fn test_thread_safe_lru() {
    // Capacity-2 cache shared across threads via Arc.
    let cache = Arc::new(LRUCache::new(2));
    // Multi-threaded read/write test (original note: left for the student to design).

    // NOTE(review): `map` is lazy — each thread is only spawned when the
    // `for t in threads` loop below pulls the next element, and it is joined
    // immediately afterwards. The five closures therefore execute strictly in
    // order 0..=4, which is exactly what makes the assertions deterministic.
    // Collecting into a Vec first would make the threads truly concurrent
    // (and the order-dependent asserts racy), so this test exercises the
    // lock-based API but not real concurrency — confirm which was intended.
    let threads = (0..5).map(|i| {
        let cache = Arc::clone(&cache);
        std::thread::spawn(move || {
            match i {
                0 => cache.put("key0".into(), "val0".into()),
                1 => cache.put("key1".into(), "val1".into()),
                2 => { 
                    let _ = cache.get("key0"); // touch key0 so it becomes most recently used
                    println!("Accessed key0");
                },
                3 => {
                    println!("Inserting key2");
                    cache.put("key2".into(), "val2".into()); // should evict key1 (the LRU entry)
                },
                4 => {
                    println!("Checking keys");
                    assert!(cache.get("key0").is_some(), "key0 should exist");
                    assert!(cache.get("key2").is_some(), "key2 should exist");
                },
                _ => unreachable!()
            }
        })
    });
    
    // Spawning and joining happen here, one thread at a time (see note above).
    for t in threads {
        t.join().unwrap();
    }
    
    // Inspect internal state directly: key0 and key2 survive, key1 was evicted.
    let cache_guard = cache.cache.lock().unwrap();
    assert!(cache_guard.contains_key("key0"), "key0 should remain");
    assert!(!cache_guard.contains_key("key1"), "key1 should be evicted");
    assert!(cache_guard.contains_key("key2"), "key2 should exist");
    assert_eq!(cache_guard.len(), 2);
}

// Demo entry point.
// Small demo of LRU behavior with debug output of the internal state.
fn main() {
    // Bug fix: the original used capacity 3, so the three inserts never
    // triggered an eviction and the program printed `k1: Some("v1")`,
    // contradicting the expectations written in its own comments. With
    // capacity 2, inserting k3 evicts k1 as intended.
    let cache = Arc::new(LRUCache::new(2));

    cache.put("k1".into(), "v1".into()); // insert k1
    cache.put("k2".into(), "v2".into()); // insert k2 (cache now full)
    cache.put("k3".into(), "v3".into()); // evicts k1, the least recently used

    println!("=== 最终状态 ===");
    println!("k1: {:?}", cache.get("k1")); // expected: None
    println!("k2: {:?}", cache.get("k2")); // expected: Some("v2")
    println!("k3: {:?}", cache.get("k3")); // expected: Some("v3")

    // Debug output to verify the internal state matches expectations.
    let cache_data = cache.cache.lock().unwrap();
    let order_data = cache.order.lock().unwrap();
    println!("\n[调试信息]");
    println!("缓存内容: {:?}", *cache_data);
    println!("访问顺序: {:?}", *order_data);
}