// 流式消息处理示例
//
// 注意：这个示例演示了如何使用流式消息处理功能，但需要完整实现streaming模块才能运行。
// 目前，streaming模块已设计并开发完成，但需要更新Cargo.toml以添加必要的依赖。

use std::any::Any;
use std::time::{Duration, Instant};
use async_trait::async_trait;
use tokio::time::sleep;

use protoactor_rs::actor::{Actor, ActorContext, ActorError, DefaultActorContext, Props};
use protoactor_rs::system::ActorSystem;
use protoactor_rs::streaming::{
    StreamActor, StreamFactory, StreamHandler, StreamItem, StreamProcessor, 
    StreamPublisher, StreamSubscriber, create_stream
};

// A single test payload item flowing through the stream.
#[derive(Debug, Clone)]
struct DataItem {
    // Sequential index assigned by the generator (0..count).
    id: usize,
    // Human-readable payload; the generator formats it as "Item-{id}".
    value: String,
    // Creation time in whole seconds since the UNIX epoch.
    timestamp: i64,
}

// Outcome of processing one DataItem, produced by DataProcessorHandler.
#[derive(Debug, Clone)]
struct ProcessedResult {
    // `id` of the originating DataItem.
    item_id: usize,
    // The original `value` string, kept for reference.
    original_value: String,
    // Transformed value (the original prefixed with a processed marker).
    processed_value: String,
    // Wall-clock time spent handling this item, in milliseconds.
    processing_time_ms: u64,
}

// Actor that generates DataItems in batches and publishes them to a stream.
struct DataGeneratorActor {
    // Total number of items to generate.
    count: usize,
    // Number of items published per batch; assumed > 0 — TODO confirm callers.
    batch_size: usize,
    // Sink for generated items; generation is skipped entirely when `None`.
    publisher: Option<Box<dyn StreamPublisher<DataItem>>>,
}

#[async_trait]
impl Actor for DataGeneratorActor {
    type Context = DefaultActorContext;

    /// Announces startup and the number of items that will be produced.
    async fn started(&self, _ctx: &mut Self::Context) -> Result<(), ActorError> {
        println!("数据生成器Actor已启动，将生成 {} 个数据项", self.count);
        Ok(())
    }

    /// Any incoming message triggers a full generation run: `count` items are
    /// created in batches of `batch_size`, each batch is published to the
    /// stream, and the stream is completed at the end. A no-op when
    /// `publisher` is `None`.
    ///
    /// Parameters were renamed to `_ctx`/`_msg` because neither is used —
    /// this silences the unused-variable warnings the original emitted.
    async fn receive(&self, _ctx: &mut Self::Context, _msg: Box<dyn Any + Send>) -> Result<(), ActorError> {
        if let Some(publisher) = &self.publisher {
            println!("开始生成并发布数据...");
            let start_time = Instant::now();
            // Ceiling division: the final batch may hold fewer than batch_size items.
            let total_batches = (self.count + self.batch_size - 1) / self.batch_size;

            for batch in 0..total_batches {
                let start_idx = batch * self.batch_size;
                let end_idx = std::cmp::min(start_idx + self.batch_size, self.count);
                let mut batch_items = Vec::with_capacity(end_idx - start_idx);

                for id in start_idx..end_idx {
                    let item = DataItem {
                        id,
                        value: format!("Item-{}", id),
                        timestamp: std::time::SystemTime::now()
                            .duration_since(std::time::UNIX_EPOCH)
                            .unwrap()
                            .as_secs() as i64,
                    };
                    batch_items.push(item);
                }

                // Publish the whole batch, mapping stream errors into ActorError.
                publisher.publish_all(batch_items).await
                    .map_err(|e| ActorError::Unknown(e.to_string()))?;

                println!("已发布批次 {}/{}: 项目 {} 到 {}", 
                    batch + 1, total_batches, start_idx, end_idx - 1);

                // Brief pause between batches to avoid flooding the stream.
                sleep(Duration::from_millis(50)).await;
            }

            // Mark the stream as complete (no further items will follow).
            publisher.complete().await
                .map_err(|e| ActorError::Unknown(e.to_string()))?;

            println!("数据生成完成! 总耗时: {:?}", start_time.elapsed());
        }

        Ok(())
    }
}

// Stream handler that simulates per-item work and tracks throughput stats.
struct DataProcessorHandler {
    // Number of items handled so far.
    processed_count: usize,
    // Construction time; used as the baseline for rate calculations.
    start_time: Instant,
    // Every result produced so far (kept in memory for the final summary).
    results: Vec<ProcessedResult>,
}

impl DataProcessorHandler {
    fn new() -> Self {
        Self {
            processed_count: 0,
            start_time: Instant::now(),
            results: Vec::new(),
        }
    }
}

#[async_trait]
impl StreamHandler<DataItem> for DataProcessorHandler {
    /// Processes one stream item: simulates work, records a ProcessedResult,
    /// and prints a throughput report every 1000 items.
    async fn handle(&mut self, _ctx: &mut impl ActorContext, item: StreamItem<DataItem>) -> Result<(), ActorError> {
        let process_start = Instant::now();

        // Every 10th item sleeps briefly to simulate an expensive operation.
        if item.value.id % 10 == 0 {
            sleep(Duration::from_millis(20)).await;
        }

        // Build the processing result.
        let processed_value = format!("已处理: {}", item.value.value);
        let processing_time = process_start.elapsed().as_millis() as u64;

        let result = ProcessedResult {
            item_id: item.value.id,
            original_value: item.value.value.clone(),
            processed_value,
            processing_time_ms: processing_time,
        };

        // Record the result and bump the counter.
        self.results.push(result);
        self.processed_count += 1;

        // Periodic progress report (once per 1000 items).
        if self.processed_count % 1000 == 0 {
            let elapsed = self.start_time.elapsed();
            let rate = self.processed_count as f64 / elapsed.as_secs_f64();

            println!("已处理 {} 个项目，速率: {:.2} 项/秒", 
                self.processed_count, rate);
        }

        Ok(())
    }

    /// Prints the final throughput summary when the stream completes.
    async fn on_complete(&mut self, _ctx: &mut impl ActorContext) -> Result<(), ActorError> {
        let elapsed = self.start_time.elapsed();
        let rate = self.processed_count as f64 / elapsed.as_secs_f64();

        println!("\n流处理完成!");
        println!("总共处理: {} 个项目", self.processed_count);
        println!("总耗时: {:?}", elapsed);
        println!("平均处理速率: {:.2} 项/秒", rate);
        // Guard against division by zero: the original printed "NaN" for the
        // per-item average when the stream completed with no items processed.
        if self.processed_count > 0 {
            println!("平均每项处理时间: {:.2} 毫秒", 
                self.results.iter().map(|r| r.processing_time_ms as f64).sum::<f64>() / self.processed_count as f64);
        }

        Ok(())
    }

    /// Logs stream-level errors; the handler itself never propagates them.
    async fn on_error(&mut self, _ctx: &mut impl ActorContext, error: String) -> Result<(), ActorError> {
        println!("流处理错误: {}", error);
        Ok(())
    }
}

// Stream handler that groups ProcessedResults into buckets of 100 by id range.
struct ResultAggregatorHandler {
    // In-flight buckets: group id (item_id / 100) -> results collected so far.
    group_results: std::collections::HashMap<usize, Vec<ProcessedResult>>,
    // Number of full (100-item) groups that have been summarized and removed.
    processed_groups: usize,
}

impl ResultAggregatorHandler {
    fn new() -> Self {
        Self {
            group_results: std::collections::HashMap::new(),
            processed_groups: 0,
        }
    }
}

#[async_trait]
impl StreamHandler<ProcessedResult> for ResultAggregatorHandler {
    /// Buckets each result by `item_id / 100`; once a bucket reaches 100
    /// entries it is summarized, printed, and removed from the map.
    async fn handle(&mut self, _ctx: &mut impl ActorContext, item: StreamItem<ProcessedResult>) -> Result<(), ActorError> {
        // Group items in blocks of 100 consecutive ids.
        let group_id = item.value.item_id / 100;

        // Single map lookup via the entry API — the original inserted and
        // then re-looked-up the key with `get(&group_id).unwrap()`.
        let group_len = {
            let group = self.group_results.entry(group_id).or_insert_with(Vec::new);
            group.push(item.value.clone());
            group.len()
        };

        // A full group: compute its statistics and free the bucket.
        if group_len >= 100 {
            let results = self.group_results.remove(&group_id)
                .expect("bucket was populated just above");

            let avg_time = results.iter()
                .map(|r| r.processing_time_ms as f64)
                .sum::<f64>() / results.len() as f64;

            println!("完成组 #{}: {} 项, 平均处理时间: {:.2}ms", 
                group_id, results.len(), avg_time);

            self.processed_groups += 1;
        }

        Ok(())
    }

    /// Prints the aggregation summary, including any incomplete groups.
    async fn on_complete(&mut self, _ctx: &mut impl ActorContext) -> Result<(), ActorError> {
        println!("\n聚合处理完成!");
        println!("已处理 {} 个完整组", self.processed_groups);

        // Report the leftover buckets that never reached 100 items.
        let remaining_groups = self.group_results.len();
        if remaining_groups > 0 {
            println!("有 {} 个不完整组:", remaining_groups);

            for (group_id, results) in &self.group_results {
                println!("  组 #{}: {} 项", group_id, results.len());
            }
        }

        Ok(())
    }

    /// Logs stream-level errors; the handler itself never propagates them.
    async fn on_error(&mut self, _ctx: &mut impl ActorContext, error: String) -> Result<(), ActorError> {
        println!("聚合处理错误: {}", error);
        Ok(())
    }
}

// Entry point: wires the generator actor to a stream and a processor actor,
// runs the demo for a fixed time window, then shuts everything down.
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    println!("ProtoActor-RS 流式处理示例");

    // Boot the actor system and the streaming infrastructure.
    let system = ActorSystem::new()?;
    let factory = StreamFactory::new(system.clone());

    // A stream of DataItems with a buffer capacity of 1000.
    let stream: StreamProcessor<DataItem> = factory.create_stream("data_stream", 1000).await;
    let data_publisher = stream.publisher();

    // Producer side: an actor that emits 10_000 items in batches of 100.
    println!("创建数据生成器Actor...");
    let generator_actor = DataGeneratorActor {
        count: 10000,
        batch_size: 100,
        publisher: Some(Box::new(data_publisher)),
    };

    let props = Props::from_producer(move || generator_actor);
    let generator_pid = system.spawn::<DataGeneratorActor>(props).await?;

    // Consumer side: a stream actor driven by DataProcessorHandler.
    println!("创建数据处理器Actor...");
    let handler = DataProcessorHandler::new();
    let stream_id = stream.metadata().await.id;
    let processor_pid = factory.create_stream_actor(&stream_id, handler, 100).await?;

    // Kick off generation — the generator reacts to any message.
    println!("开始流处理...");
    system.send(&generator_pid, Box::new("开始生成")).await?;

    // Give the pipeline a fixed window to drain before shutting down.
    println!("主线程等待处理完成...");
    sleep(Duration::from_secs(15)).await;

    // Tear down both actors.
    system.stop(&generator_pid).await?;
    system.stop(&processor_pid).await?;

    println!("流处理示例完成!");
    Ok(())
}