use anyhow::{Context, Result};
use axum::{
    extract::ws::{Message, WebSocket, WebSocketUpgrade},
    extract::State,
    http::{Method, StatusCode},
    response::Json,
    routing::{get, post},
    serve, Router,
};
use base64::Engine;
use dotenv::dotenv;
use futures::StreamExt;
use serde::{Deserialize, Serialize};
use std::env;
use std::sync::Arc;
use tokio::net::TcpListener;
use tower_http::cors::{Any, CorsLayer};
use tracing_subscriber::{layer::SubscriberExt, util::SubscriberInitExt};

mod operations;

// Shared application state, wrapped in an `Arc` and handed to every handler
// via axum's `State` extractor.
#[derive(Clone)]
struct AppState {
    // Tencent Cloud text-translation client.
    translator: operations::translator::Translator,
    // iFlytek (XFYun) speech-synthesis client.
    synthesizer: operations::speechsynthesis::SpeechSynthesizer,
}

// Request/response payloads for the HTTP API.

// JSON body for POST /translate.
#[derive(Deserialize)]
struct TranslateRequest {
    text: String,
    source_lang: Option<String>,  // source language; optional, defaults to "auto"
    target_lang: Option<String>,  // target language; optional, defaults to "zh"
}

// JSON response for POST /translate.
#[derive(Serialize)]
struct TranslateResponse {
    translated_text: String,
}

// JSON body for POST /synthesize.
#[derive(Deserialize)]
struct SynthesizeRequest {
    text: String,
}

// JSON response for POST /synthesize.
#[derive(Serialize)]
struct SynthesizeResponse {
    message: String,
    audio_data: String,  // base64-encoded audio bytes
    audio_format: String, // MIME type of the audio ("audio/wav")
}

#[tokio::main]
async fn main() -> Result<()> {
    // 初始化日志
    tracing_subscriber::registry()
        .with(tracing_subscriber::EnvFilter::new(
            std::env::var("RUST_LOG").unwrap_or_else(|_| "info".into()),
        ))
        .with(tracing_subscriber::fmt::layer())
        .init();

    dotenv().ok();
    
    // 初始化翻译功能
    let secret_id = env::var("TENCENT_SECRET_ID").expect("TENCENT_SECRET_ID not set");
    let secret_key = env::var("TENCENT_SECRET_KEY").expect("TENCENT_SECRET_KEY not set");
    let translator = operations::translator::Translator::new(secret_id, secret_key);

    // 初始化语音合成功能
    let app_id = env::var("XFYUN_APP_ID").expect("XFYUN_APP_ID not set");
    let api_key = env::var("XFYUN_API_KEY").expect("XFYUN_API_KEY not set");
    let api_secret = env::var("XFYUN_API_SECRET").expect("XFYUN_API_SECRET not set");
    let synthesizer = operations::speechsynthesis::SpeechSynthesizer::new(app_id, api_key, api_secret);

    // 创建应用状态
    let state = Arc::new(AppState {
        translator,
        synthesizer,
    });

    // 配置CORS
    let cors = CorsLayer::new()
        .allow_methods([Method::GET, Method::POST, Method::OPTIONS])
        .allow_headers([axum::http::header::CONTENT_TYPE])
        .allow_origin(Any);

    // 创建路由
    let app = Router::new()
        .route("/translate", post(translate_handler))
        .route("/synthesize", post(synthesize_handler))
        .route("/ws_tts", get(ws_tts_handler))
        .layer(cors)
        .with_state(state);

    let port = env::var("PORT").unwrap_or_else(|_| "3001".to_string());
    let addr = format!("127.0.0.1:{}", port);
    tracing::info!("服务器启动在 http://{}", addr);
    let listener = TcpListener::bind(&addr).await?;
    serve(listener, app).await?;

    Ok(())
}

/// Handler for POST /translate.
///
/// Translates `payload.text` from `source_lang` (default "auto") to
/// `target_lang` (default "zh") and returns the translated text as JSON.
/// Translation failures are logged and surfaced as HTTP 500.
async fn translate_handler(
    State(state): State<Arc<AppState>>,
    Json(payload): Json<TranslateRequest>,
) -> Result<Json<TranslateResponse>, StatusCode> {
    // Borrow the optional language fields instead of allocating owned defaults.
    let src = payload.source_lang.as_deref().unwrap_or("auto");
    let dst = payload.target_lang.as_deref().unwrap_or("zh");

    state
        .translator
        .translate_text_with_langs(&payload.text, src, dst)
        .await
        .map(|translated_text| Json(TranslateResponse { translated_text }))
        .map_err(|e| {
            tracing::error!("翻译失败: {}", e);
            StatusCode::INTERNAL_SERVER_ERROR
        })
}

/// Handler for POST /synthesize.
///
/// Synthesizes `payload.text` to WAV audio and returns it base64-encoded in a
/// JSON envelope. Synthesis failures are logged and surfaced as HTTP 500.
async fn synthesize_handler(
    State(state): State<Arc<AppState>>,
    Json(payload): Json<SynthesizeRequest>,
) -> Result<Json<SynthesizeResponse>, StatusCode> {
    // Run the synthesis; convert any error to a logged 500 up front so the
    // success path below stays flat.
    let wav_bytes = state
        .synthesizer
        .synthesize_to_wav_bytes(&payload.text)
        .await
        .map_err(|e| {
            tracing::error!("语音合成失败: {}", e);
            StatusCode::INTERNAL_SERVER_ERROR
        })?;

    // Base64-encode the raw WAV so it can travel inside a JSON string.
    let encoded = base64::engine::general_purpose::STANDARD.encode(&wav_bytes);
    Ok(Json(SynthesizeResponse {
        message: "语音合成成功".to_string(),
        audio_data: encoded,
        audio_format: "audio/wav".to_string(),
    }))
}

/// Handler for GET /ws_tts: upgrades the HTTP connection to a WebSocket and
/// hands the socket to the streaming-TTS session loop.
async fn ws_tts_handler(
    ws: WebSocketUpgrade,
    State(state): State<Arc<AppState>>,
) -> impl axum::response::IntoResponse {
    // `move` transfers the Arc'd state into the upgrade callback.
    ws.on_upgrade(move |ws_conn| handle_tts_ws(ws_conn, state))
}

/// Streaming-TTS WebSocket session.
///
/// Protocol: the client sends one text message containing the text to
/// synthesize; the server replies with a sequence of binary PCM frames,
/// reporting any error as a text message, then closes the socket.
async fn handle_tts_ws(mut socket: WebSocket, state: Arc<AppState>) {
    // Step 1: the first frame must be a text message with the text to speak;
    // anything else (close, binary, transport error) ends the session.
    let text = match socket.recv().await {
        Some(Ok(Message::Text(t))) => t,
        _ => return,
    };

    // Step 2: open the streaming synthesis; on failure report it to the
    // client as a text frame and bail out.
    let mut pcm_frames = match state.synthesizer.stream_pcm_frames(&text).await {
        Ok(s) => s,
        Err(e) => {
            let _ = socket.send(Message::Text(format!("TTS错误: {}", e))).await;
            return;
        }
    };

    // Step 3: forward PCM chunks as they arrive. A send failure means the
    // client went away; a stream error is reported once, then we stop.
    loop {
        let Some(frame) = pcm_frames.next().await else {
            break;
        };
        match frame {
            Ok(chunk) => {
                if socket.send(Message::Binary(chunk)).await.is_err() {
                    break;
                }
            }
            Err(e) => {
                let _ = socket.send(Message::Text(format!("TTS流错误: {}", e))).await;
                break;
            }
        }
    }

    // Best-effort close; the peer may already be gone.
    let _ = socket.close().await;
}
