#include "logger_api.hpp"
#include "core/async_worker.hpp"
#include "core/log_context.hpp"  
#include "utils/context_utils.hpp"
#include <iostream>   // Error output during initialization/shutdown
#include <stdexcept>  // For throwing exceptions
#include <chrono>
#include <nlohmann/json.hpp>

// Constructor: initializes all members and starts the async worker.
//
// @param formatter            formatter applied to every entry (must not be null)
// @param sinks                output sinks; null entries are skipped
// @param main_queue_size      capacity hint for the lock-free main queue
// @param fallback_queue_size  capacity of the blocking fallback queue
// @param flush_interval       periodic flush interval for the async worker
// @throws std::invalid_argument if formatter is null
Logger::Logger(std::shared_ptr<IFormatter> formatter,
               const std::vector<std::shared_ptr<ISink>>& sinks,
               size_t main_queue_size,
               size_t fallback_queue_size,
               std::chrono::milliseconds flush_interval)
    : m_sinks(sinks)
    , m_formatter(std::move(formatter))
    , m_currentLevel(LogLevel::Info) {
    
    if (!m_formatter) {
        throw std::invalid_argument("Logger: Formatter cannot be null.");
    }

    // Create the queues.
    m_mainQueue = std::make_unique<moodycamel::ConcurrentQueue<LogEntry>>(main_queue_size);
    m_fallbackQueue = std::make_unique<BlockingQueue<LogEntry>>(fallback_queue_size);

    // Create and start the AsyncWorker.
    m_asyncWorker = std::make_unique<AsyncWorker>(*m_mainQueue, *m_fallbackQueue, m_formatter, flush_interval);

    // Register all sinks with the AsyncWorker.
    // Skip null sinks here, consistent with the null guard in log_sync().
    for (const auto& sink : m_sinks) {
        if (sink) {
            m_asyncWorker->addSink(sink);
        }
    }
}


// Logger destructor.
// Attempts to dump the backtrace buffer before shutting down.
//
// Shutdown order (order-sensitive — do not reorder without checking AsyncWorker):
//   1. dump the backtrace buffer to stderr while the formatter is still usable,
//   2. destroy the AsyncWorker (its destructor is expected to drain/flush
//      remaining entries on the consumer thread — NOTE(review): not visible
//      here, confirm in async_worker.hpp),
//   3. shut down the fallback queue to release any blocked producers.
Logger::~Logger() {
    if (m_asyncWorker) {
        // Dump the backtrace log before the AsyncWorker stops.
        dumpBacktrace(); 
        // Remaining logs should not be flushed by this (main) thread; instead
        // signal the async consumer and let it handle the drain.
        m_asyncWorker.reset(); // Triggers the AsyncWorker destructor.
    }
    if (m_fallbackQueue) {
        m_fallbackQueue->shutdown();
    }
    std::cerr << "Logger instance destroyed." << std::endl;
}


// Atomically updates the minimum severity that will be logged.
// Relaxed ordering is sufficient: the level is an independent flag,
// not used to publish other data.
void Logger::setLevel(LogLevel level) { m_currentLevel.store(level, std::memory_order_relaxed); }


// Returns true when an entry of the given severity should be emitted:
// logging is not switched off and the severity is at or above the threshold.
bool Logger::shouldLog(LogLevel level) const {
    const LogLevel threshold = m_currentLevel.load(std::memory_order_relaxed);
    if (threshold == LogLevel::Off) {
        return false;
    }
    return static_cast<int>(level) >= static_cast<int>(threshold);
}


// Returns the name of the formatter in use.
// m_formatter is guaranteed non-null by the constructor.
std::string Logger::getFormatterType() {
    const auto& fmt = m_formatter;
    return fmt->getName();
}


// Enables (size > 0) or disables (size == 0) the in-memory backtrace ring.
// Any previously buffered entries are discarded.
void Logger::enableBacktrace(size_t size) {
    std::lock_guard<std::mutex> guard(m_backtraceMutex);
    m_backtraceEnabled = (size > 0);
    m_backtraceBufferSize = size;
    m_backtraceBuffer.clear();
}


// Dumps all buffered backtrace entries to stderr.
// Prints a diagnostic line and returns early when the feature is disabled
// or the buffer is empty.
void Logger::dumpBacktrace() {
    // Lock BEFORE touching shared state: m_backtraceEnabled and
    // m_backtraceBuffer are written by enableBacktrace()/handle_backtrace()
    // under m_backtraceMutex, so reading them unlocked was a data race.
    std::lock_guard<std::mutex> lock(m_backtraceMutex);

    if (!m_backtraceEnabled || m_backtraceBuffer.empty()) {
        std::cerr << "Backtrace logging is not enabled or buffer is empty." << std::endl;
        return;
    }

    std::cerr << "\n--- LOG BACKTRACE START (" << m_backtraceBuffer.size() << " entries) ---" << std::endl;

    for (const auto& entry : m_backtraceBuffer) {
        if (m_formatter) {
            std::cerr << m_formatter->format(entry) << std::endl;
        }
        else {
            // Minimal fallback rendering if no formatter is available.
            std::cerr << "[" << static_cast<int>(entry.level) << "]" << entry.message << std::endl;
        }
    }
    std::cerr << "--- LOG BACKTRACE END ---" << std::endl;
}


// Requests a flush from the async worker; no-op when no worker exists.
void Logger::flush() {
    if (!m_asyncWorker) {
        return;
    }
    m_asyncWorker->flush();
}

// Core logging entry point: builds a LogEntry, records it in the backtrace
// ring (if enabled), and hands it to the async worker — or logs synchronously
// when no worker is running.
//
// @param level    severity of this entry (filtered against the current level)
// @param message  log text
// @param context  structured JSON context attached to the entry
void Logger::log(LogLevel level, const std::string& message, const nlohmann::json& context) {

    if (shouldLog(level)) {

        LogEntry entry;
        entry.level = level;
        entry.message = message;
        entry.timestamp = std::chrono::system_clock::now();
        // BUG FIX: the old code guarded with `if (entry.context)`, but on a
        // freshly default-constructed entry that holder is empty, so the
        // context was never stored. Assign it directly instead.
        // NOTE(review): assumes LogEntry::context is assignable from
        // nlohmann::json (e.g. std::optional<nlohmann::json>) — confirm in
        // the LogEntry declaration.
        if (!context.is_null()) {
            entry.context = context; // Store the structured context.
        }
        entry.thread_id = axon_logger::utils::context::get_thread_id();
        entry.trace_id = LogContextManager::get_trace_id();
        entry.span_id = LogContextManager::get_span_id();

        // --- Record the entry in the backtrace ring buffer ---
        if (m_backtraceEnabled) {
            handle_backtrace(entry);
        }
        // --- End of backtrace handling ---


        if (m_asyncWorker) {
            enqueue_log(std::move(entry));
        }    
        else { // No async worker running — log synchronously.
            log_sync(entry);
        }    
    }
}    


// Appends an entry to the backtrace ring buffer, evicting the oldest
// entries so the buffer never exceeds m_backtraceBufferSize.
void Logger::handle_backtrace(const LogEntry& entry) {
    std::lock_guard<std::mutex> guard(m_backtraceMutex);
    m_backtraceBuffer.push_back(entry);
    while (m_backtraceBuffer.size() > m_backtraceBufferSize) {
        m_backtraceBuffer.pop_front();
    }
}

// Routes an entry to the lock-free main queue; when that queue is full,
// falls back to the blocking queue. A runtime_error from the fallback queue
// (thrown during shutdown) drops the entry with a diagnostic on stderr.
void Logger::enqueue_log(LogEntry&& entry) {
    if (m_mainQueue->try_enqueue(std::move(entry))) {
        // Enqueued on the main queue — wake the consumer.
        m_asyncWorker->notify_consumer();
        return;
    }

    // Main queue full; on try_enqueue failure the entry is NOT consumed,
    // so it is still valid here. Copy the message BEFORE the second move:
    // the catch handler below must not read a possibly moved-from entry
    // (the old code did exactly that — use-after-move).
    const std::string message_copy = entry.message;
    try {
        m_fallbackQueue->enqueue(std::move(entry));
    } catch (const std::runtime_error& e) {
        std::cerr << "Error: Log fallback queue Shutting down, log droped: " << e.what() << " - " << message_copy << std::endl;
    }
}

void Logger::log_sync(const LogEntry& entry) {
    std::string formatted_message = m_formatter->format(entry);
    for (const auto& sink : m_sinks) {
        if (sink) {
            sink->log(entry.level, formatted_message);
        }
    }  
}

    
// 日志级别方法简易实现
void Logger::trace(const std::string& message, const nlohmann::json& context) {
    log(LogLevel::Trace, message, context);
}

void Logger::debug(const std::string& message, const nlohmann::json& context) {
    log(LogLevel::Debug, message, context);
}

void Logger::info(const std::string& message, const nlohmann::json& context) {
    log(LogLevel::Info, message, context);
}

void Logger::warn(const std::string& message, const nlohmann::json& context) {
    log(LogLevel::Warn, message, context);
}

void Logger::error(const std::string& message, const nlohmann::json& context) {
    log(LogLevel::Error, message, context);
}

void Logger::critical(const std::string& message, const nlohmann::json& context) {
    log(LogLevel::Critical, message, context);
}



