import { StreamConfigurations } from "./pc";
// For throttled audio requests
// Sentence chunks waiting to be synthesized into audio; drained one at a time.
let audioRequestQueue: string[] = [];
// True while processAudioQueue() is actively draining the queue.
let isProcessingQueue = false;
// Set once the LLM stream has finished producing text.
let streamIsDone = false;

// Accumulated duration (seconds) of all audio produced for the current LLM call.
let totalAudioDuration = 0;
// Deferred handle for the promise returned by LLM(); settled when the stream is
// done and the audio queue has drained, or immediately on interruption.
let completionPromise: {
    resolve: (value: { status: boolean; duration: number }) => void;
    reject: (reason?: any) => void;
} | null = null;
// Module-level globals
// Timestamp (ms) of the first streamed chunk; null until the stream starts.
let llmStartTime: number | null = null;
// Set when interruptLLM() aborts an in-flight request.
let llmAborted = false;

// Global AbortControllers used to cancel the TTS audio and upload requests
export let audioFetchController: AbortController | null = null;
export let uploadFetchController: AbortController | null = null;

// Tracks whether the first audio upload has completed (gates onFirstTenChars in LLM)
let firstAudioUploadCompleted = false;

// 读取音频时长的函数
async function readAudioDuration(file: File) {
    const arrayBuffer = await file.arrayBuffer();
    const audioContext = new AudioContext();
    const audioBuffer = await audioContext.decodeAudioData(arrayBuffer);
    return audioBuffer.duration; // 返回音频时长（秒）
}

// 上传音频文件和时长的函数
async function uploadAudioFile(file: File) {
    // 新增AbortController 
    uploadFetchController = new AbortController();

    // 过滤出有 url 和 sessionid 的配置项
    const StreamConfigs = StreamConfigurations.filter(config =>
        config.url && config.sessionid && config.url.length > 0
    );

    const uploadPromises = StreamConfigs.map(async (config) => {
        const formData = new FormData();
        formData.append('file', file);
        formData.append('sessionid', config.sessionid!.toString());

        try {
            const response = await fetch(`${config.url}/humanaudio`, {
                method: 'POST',
                body: formData,
                signal: uploadFetchController!.signal
            });

            if (response.ok) return { configId: config.id, success: true };
            else return { configId: config.id, success: false, error: response.statusText };

        } catch (error) {
            return { configId: config.id, success: false, error: error };
        }
    });

    try {
        const results = await Promise.all(uploadPromises);
        // 输出详细结果
        results.forEach(result => {
            if (result.success) console.log(`✓ ${result.configId} 上传成功`);
            else console.log(`✗ ${result.configId} 上传失败:`, result.error);
        });

        // 标记第一次音频上传完成
        firstAudioUploadCompleted = true;

        return results;
    } catch (error) {
        console.error('并发音频上传过程中发生错误:', error);
        throw error;
    }
}

// Synthesizes `message` to speech via the TTS service, uploads the resulting
// audio to all stream endpoints, and returns the audio duration in seconds.
// Returns 0 for empty input or on any failure.
export const AudioTimeFile = async (message: string): Promise<number> => {
    if (message.length == 0) return 0;
    try {
        audioFetchController = new AbortController();
        // FIX: build the query with URLSearchParams so `message` (and the env
        // values) are percent-encoded. The previous template string broke on any
        // text containing '&', '?', '#', '+' etc., and sent `streaming_mode` twice.
        const params = new URLSearchParams({
            text: message,
            text_lang: 'zh',
            ref_audio_path: process.env.NEXT_PUBLIC_API_URL_TTS_FILE_UTL ?? '',
            prompt_lang: 'zh',
            prompt_text: process.env.NEXT_PUBLIC_API_URL_TTS_FILE_CONTENT ?? '',
            text_split_method: 'cut5',
            batch_size: '1',
            media_type: 'wav',
            streaming_mode: 'false',
        });
        const response = await fetch(
            `${process.env.NEXT_PUBLIC_API_URL_TTS}/tts?${params.toString()}`,
            { signal: audioFetchController.signal }
        );
        if (!response.ok) {
            console.error('音频上传失败:', response);
            // FIX: bail out — the error body is not audio; previously execution
            // fell through to blob/upload/decode and failed later anyway.
            return 0;
        }
        const blob = await response.blob();
        // NOTE(review): the service returns WAV (media_type=wav) but the file is
        // named audio.mp3 with type audio/mpeg — kept as-is since the upload
        // endpoint may depend on it; confirm and align the extension/MIME type.
        const audioFile = new File([blob], 'audio.mp3', { type: 'audio/mpeg' });
        // Upload the synthesized audio to all stream endpoints.
        await uploadAudioFile(audioFile)
        const duration = await readAudioDuration(audioFile);
        return duration;
    } catch (error) {
        console.error('您的 fetch 操作出现问题:', error);
        return 0;
    }
}


// Drains audioRequestQueue one chunk at a time, throttled to one TTS request
// per 200ms. When the LLM stream is done and nothing is left to synthesize,
// resolves the pending completion promise with the accumulated duration.
const processAudioQueue = async () => {
    // Shared bookkeeping for "nothing left to do right now".
    const settleIfFinished = () => {
        isProcessingQueue = false;
        if (streamIsDone && completionPromise) {
            completionPromise.resolve({ status: true, duration: totalAudioDuration });
            completionPromise = null;
        }
    };

    if (audioRequestQueue.length === 0) {
        settleIfFinished();
        return;
    }

    isProcessingQueue = true;
    const nextChunk = audioRequestQueue.shift();
    if (nextChunk) {
        try {
            const chunkDuration = await AudioTimeFile(nextChunk);
            totalAudioDuration += chunkDuration;
            console.log("音频块处理完成，时长:", chunkDuration);
        } catch (error) {
            console.error("音频块处理失败:", error);
        }
    }

    if (audioRequestQueue.length > 0) {
        // Throttle: wait 200ms before issuing the next TTS request.
        setTimeout(() => { void processAudioQueue(); }, 200);
    } else {
        settleIfFinished();
    }
};

// Global AbortController used to cancel the in-flight LLM fetch request
export let currentLLMFetchController: AbortController | null = null;

// Interrupts everything in flight: the LLM stream, the TTS download, and the
// audio upload. Marks the LLM run as aborted so its loop exits cleanly.
export const interruptLLM = () => {
    const llmController = currentLLMFetchController;
    if (llmController) {
        llmController.abort();
        currentLLMFetchController = null;
        llmAborted = true;
    }
    // Cancel any pending TTS download.
    audioFetchController?.abort();
    audioFetchController = null;
    // Cancel any pending audio uploads.
    uploadFetchController?.abort();
    uploadFetchController = null;
};


export const LLM = async (
    message: string,
    onFirstTenChars?: () => Promise<void>,
    onCaptionUpdate?: (content: string, isComplete: boolean) => void
): Promise<{ status: boolean; duration: number }> => {
    // 如果上一个 LLM fetch 请求还在进行中，则中断它
    if (currentLLMFetchController) {
        currentLLMFetchController.abort();
        currentLLMFetchController = null;
        llmAborted = true; // 标记为被打断
    }

    // 如果前一个 LLM_Send 调用返回的 Promise 尚未解决，则立即用 0 解决它
    if (completionPromise) {
        completionPromise.resolve({ status: false, duration: 0 });
        completionPromise = null;
    }

    // 重置所有音频相关状态
    audioRequestQueue = [];
    isProcessingQueue = false;
    streamIsDone = false;
    totalAudioDuration = 0;
    llmStartTime = null;
    llmAborted = false;
    firstAudioUploadCompleted = false;

    // 创建新的 Promise 来返回本次 LLM_Send 调用的结果
    let resolveFn: (value: { status: boolean; duration: number }) => void;
    let rejectFn: (reason?: any) => void;
    const promise = new Promise<{ status: boolean; duration: number }>((resolve, reject) => {
        resolveFn = resolve;
        rejectFn = reject;
    });
    completionPromise = { resolve: resolveFn!, reject: rejectFn! };

    // 为本次 LLM fetch 请求创建新的 AbortController
    currentLLMFetchController = new AbortController();
    const signal = currentLLMFetchController.signal;

    let onFirstTenCharsTriggered = false; // 新增标志，用于确保 onFirstTenChars 只触发一次

    try {
        const resp = await fetch(
            process.env.NEXT_PUBLIC_API_URL_QWEN!,
            {
                method: "POST",
                headers: {
                    "Content-Type": "application/json",
                    Authorization: `Authorization: Bearer ${process.env.NEXT_PUBLIC_API_URL_QWEN_KEY}`,
                },
                body: JSON.stringify({
                    "model": "qwen-plus",
                    "messages": [
                        {
                            "role": "system",
                            "content": '你叫小智，是由山西自定义科技有限公司研发的AI助手。请根据以下规则进行对话：\n\n角色定位\n作为企业专属助手，优先响应与山西自定义科技相关的专业咨询\n保持通用对话能力，可自然处理非企业信息类话题\n信息处理机制\n当涉及公司信息查询时，必须基于数据库内容作答\n当话题超出公司范畴时，可自行做出回应无需引用数据库\n对未明确提及公司信息的模糊提问，优先判断是否隐含企业相关需求\n语言风格\n请保持自然语言口语化，避免使用如"1、2、3、4、5”等数字标签回复。\n不要使用颜表情！！！！不要使用颜表情！！！！\n\n数据库内容\n[\n自定义科技，成立于2016年，是目前国内领先的数智化技术服务商。公司以大模型技术和数字运营作为核心，基于AI技术的场景应用，结合智慧文旅、智慧社区、智慧城市、企业服务等业务场景提供AI+行业解决方案。\n目前呢公司拥有员工300余人，在长治、北京、太原、海南都设有区域总部，研发团队占比已经超过40%，拥有百余项软件著作权及多项发明专利，先后荣获“国家高新技术企业”“山西优秀企业”“山西省专精特新企业”“长治市重点拟上市后备企业”等多项荣誉，而且我们通过了“ITSS三级认证”“双软企业认证”“ISO9001质量管理体系”等多项认证体系。\n目前我们与国家信息中心、清华大学数据治理研究中心、中国信通院等国内权威学术机构建立了交流机制，与中国移动、中国联通、中国电信、龙芯中科、海康威视、ITC、大华、新华三、国泰新点、浪潮软件等国内领先企业建立合作，其中我们与清华大学“数字政府与治理研究院”深化产学研协同创新体系，加速垂直场景大模型技术研发能力，探索AI与业务场景深度技术融合创新，为数字政府、新型智慧城市建设、政企数字化全面赋能，推动行业数智化转型和升级。\n]\n回答策略\n企业相关信息：严格遵循数据库数据，采用「核心信息+扩展说明」结构\n通用话题：保持口语化表达，当话题超出公司范畴时，可自行做出回应无需引用数据库\n混合型问题：先处理企业信息部分，再自然过渡到通用话题\n特殊情况处理\n若无法判断问题类型，可礼貌询问："您是想了解山西自定义科技的相关信息，还是有其他问题需要帮助？"\n遇到数据库未覆盖的公司信息，应引导用户补充具体需求\n对于明显与公司无关的闲聊，保持友好但专业的对话姿态\n请始终保持简洁清晰的表达风格，确保每个回答都包含实质性内容，避免空泛应答。在保证专业性的同时，通过自然的语气拉近与用户的距离。'
                        },
                        {
                            "role": "user",
                            "content": message
                        }],
                    "stream": true,
                    "enable_search": true
                }),
                signal: signal
            }
        );

        if (resp.body) {
            const reader = resp.body.pipeThrough(new TextDecoderStream()).getReader();
            let currentData = "";
            let firstChunk = true;
            let content = ''

            while (true) {
                if (signal.aborted || llmAborted) {
                    reader.cancel();
                    completionPromise?.resolve({ status: false, duration: 0 });
                    return { status: false, duration: 0 };
                }
                const { done, value: readValue } = await reader.read();
                const lines = readValue?.split('\n').filter(line => line.startsWith('data: '));
                for (const line of lines || []) {
                    try {
                        const llm = line.substring('data: '.length);
                        if (llm == '[DONE]') continue;
                        const llm_data = JSON.parse(llm).choices[0].delta.content;
                        content += llm_data;
                        const sentence = splitSentences(llm_data, done);
                        for (const item of sentence) {
                            audioRequestQueue.push(item);
                        }
                    } catch (e) {
                        console.error("Error parsing LLM data:", e, "Line:", line);
                    }
                }

                currentData += readValue;

                if (signal.aborted || llmAborted) {
                    reader.cancel();
                    completionPromise?.resolve({ status: false, duration: 0 });
                    return { status: false, duration: 0 };
                }

                if (firstChunk) {
                    llmStartTime = Date.now();
                    firstChunk = false;
                }
                if (onCaptionUpdate) {
                    onCaptionUpdate(content, false);
                }

                // 调整 onFirstTenChars 的触发时机，确保在第一次音频上传完成后触发
                if (isProcessingQueue) {
                    while (isProcessingQueue) {
                        await new Promise(resolve => setTimeout(resolve, 100));
                    }
                } else {
                    await processAudioQueue();
                    // 确保第一次音频上传完成后再触发 onFirstTenChars
                    if (onFirstTenChars && !onFirstTenCharsTriggered && firstAudioUploadCompleted) {
                        await onFirstTenChars();
                        onFirstTenCharsTriggered = true;
                    }
                }

                if (done) {
                    if (onCaptionUpdate) onCaptionUpdate(content, true);
                    streamIsDone = true;
                    // 等待所有音频处理完
                    if (!isProcessingQueue) {
                        await processAudioQueue();
                    } else {
                        while (isProcessingQueue) {
                            await new Promise(resolve => setTimeout(resolve, 100));
                        }
                    }
                    // 计算剩余时间
                    if (llmStartTime && !llmAborted) {
                        completionPromise?.resolve({ status: true, duration: totalAudioDuration });
                        return { status: true, duration: totalAudioDuration };
                    } else {
                        completionPromise?.resolve({ status: false, duration: 0 });
                        return { status: false, duration: 0 };
                    }
                }
            }
        }
        return promise;
    } catch (error: any) {
        completionPromise?.resolve({ status: false, duration: 0 });
        return { status: false, duration: 0 };
    }
}



// Buffer holding streamed text that has not yet reached a sentence boundary.
let currentSentenceBuffer: string = "";

// Clears the sentence buffer (call between independent streams).
export function resetSentenceSplitter() {
    currentSentenceBuffer = "";
}


// 分割句子
export function splitSentences(chunk: string, done: boolean): string[] {
    if (!chunk) {
        return [];
    }

    let sentences: string[] = [];
    currentSentenceBuffer += chunk;

    // 匹配中文、英文标点符号以及换行符
    const punctuationRegex = /[。？！\n.?!]/g;
    let match;
    let lastIndex = 0;

    while ((match = punctuationRegex.exec(currentSentenceBuffer)) !== null) {
        let sentence = currentSentenceBuffer.substring(lastIndex, match.index + 1).trim();
        if (sentence) {
            // 过滤掉表情符号
            const filteredSentence = filterEmojis(sentence);
            if (filteredSentence) {
                sentences.push(filteredSentence);
            }
        }
        lastIndex = match.index + 1;
    }

    currentSentenceBuffer = currentSentenceBuffer.substring(lastIndex);

    if (done && currentSentenceBuffer.length > 0) {
        const filteredRemaining = filterEmojis(currentSentenceBuffer.trim());
        if (filteredRemaining) {
            sentences.push(filteredRemaining);
        }
        currentSentenceBuffer = ""; // 清空缓冲区
    }

    return sentences;
}


// 过滤表情符号的函数
function filterEmojis(text: string): string {
    if (!text) return "";

    // 移除常见的表情符号
    // 匹配 Unicode 表情符号范围
    const emojiRegex = /[\u{1F600}-\u{1F64F}]|[\u{1F300}-\u{1F5FF}]|[\u{1F680}-\u{1F6FF}]|[\u{1F1E0}-\u{1F1FF}]|[\u{2600}-\u{26FF}]|[\u{2700}-\u{27BF}]|[\u{1F900}-\u{1F9FF}]|[\u{1F018}-\u{1F270}]|[\u{238C}-\u{2454}]|[\u{20D0}-\u{20FF}]|[\u{FE00}-\u{FE0F}]|[\u{1F000}-\u{1F02F}]|[\u{1F0A0}-\u{1F0FF}]|[\u{1F100}-\u{1F64F}]|[\u{1F910}-\u{1F96B}]|[\u{1F980}-\u{1F9E0}]/gu;

    // 移除表情符号
    let filteredText = text.replace(emojiRegex, '');

    // 移除常见的文本表情符号
    const textEmojiRegex = /[:;=]-?[)(|DPp]/g;
    filteredText = filteredText.replace(textEmojiRegex, '');

    // 移除多余的空格
    filteredText = filteredText.replace(/\s+/g, ' ').trim();

    return filteredText;
} 