import {
	store
} from "../store/index.js"
// Feature-detect MediaSource Extensions; absent on some WebViews, in which
// case initsock() falls back to the one-shot HTTP path (yummsdd).
var supportMediaSource = 'MediaSource' in window

const innerAudioContext = uni.createInnerAudioContext();
innerAudioContext.autoplay = true;

// Fixed prompt-clip URLs (played by ojxdrr / qwuixdr below).
let moxdr = "https://duxinggj-1251133427.cos.ap-guangzhou.myqcloud.com/yx/yuyue/xqw/5c8949d502d7b47897.mp3"
let qinhxd ="https://duxinggj-1251133427.cos.ap-guangzhou.myqcloud.com/yx/yuyue/xqw/djqw.mp3"
// SourceBuffer created once the MediaSource opens (see jxcxfe).
let sourceBuffer;
let audioQueue = []; // queue buffering incoming audio chunks awaiting append
/**
 * (Re)creates the MediaSource pipeline that innerAudioContext streams from.
 * No-op when MSE is unsupported; initsock() falls back to yummsdd() there.
 * Side effects: reassigns the module-level `sourceBuffer` and the audio src.
 */
export const jxcxfe = () => {
	if (supportMediaSource) {
		let mediaSource = new MediaSource();
		innerAudioContext.src = URL.createObjectURL(mediaSource);
		mediaSource.addEventListener('sourceopen', function() {
			try {
				sourceBuffer = mediaSource.addSourceBuffer('audio/mpeg');
			} catch (e) {
				// e.g. unsupported MIME type or MediaSource no longer open.
				console.error('addSourceBuffer failed:', e);
				// Without a SourceBuffer there is nothing to wire up; the
				// original fell through and crashed on `sourceBuffer.addEventListener`.
				return;
			}
			sourceBuffer.addEventListener('updateend', function() {
				if (audioQueue.length > 0 && !sourceBuffer.updating) {
					// Append the next queued chunk.
					let nextBuffer = audioQueue.shift();
					sourceBuffer.appendBuffer(nextBuffer);
				} else if (audioQueue.length === 0) {
					// NOTE(review): this ends the stream whenever the queue drains,
					// even though the WebSocket may still deliver more chunks;
					// endOfStream throws outside the 'open' state, hence the guard.
					try {
						mediaSource.endOfStream();
					} catch (e) {
						// Already ended / not open — deliberate best-effort.
					}
				}
			});

		});
	}
}



 jxcxfe()

innerAudioContext.onPlay(() => {
	if(innerAudioContext.src!=moxdr&&innerAudioContext.src!=qinhxd){
		store.state.isboxrtt = 2
	}
	
});
innerAudioContext.onStop(() => {
	store.state.isboxrtt = 0
	store.state.isanzhu = false
});
innerAudioContext.onPause(() => {
	store.state.isboxrtt = 0
	jxcxfe()
})

const voiceId = 'hJ40it68M3EdB1krnCAX';
const model = 'eleven_multilingual_v2';
// SECURITY NOTE(review): API key is hardcoded in client-shipped code — anyone
// can extract it from the bundle; it should be proxied through a backend.
const xiapikey = "243cdc1422560a5095225a47a9291f77"
// ElevenLabs streaming-input WebSocket endpoint for the chosen voice/model.
const wsUrl =
	`wss://api.elevenlabs.io/v1/text-to-speech/${voiceId}/stream-input?optimize_streaming_latency=1&model_id=` + model;



// Module-level scratch state reset at the start of each TTS session.
let jnxxr = ""
let jmxcxer = 0
let jxxrr = 0
let jxccer = ""
let jxdrt = 0
let kmxdrt = []

// Payload fragment merged (spread) into every WebSocket message.
let jhssd = {
	"voice_settings": {
		stability: 0.5,
		similarity_boost: 0.8,
		style: 0.8,
	},
	model_id: model,
	"xi_api_key": xiapikey, // replace with your API key
}
// ty=1: triggered by tap-to-play; ty=2: triggered by an end-of-stream message.
let jnxcddft= ""
/**
 * Streams TTS audio for `text` over the ElevenLabs WebSocket API into the
 * MediaSource pipeline built by jxcxfe(). Falls back to a one-shot HTTP
 * request (yummsdd) when MediaSource Extensions are unavailable.
 * @param {string} text - text to synthesize
 * @param {number} ty - 1 = user tapped play, 2 = end-of-conversation message
 */
export const initsock = (text, ty) => {

	if (store.state.isjysd) { // muted — skip all speech output
		return
	}
	if (!supportMediaSource) {
		// Remember the latest requested text so stale HTTP responses can be
		// discarded inside yummsdd().
		jnxcddft = text
		yummsdd(text, ty)
		return
	}
	store.state.isboxrtt = 1 // loading state
	kmxdrt = []
	jnxxr = ""
	jmxcxer = 0
	jxxrr = 0
	const socket = new WebSocket(wsUrl);

	// Initialize the connection: BOS message (single space), then the text to
	// synthesize, then EOS (empty text) to finalize generation.
	socket.onopen = function(event) {
		const bosMessage = {
			"text": " ",
			...jhssd
		};
		socket.send(JSON.stringify(bosMessage));

		const textMessage = {
			"text": text,
			...jhssd
		};
		socket.send(JSON.stringify(textMessage));

		const eosMessage = {
			"text": "",
			...jhssd
		};
		socket.send(JSON.stringify(eosMessage));
	};

	socket.onmessage = function(event) {
		const response = JSON.parse(event.data);
		if (response.audio) {
			// NOTE(review): tetsdr accumulates the *decoded* bytes here; if a
			// consumer expects base64 text this is suspect — verify callers.
			store.state.tetsdr += atob(response.audio)
			let binaryString = uni.base64ToArrayBuffer(response.audio);
			// Queue the chunk; the 'updateend' handler in jxcxfe() drains it.
			audioQueue.push(binaryString);
			// Kick the SourceBuffer if it is idle. Guard against it not
			// existing yet: 'sourceopen' may not have fired, and the original
			// dereferenced `sourceBuffer.updating` unconditionally.
			if (sourceBuffer && !sourceBuffer.updating && audioQueue.length > 0) {
				let nextBuffer = audioQueue.shift();
				sourceBuffer.appendBuffer(nextBuffer);
			}

			console.log("Received audio chunk");
		} else {
			console.log("No audio data in the response");
		}

		if (response.isFinal) {
			// Generation complete — start playback.
			innerAudioContext.play()
		}

		if (response.normalizedAlignment) {
			// use the alignment info if needed
		}
	};

	// Handle errors. `error` is an Event: template-stringifying it yields
	// "[object Event]", so log the object itself.
	socket.onerror = function(error) {
		console.error('WebSocket Error:', error);
	};

	// Handle socket closing
	socket.onclose = function(event) {
		if (event.wasClean) {
			console.info(`Connection closed cleanly, code=${event.code}, reason=${event.reason}`);
		} else {
			console.warn('Connection died');
		}
	};
}

/** Fully stops speech: clears the playing flag and destroys the audio context. */
export const zhsdr = () => {
	store.state.isboxrtt = 0
	innerAudioContext.destroy()
}
/**
 * Pause entry point: only the non-MSE fallback path needs an explicit stop;
 * on the streaming path pausing is handled via the onPause callback.
 */
export const zanting = () => {
	if (supportMediaSource) return;
	innerAudioContext.stop()
}
/** Plays the fixed "moxdr" prompt clip through the shared audio context. */
export const ojxdrr = () => {
	innerAudioContext.src = moxdr
	innerAudioContext.play()
}
/** Plays the fixed "qinhxd" prompt clip through the shared audio context. */
export const qwuixdr = () => {
	innerAudioContext.src = qinhxd
	innerAudioContext.play()
}

/**
 * Non-streaming fallback: fetches the full MP3 for `text` from the ElevenLabs
 * REST endpoint and hands it to innerAudioContext as an object URL.
 * @param {string} text - text to synthesize
 * @param {number} ty - accepted for symmetry with initsock; currently unused
 */
const yummsdd = async (text, ty) => {

	const body = {
		text: text,
		...jhssd
	};
	store.state.isboxrtt = 1 // loading state
	let response;
	try {
		response = await uni.request({
			url: 'https://api.elevenlabs.io/v1/text-to-speech/' + voiceId,
			method: "POST",
			header: {
				Accept: "audio/mpeg",
				"Content-Type": "application/json",
				"xi-api-key": xiapikey,
			},
			responseType: "arraybuffer",
			data: body,
		});
	} catch (err) {
		// Network/API failure: the original awaited with no handler, leaving
		// an unhandled rejection and the UI stuck in the loading state.
		store.state.isboxrtt = 0
		console.error('TTS request failed:', err);
		return
	}
	// A newer request superseded this one while we were waiting — drop it.
	if (jnxcddft !== text) {
		return
	}
	const blob = new Blob([response?.data], {
		type: "audio/mp3"
	});
	const audioUrl = URL.createObjectURL(blob);
	innerAudioContext.src = audioUrl;
}
/**
 * Converts a *decoded* binary string (e.g. the result of atob()) into a Blob.
 * Note: despite the name, the input is raw byte-characters, not base64 text.
 * @param {string} byteCharacters - string whose char codes are the bytes
 * @param {string} contentType - MIME type for the resulting Blob
 * @returns {Blob} blob whose bytes are the input's char codes
 */
const base64ToBlob = (byteCharacters, contentType) => {
	// Idiomatic replacement for the original hand-rolled 1024-byte slicing
	// loop: one pass, identical resulting bytes, handles "" naturally.
	const bytes = Uint8Array.from(byteCharacters, (ch) => ch.charCodeAt(0));
	return new Blob([bytes], {
		type: contentType
	});
}