const moment = require('./../node_modules/moment');
const {
	ErrorCommon,
	ErrorText2audio,
	roundFun
} = require("@/pages/voice/utils/common/index")
const {
	textToSsml,
	realToSpeed
} = require("@/pages/voice/utils/voice/index")
const {
	wxttsPost
} = require("@/pages/voice/utils/index")

/**
 * Speech synthesis API (语音合成): synthesizes state.text, writes the audio
 * to a local temp file and uploads it to cloud storage.
 * @param {String} uid		user id (used in the session id and cloud path)
 * @param {Object} state	voiceStore state (text, volume, sonicSpeed, voice, fileType, emotion)
 * @returns {Promise<{text:String, fileID:String, SessionId:String, Subtitles:Array}>}
 *          rejects with {errCode, msg}
 */
const apiPostText2audio = function(uid, state) {
	const promise = new Promise(async (resolve, reject) => {
		uni.showLoading({
			title: "合成中，0%"
		})

		// Convert the text into SSML markup; `text` drops the trailing
		// character and is returned to the caller unchanged.
		const text = state.text.slice(0, -1);
		const ssml = textToSsml(state.text, "wx");
		uni.showLoading({
			title: "合成中，10%"
		})

		// Session id: uid + text length + timestamp (also used as the file name)
		const sessionId = uid + "-" + text.length + "-" + new Date().getTime();
		// TTS request payload
		let data = {
			Text: ssml,
			SessionId: sessionId,
			Volume: state.volume,
			Speed: realToSpeed(state.sonicSpeed),
			ProjectId: 0,
			ModelType: 1,
			VoiceType: state.voice.id,
			PrimaryLanguage: 1,
			SampleRate: 16000,
			Codec: state.fileType,
			EnableSubtitle: true,
			SegmentRate: 0
		}
		// Emotional voices additionally carry an emotion category/intensity
		if (state.voice.isEmotional === true) {
			data.EmotionCategory = state.voice.emotional
			data.EmotionIntensity = state.emotion
		}
		const options = {
			endpoint: process.uniEnv.BASE_URL,
			service: "tts",
			region: "ap-guangzhou",
			action: "TextToVoice",
			version: "2019-08-23"
		}

		// Send the synthesis request
		await wxttsPost("", data, options).then(async res => {
			uni.showLoading({
				title: "合成中，30%"
			})
			if (res && res.data && res.data.Response) {
				const {
					RequestId,
					Audio,
					SessionId,
					Subtitles
				} = res.data.Response
				if (Audio === undefined) {
					// BUGFIX: must return after reject, otherwise execution
					// falls through and tries to write an undefined audio
					// payload to disk.
					reject({
						errCode: 1001,
						msg: "合成失败"
					});
					return;
				}

				// Write the base64 audio to a local temp file, then upload it
				const tempFilePath = wx.env.USER_DATA_PATH + "/" + sessionId + "." + state
					.fileType;
				const fileSystemManager = uni.getFileSystemManager();
				await fileSystemManager.writeFile({
					filePath: tempFilePath,
					data: Audio,
					encoding: "base64",
					success: async function(fileResp) {
						if (fileResp && fileResp.errMsg === "writeFile:ok") {
							// Upload the temp file to cloud storage
							const result = await uniCloud.uploadFile({
								filePath: tempFilePath,
								cloudPath: "text2audio/" + moment(new Date()).format("YYYYMM") + "/" + moment(new Date()).format("DD") + "/" + uid + "/" + sessionId + "." + state.fileType,
								cloudPathAsRealPath: true,
								onUploadProgress: function(progressEvent) {
									// Map upload progress onto the 30%-80% range
									var percentCompleted = Math.round(
										(progressEvent.loaded * 100) /
										progressEvent.total
									);
									const value = 30 +
										percentCompleted / 100 * 50
									uni.showLoading({
										title: "合成中，" + value + "%"
									})
								},
								success: function(uploadResp) {
									resolve({
										text,
										fileID: uploadResp.fileID,
										SessionId,
										Subtitles
									});
								},
								fail: function(uploadErr) {
									reject({
										errCode: 1004,
										msg: "合成失败:" +
											uploadErr
									});
								}
							});

						} else {
							reject({
								errCode: 1004,
								msg: "合成失败"
							});
						}
					},
					fail: function(fileErr) {
						reject({
							errCode: 1003,
							msg: "合成失败:" + fileErr
						});
					}
				})

			} else {
				reject({
					errCode: 1002,
					msg: "合成失败"
				});
			}

		}).catch(res => {
			reject({
				errCode: 1000,
				msg: "合成失败:" + res
			});
		})
	});

	return promise;

}

/**
 * Audition / preview (试听): synthesizes textStr and resolves with a local
 * temp file path instead of uploading to cloud storage.
 * @param {String} uid		uid
 * @param {String} textStr	text to synthesize
 * @param {Object} state	voiceStore state
 * @param {String} fileType	audio codec / file extension (default "mp3")
 * @returns {Promise<{text:String, tempFilePath:String, SessionId:String, Subtitles:Array}>}
 *          rejects with {errCode, msg}
 */
const apiPostAudition = function(uid, textStr, state, fileType = "mp3") {
	const promise = new Promise(async (resolve, reject) => {
		uni.showLoading({
			title: "合成中，0%"
		})

		// Convert the text into SSML markup; `text` drops the trailing character
		const text = textStr.slice(0, -1);
		const ssml = textToSsml(textStr, "wx");
		uni.showLoading({
			title: "合成中，10%"
		})

		// Session id: uid + text length + timestamp (also used as the file name)
		const sessionId = uid + "-" + text.length + "-" + new Date().getTime();
		// TTS request payload
		let data = {
			Text: ssml,
			SessionId: sessionId,
			Volume: state.volume,
			Speed: realToSpeed(state.sonicSpeed),
			ProjectId: 0,
			ModelType: 1,
			VoiceType: state.voice.id,
			PrimaryLanguage: 1,
			SampleRate: 16000,
			Codec: fileType,
			EnableSubtitle: true,
			SegmentRate: 0
		}
		// Emotional voices additionally carry an emotion category/intensity
		if (state.voice.isEmotional === true) {
			data.EmotionCategory = state.voice.emotional
			data.EmotionIntensity = state.emotion
		}
		const options = {
			endpoint: process.uniEnv.BASE_URL,
			service: "tts",
			region: "ap-guangzhou",
			action: "TextToVoice",
			version: "2019-08-23"
		}

		// Send the synthesis request
		await wxttsPost("", data, options).then(async res => {
			uni.showLoading({
				title: "合成中，30%"
			})
			if (res && res.data && res.data.Response) {
				const {
					RequestId,
					Audio,
					SessionId,
					Subtitles
				} = res.data.Response
				if (Audio === undefined) {
					// BUGFIX: must return after reject, otherwise execution
					// falls through and tries to write an undefined audio
					// payload to disk.
					reject({
						errCode: 1001,
						msg: "合成失败"
					});
					return;
				}

				// Write the base64 audio to a local temp file
				const tempFilePath = wx.env.USER_DATA_PATH + "/" + sessionId + "." + fileType;
				const fileSystemManager = uni.getFileSystemManager();
				await fileSystemManager.writeFile({
					filePath: tempFilePath,
					data: Audio,
					encoding: "base64",
					success: async function(fileResp) {
						if (fileResp && fileResp.errMsg === "writeFile:ok") {
							resolve({
								text,
								tempFilePath,
								SessionId,
								Subtitles
							});
						} else {
							// BUGFIX: previously this branch settled nothing,
							// so the returned promise hung forever.
							reject({
								errCode: 1003,
								msg: "合成失败"
							});
						}
					},
					fail: function(fileErr) {
						reject({
							errCode: 1003,
							msg: "合成失败:" + fileErr
						});
					}
				})

			} else {
				reject({
					errCode: 1002,
					msg: "合成失败"
				});
			}

		}).catch(res => {
			reject({
				errCode: 1000,
				msg: "合成失败:" + res
			});
		})
	});

	return promise;

}

/**
 * Long-text speech synthesis (长文本语音合成): synthesizes each segment of
 * longTextArr in sequence, merges the audio and subtitle data, writes one
 * temp file and uploads it to cloud storage.
 * @param {String} uid
 * @param {Object} state		voiceStore state
 * @param {Array}  longTextArr	pre-split text segments (already SSML-converted upstream)
 * @returns {Promise<{fileID:String, sessionId:String, subtitlesData:Array}>}
 *          rejects with {code, message, consume}, where `consume` is the
 *          number of characters already synthesized before the failure
 */
const apiPostLongText2audio = async function(uid, state, longTextArr) {
	uni.showLoading({
		title: "合成中，0%"
	})
	console.log("长文本语音合成")
	const promise = new Promise(async (resolve, reject) => {
		// Number of characters consumed so far (reported even on partial failure)
		let consumeWordNum = 0;
		// Collected per-segment synthesis responses
		let respAudioArr = [];
		try {
			////////// Parameter validation
			if (longTextArr == null || longTextArr.length == 0) {
				throw new ErrorText2audio(1001, '参数错误', consumeWordNum);
			}

			////////// Speech synthesis
			// Progress counter (percent)
			let speedNum = 10;
			uni.showLoading({
				title: "合成中，" + speedNum + "%"
			})

			// Number of segments (steps)
			const step = longTextArr.length
			console.log("长文本语音合成-共%s步",step)

			// Accumulated synthesized text (used for the final session id)
			let respLongText = "";
			// Current step counter
			let stepCount = 1;
			// Total progress budget for the synthesis phase
			const text2audioSpeedTotalNum = 60;
			// Progress budget per segment
			let singleStepTotalNum = roundFun((text2audioSpeedTotalNum / step),2);
			// Progress ticks per segment (before request + after response)
			let singleStep = 2;
			// Progress increment per tick
			let singleStepNum = roundFun((singleStepTotalNum / singleStep),2);
			// Synthesize each segment sequentially
			for (let longText of longTextArr) {
				console.log("长文本语音合成-共%s步-第%s步-开始",step,stepCount);
				// Segments were SSML-converted upstream; no re-processing here
				respLongText += longText;
				//// Build request parameters
				console.log("长文本语音合成-共%s步-第%s步-1构造语音合成参数", step, stepCount);
				const sessionId = uid + "-" + longText.length + "-" + new Date().getTime();
				let data = {
					Text: longText,
					SessionId: sessionId,
					Volume: state.volume,
					Speed: realToSpeed(state.sonicSpeed),
					ProjectId: 0,
					ModelType: 1,
					VoiceType: state.voice.id,
					PrimaryLanguage: 1,
					SampleRate: 16000,
					Codec: state.fileType,
					EnableSubtitle: true,
					SegmentRate: 0
				}
				// Emotional voices additionally carry an emotion category/intensity
				if (state.voice.isEmotional === true) {
					data.EmotionCategory = state.voice.emotional
					data.EmotionIntensity = state.emotion
				}
				const options = {
					endpoint: process.uniEnv.BASE_URL,
					service: "tts",
					region: "ap-guangzhou",
					action: "TextToVoice",
					version: "2019-08-23"
				}
				speedNum += singleStepNum;
				uni.showLoading({title: "合成中，" + roundFun(speedNum,2) + "%"});
				//// Synthesize this segment
				console.log("长文本语音合成-共%s步-第%s步-2合成语音", step, stepCount);

				// Send the request
				await wxttsPost("", data, options).then(async res => {
					speedNum += singleStepNum;
					uni.showLoading({title: "合成中，" + roundFun(speedNum,2) + "%"});
					if (res && res.data && res.data.Response) {
						const {
							RequestId,
							Audio,
							SessionId,
							Subtitles
						} = res.data.Response
						if (Audio === undefined) {
							throw new ErrorText2audio(1002, '合成失败-没有返回合成语音数据', consumeWordNum);
						}

						// Keep the raw segment response for later merging
						respAudioArr.push({requestId:RequestId,audio:Audio,sessionId:SessionId,subtitles:Subtitles})

					} else {
						throw new ErrorText2audio(1003, '合成失败-返回数据为空', consumeWordNum);
					}

				}).catch(res => {
					console.log("res",res)
					// Re-wrap so the outer catch reports the consumed count
					throw new ErrorText2audio(res.code, res.message, consumeWordNum);
				})

				consumeWordNum += longText.length;

				console.log("长文本语音合成-共%s步-第%s步-结束",step,stepCount);
				stepCount++;
			}

			//////////// Response post-processing
			// Validation
			if (respAudioArr.length == 0) {
				throw new ErrorText2audio(1004, '合成失败-校验响应数据失败', consumeWordNum);
			}

			// Merge audio and subtitle data
			console.log("长文本语音合成-处理合成语音数据")
			// BUGFIX: must start from "" — initializing to null made the first
			// `+=` concatenation produce the literal prefix "null", corrupting
			// the base64 payload written to disk.
			let audioData = "";
			let subtitlesData = [];
			let respForCount = 0;
			// End timestamp of the previous segment, used to offset subtitle timings
			let lastEndTime = 0;
			for (let respAudioData of respAudioArr) {
				// Concatenate the base64 audio data
				audioData += respAudioData.audio;
				// Offset each segment's subtitle timings by the accumulated end time
				let itemCount = 1;
				for (let itemSubtitlesData of respAudioData.subtitles) {
					if (respForCount == 0) {
						// First segment: timings are already absolute
						subtitlesData.push(itemSubtitlesData)
					}
					else {
						subtitlesData.push(
							{
								"Text": itemSubtitlesData.Text,
								"BeginTime": itemSubtitlesData.BeginTime + lastEndTime,
								"EndTime": itemSubtitlesData.EndTime + lastEndTime,
								"BeginIndex": subtitlesData.length,
								"EndIndex": subtitlesData.length + 1,
								"Phoneme": itemSubtitlesData.Phoneme
							}
						)
					}

					// After the segment's last subtitle, advance the offset
					// (100ms inter-segment gap)
					if(itemCount === respAudioData.subtitles.length){
						lastEndTime = itemSubtitlesData.EndTime + lastEndTime + 100
					}
					itemCount++;
				}
				respForCount++;
			}

			speedNum += 10;
			uni.showLoading({title: "合成中，" + roundFun(speedNum,2) + "%"});
			////////////// File upload
			// Write the merged base64 audio to a temp file, then upload it
			console.log("长文本语音合成-文件上传开始")
			const sessionId = uid + "-" + respLongText.length + "-" + new Date().getTime();
			const tempFilePath = wx.env.USER_DATA_PATH + "/" + sessionId + "." + state.fileType;
			const fileSystemManager = uni.getFileSystemManager();
			await fileSystemManager.writeFile({
				filePath: tempFilePath,
				data: audioData,
				encoding: "base64",
				success: async function(fileResp) {
					if (fileResp && fileResp.errMsg === "writeFile:ok") {
						// Upload the merged file to cloud storage
						const result = await uniCloud.uploadFile({
							filePath: tempFilePath,
							cloudPath: "text2audio/" + moment(new Date()).format("YYYYMM") + "/" + moment(new Date()).format("DD") + "/" + uid + "/" + sessionId + "." + state.fileType,
							cloudPathAsRealPath: true,
							onUploadProgress: function(progressEvent) {
								// Map upload progress onto the final 10% of the bar
								var percentCompleted = Math.round( (progressEvent.loaded * 100) / progressEvent.total );
								const value = speedNum + percentCompleted / 100 * 10
								uni.showLoading({title: "合成中，" + roundFun(value,2) + "%"})
							},
							success: function(uploadResp) {
								resolve({
									fileID: uploadResp.fileID,
									sessionId,
									subtitlesData
								});
							},
							fail: function(uploadErr) {
								// BUGFIX: a throw inside this callback can never
								// reach the surrounding try/catch; reject directly
								// with the same {code, message, consume} shape.
								reject({
									code: 1005,
									message: '合成失败-生成临时文件失败',
									consume: consumeWordNum
								});
							}
						});

					} else {
						// BUGFIX: reject instead of throwing from a callback
						reject({
							code: 1006,
							message: '生成临时文件失败',
							consume: consumeWordNum
						});
					}
				},
				fail: function(fileErr) {
					// BUGFIX: reject instead of throwing from a callback
					reject({
						code: 1007,
						message: '生成临时文件失败-异常：' + fileErr.message,
						consume: consumeWordNum
					});
				}
			})

		}
		catch (error) {
			console.log("error",error)
			reject({
				code: error.code,
				message: error.message,
				consume: error.consume == null ? 0 : error.consume
			});
		}

	});

	return promise;

}

/**
 * Voice-sample generation API (生成语音合成): synthesizes textstr with the
 * voice identified by uid and uploads the result under a fixed per-voice
 * cloud path keyed by the emotion index.
 * @param {String} uid			voice id (also used as VoiceType, parsed as integer)
 * @param {Object} state		voiceStore state
 * @param {String} textstr		text to synthesize
 * @param {Boolean} isEmotional	whether an emotion category applies
 * @param {String} emotional	emotion category name (e.g. "happy", "sad")
 * @returns {Promise<{text:String, fileID:String, SessionId:String, Subtitles:Array}>}
 *          rejects with {errCode, msg}
 */
const apiPostText2audioGenText = function(uid, state, textstr,isEmotional,emotional) {
	// Maps emotion category names to the index used in the uploaded file name.
	// Replaces the original switch statement; same mapping, same default (0).
	const EMOTION_INDEX = {
		neutral: "1",
		sad: "2",
		happy: "3",
		angry: "4",
		fear: "5",
		news: "6",
		story: "7",
		radio: "8",
		poetry: "9",
		call: "10"
	};
	const promise = new Promise(async (resolve, reject) => {

		// Convert the text into SSML markup (trailing character dropped first)
		const text = textstr.slice(0, -1);
		const ssml = textToSsml(text, "wx");
		// 0 when no/unknown emotion, otherwise the mapped string index
		let eIndex = 0;

		// Session id: uid + text length + timestamp
		const sessionId = uid + "-" + text.length + "-" + new Date().getTime();
		// TTS request payload
		let data = {
			Text: ssml,
			SessionId: sessionId,
			Volume: state.volume,
			Speed: realToSpeed(state.sonicSpeed),
			ProjectId: 0,
			ModelType: 1,
			VoiceType: parseInt(uid, 10),
			PrimaryLanguage: 1,
			SampleRate: 16000,
			Codec: state.fileType,
			EnableSubtitle: true,
			SegmentRate: 0
		}
		if (isEmotional === true) {
			data.EmotionCategory = emotional
			// Unknown categories keep the default eIndex of 0
			if (EMOTION_INDEX[emotional] !== undefined) {
				eIndex = EMOTION_INDEX[emotional];
			}
			data.EmotionIntensity = state.emotion
		}
		const options = {
			endpoint: process.uniEnv.BASE_URL,
			service: "tts",
			region: "ap-guangzhou",
			action: "TextToVoice",
			version: "2019-08-23"
		}
		console.log("uid:%s-eIndex:%s",uid,eIndex)
		// Send the synthesis request
		await wxttsPost("", data, options).then(async res => {
			if (res && res.data && res.data.Response) {
				const {
					RequestId,
					Audio,
					SessionId,
					Subtitles
				} = res.data.Response
				if (Audio === undefined) {
					// BUGFIX: must return after reject, otherwise execution
					// falls through and tries to write an undefined audio
					// payload to disk.
					reject({
						errCode: 1001,
						msg: "合成失败"
					});
					return;
				}
				// Write the base64 audio to a local temp file, then upload it
				const tempFilePath = wx.env.USER_DATA_PATH + "/" + sessionId + "." + state
					.fileType;
				const fileSystemManager = uni.getFileSystemManager();
				await fileSystemManager.writeFile({
					filePath: tempFilePath,
					data: Audio,
					encoding: "base64",
					success: async function(fileResp) {
						if (fileResp && fileResp.errMsg === "writeFile:ok") {
							// Fixed per-voice cloud path keyed by emotion index
							const result = await uniCloud.uploadFile({
								filePath: tempFilePath,
								cloudPath: "voice/"+uid+"/voice-" + uid + "-" + eIndex +"." + state.fileType,
								cloudPathAsRealPath: true,

								success: function(uploadResp) {
									resolve({
										text,
										fileID: uploadResp.fileID,
										SessionId,
										Subtitles
									});
								},
								fail: function(uploadErr) {
									reject({
										errCode: 1004,
										msg: "合成失败:" +
											uploadErr
									});
								}
							});

						} else {
							reject({
								errCode: 1004,
								msg: "合成失败"
							});
						}
					},
					fail: function(fileErr) {
						reject({
							errCode: 1003,
							msg: "合成失败:" + fileErr
						});
					}
				})

			} else {
				reject({
					errCode: 1002,
					msg: "合成失败"
				});
			}

		}).catch(res => {
			reject({
				errCode: 1000,
				msg: "合成失败:" + res
			});
		})
	});

	return promise;

}


module.exports = {
	/**
	 * Speech synthesis API (语音合成): synthesize, upload, resolve fileID.
	 * @param {Object} uid		uid
	 * @param {Object} state	voiceStore
	 */
	apiPostText2audio,
	/**
	 * Audition / preview (试听): synthesize to a local temp file only.
	 * @param {Object} uid		uid
	 * @param {Object} textStr	text to synthesize
	 * @param {Object} state	voiceStore
	 */
	apiPostAudition,
	/**
	 * Long-text speech synthesis (长文本语音合成): segment-by-segment
	 * synthesis with merged audio/subtitles, then upload.
	 * @param {Object} uid
	 * @param {Object} state
	 * @param {Object} longTextArr
	 */
	apiPostLongText2audio,
	
	/**
	 * Voice-sample generation (生成语音合成): per-voice sample upload
	 * keyed by emotion index.
	 * @param {Object} uid
	 * @param {Object} state
	 * @param {Object} textstr
	 * @param {Object} isEmotional
	 * @param {Object} emotional
	 */
	apiPostText2audioGenText
}