package com.lym.voice.controller;

import java.io.ByteArrayOutputStream;
import java.io.InputStream;
import java.util.HashMap;
import java.util.Map;

import javax.servlet.http.HttpServletRequest;

import org.springframework.stereotype.Controller;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RequestParam;
import org.springframework.web.bind.annotation.ResponseBody;
import org.springframework.web.multipart.MultipartFile;
import org.springframework.web.servlet.ModelAndView;

import com.baidu.aip.util.Base64Util;
import com.lym.voice.client.VoiceSdkClient;
/**
 * HTML5 voice recording controller (speech recognition + synthesis demo).
 * @author liyingming
 * @Jautodoc 荒墨丶迷失  百度AI社区版主 
 */
@Controller
@RequestMapping(value = "/voiceSpeech")
public class VoiceSpeechController {

	// Speech recognition / synthesis client (wrapper around the Baidu Java SDK).
	VoiceSdkClient vsc = new VoiceSdkClient(); 

	/**
	 * Serves the HTML5 voice demo page.
	 *
	 * @return a ModelAndView resolving to the /voice/voiceIndex view
	 * @throws Exception declared for interface compatibility; nothing here throws
	 */
	@RequestMapping(value = "/voice.do")
	public ModelAndView queryVoice() throws Exception {
		ModelAndView modelAndView = new ModelAndView();
		modelAndView.setViewName("/voice/voiceIndex");
		return modelAndView;
	}
	
	/**
	 * Speech endpoint: recognizes the uploaded audio, chooses a canned text
	 * reply, synthesizes that reply to audio, and returns everything as JSON.
	 *
	 * @param audioData uploaded audio as a multipart form field (note: file
	 *                  uploads must be received as {@link MultipartFile})
	 * @param request   current HTTP request, passed through to synthesis
	 * @return map with keys "Rtext" (recognized text), "res" (reply text),
	 *         "base64Data" (Base64-encoded reply audio) and "success" (true);
	 *         on any error, "success" is false and "data" holds the message
	 * @throws Exception declared for interface compatibility; errors are
	 *                   caught and reported through the result map instead
	 */
	@RequestMapping(value = "/save.do")
	@ResponseBody
	public Map<String, Object> save(@RequestParam MultipartFile audioData,HttpServletRequest request) throws Exception {
		Map<String, Object> modelMap = new HashMap<String, Object>();
		try {
			//------------------------ speech recognition start ---------------------------
			// MultipartFile.getBytes() replaces the previous manual stream copy,
			// which never closed the upload InputStream (resource leak).
			byte[] audioBytes = audioData.getBytes();
			String recognizedText = vsc.getVoiceBySdk(audioBytes);
			System.out.println("语音识别内容:"+recognizedText);
			modelMap.put("Rtext", recognizedText);
			//------------------------ speech recognition end ---------------------------
			
			//------------------------ speech synthesis start ---------------------------
			// Pick a canned reply from the recognized text.
			// (Replace with real dialog logic, or plug in Baidu UNIT.)
			String res = "";
			if("你好".equals(recognizedText)){
				res = "我知道你也很好呀";
			}else{
				res = "对不起，我不太明白";
			}
			// Synthesize the reply audio, then Base64-encode it for the browser.
			byte[] data = vsc.synthesis(res,request);
			String base64Data = Base64Util.encode(data);
			modelMap.put("base64Data", base64Data);
			modelMap.put("res", res);
			//------------------------ speech synthesis end ---------------------------
			
			modelMap.put("success", true);
        } catch (Exception e) {
            // Report the failure to the client instead of propagating;
            // TODO(review): replace printStackTrace with a proper logger.
            e.printStackTrace();
            modelMap.put("success", false);
			modelMap.put("data", e.getMessage());
        }
		return modelMap;
	}
}
