/**
 * Copyright (c) 2012-2014 Yunzhisheng(Shanghai) Co.Ltd. All rights reserved.
 * @FileName : BaiduVoiceProtocolImp.java
 * @ProjectName : DpadnaviVassistProj
 * @PackageName : com.dpadnavi.vassist.interfaces.imp
 * @Author : tomliang
 * @CreateDate : 2015-9-3
 */
package com.dpadnavi.vassist.interfaces.imp;

import java.util.ArrayList;
import java.util.Arrays;

import org.json.JSONArray;
import org.json.JSONException;
import org.json.JSONObject;
import android.content.ComponentName;
import android.content.Context;
import android.content.Intent;
import android.os.Bundle;
import android.speech.RecognitionListener;
import android.speech.SpeechRecognizer;
import com.baidu.speech.VoiceRecognitionService;
import com.dpadnavi.vassist.Constant;
import com.dpadnavi.vassist.R;
import com.dpadnavi.vassist.interfaces.VoiceListener;
import com.dpadnavi.vassist.interfaces.VoiceProtocol;
import com.dpadnavi.vassist.model.BaiduVoiceResponeModel;
import com.dpadnavi.vassist.utils.GsonUtils;
import com.dpadnavi.vassist.utils.LogUtils;

/**
 * @Module : 隶属模块名
 * @Comments : 语音协议实现
 * @Author : tomliang
 * @CreateDate : 2015-9-3
 * @ModifiedBy : 修改人
 * @ModifiedDate: 2015-9-3
 * @Modified: 2015-9-3: 实现基本功能
 */
public class BaiduVoiceProtocolImp
		implements
			VoiceProtocol<BaiduVoiceResponeModel>,
			RecognitionListener {

	// Recognizer lifecycle states (values mirror the Baidu speech demo).
	public static final int STATUS_None = 0;
	public static final int STATUS_WaitingReady = 2;
	public static final int STATUS_Ready = 3;
	public static final int STATUS_Speaking = 4;
	public static final int STATUS_Recognition = 5;
	/** Current recognizer state; one of the STATUS_* constants above. */
	private int status = STATUS_None;
	/** Event code delivered through onEvent() when the engine reports an error. */
	private static final int EVENT_ERROR = 11;

	private SpeechRecognizer speechRecognizer;
	private Context mContext;
	/** Callback sink for recognition results; may be null until set. */
	private VoiceListener<BaiduVoiceResponeModel> listener;
	private static final String TAG = "百度语音服务";

	/**
	 * Creates the Baidu {@link SpeechRecognizer} bound to
	 * {@link VoiceRecognitionService} and registers this object as its
	 * recognition listener. Must be called before any other method.
	 *
	 * @param context application/activity context used to create the recognizer
	 */
	@Override
	public void init(Context context) {
		LogUtils.e(TAG, TAG + " init");
		mContext = context;
		speechRecognizer = SpeechRecognizer.createSpeechRecognizer(context,
				new ComponentName(context, VoiceRecognitionService.class));
		speechRecognizer.setRecognitionListener(this);
	}

	/**
	 * Populates the recognition intent with prompt-sound resources and enables
	 * semantic parsing (NLU). Commented-out extras below document optional
	 * tuning knobs (output file, sample rate, language, VAD, domain, offline
	 * ASR) kept for reference.
	 *
	 * @param intent the intent later passed to startListening(); mutated in place
	 */
	public void bindParams(Intent intent) {
		// Prompt-sound resources for the various recognition phases.
		intent.putExtra(Constant.EXTRA_SOUND_START,
				R.raw.bdspeech_recognition_start);
		intent.putExtra(Constant.EXTRA_SOUND_END, R.raw.bdspeech_speech_end);
		intent.putExtra(Constant.EXTRA_SOUND_SUCCESS,
				R.raw.bdspeech_recognition_success);
		intent.putExtra(Constant.EXTRA_SOUND_ERROR,
				R.raw.bdspeech_recognition_error);
		intent.putExtra(Constant.EXTRA_SOUND_CANCEL,
				R.raw.bdspeech_recognition_cancel);

		// Save the raw recording produced during recognition.
		//intent.putExtra(Constant.EXTRA_OUTFILE, "sdcard/outfile.pcm");

		// Sample rate.
		//intent.putExtra(Constant.EXTRA_SAMPLE, Integer.parseInt(tmp));

		// Language.
		//intent.putExtra(Constant.EXTRA_LANGUAGE, tmp);

		// Enable natural-language understanding so results carry parsed semantics.
		intent.putExtra(Constant.EXTRA_NLU, "enable");

		// Voice activity detection.
		//intent.putExtra(Constant.EXTRA_VAD, tmp);

		// Vertical domain.
		//intent.putExtra(Constant.EXTRA_PROP, Integer.parseInt(tmp));

		// offline asr
//		{
//			intent.putExtra(Constant.EXTRA_OFFLINE_ASR_BASE_FILE_PATH,
//					"/sdcard/easr/s_1");
//			intent.putExtra(Constant.EXTRA_LICENSE_FILE_PATH,
//					"/sdcard/easr/license-tmp-20150530.txt");
//			if (null != prop) {
//				int propInt = Integer.parseInt(prop);
//				if (propInt == 10060) {
//					intent.putExtra(Constant.EXTRA_OFFLINE_LM_RES_FILE_PATH,
//							"/sdcard/easr/s_2_Navi");
//				} else if (propInt == 20000) {
//					intent.putExtra(Constant.EXTRA_OFFLINE_LM_RES_FILE_PATH,
//							"/sdcard/easr/s_2_InputMethod");
//				}
//			}
//			intent.putExtra(Constant.EXTRA_OFFLINE_SLOT_DATA,
//					buildTestSlotData());
//		}
	}

	/**
	 * Builds a sample offline-recognition slot-data JSON payload (names, songs,
	 * artists, apps, user commands). Used only by the commented-out offline ASR
	 * configuration in {@link #bindParams(Intent)}.
	 *
	 * @return the slot data serialized as a JSON string
	 */
	private String buildTestSlotData() {
		JSONObject slotData = new JSONObject();
		JSONArray name = new JSONArray().put("李涌泉").put("郭下纶");
		JSONArray song = new JSONArray().put("七里香").put("发如雪");
		JSONArray artist = new JSONArray().put("周杰伦").put("李世龙");
		JSONArray app = new JSONArray().put("手机百度").put("百度地图");
		JSONArray usercommand = new JSONArray().put("关灯").put("开门");
		try {
			slotData.put(Constant.EXTRA_OFFLINE_SLOT_NAME, name);
			slotData.put(Constant.EXTRA_OFFLINE_SLOT_SONG, song);
			slotData.put(Constant.EXTRA_OFFLINE_SLOT_ARTIST, artist);
			slotData.put(Constant.EXTRA_OFFLINE_SLOT_APP, app);
			slotData.put(Constant.EXTRA_OFFLINE_SLOT_USERCOMMAND, usercommand);
		} catch (JSONException e) {
			// BUGFIX: was silently swallowed; keys are string constants so this
			// should never fire, but log it rather than hide it.
			LogUtils.e(TAG, "buildTestSlotData failed: " + e.getMessage());
		}
		return slotData.toString();
	}

	/**
	 * Starts a recognition session using the parameters from
	 * {@link #bindParams(Intent)}. No-op if {@link #init(Context)} was not
	 * called yet.
	 */
	@Override
	public void start() {
		LogUtils.e(TAG, TAG + " start");
		if (speechRecognizer == null) {
			// Defensive guard: init() was never called.
			return;
		}
		Intent intent = new Intent();
		bindParams(intent);
		speechRecognizer.startListening(intent);
	}

	/**
	 * Stops capturing audio; recognition of already-captured audio continues
	 * and results are still delivered. No-op before init().
	 */
	@Override
	public void stop() {
		LogUtils.e(TAG, TAG + " stop");
		if (speechRecognizer != null) {
			speechRecognizer.stopListening();
		}
	}

	/**
	 * Cancels the current recognition session; no further results are
	 * delivered. No-op before init(). (Name kept as "cancle" to match the
	 * {@code VoiceProtocol} interface.)
	 */
	@Override
	public void cancle() {
		LogUtils.e(TAG, TAG + " cancle");
		if (speechRecognizer != null) {
			speechRecognizer.cancel();
		}
	}

	/**
	 * Releases the recognizer. The instance must be re-initialized via
	 * {@link #init(Context)} before reuse. (Name kept as "destory" to match
	 * the {@code VoiceProtocol} interface.)
	 */
	@Override
	public void destory() {
		LogUtils.e(TAG, TAG + " destory");
		if (speechRecognizer != null) {
			speechRecognizer.destroy();
		}
	}

	/** The engine detected that the user has started speaking. */
	@Override
	public void onBeginningOfSpeech() {
		status = STATUS_Speaking;
		LogUtils.i(TAG, "检测到用户的已经开始说话");
	}

	/** Raw audio buffer callback — intentionally unused. */
	@Override
	public void onBufferReceived(byte[] buffer) {

	}

	/**
	 * The engine detected end of speech; recognition of the captured audio is
	 * now in progress. Forwards the event to the registered listener.
	 */
	@Override
	public void onEndOfSpeech() {
		status = STATUS_Recognition;
		LogUtils.i(TAG, "检测到用户的已经停止说话");
		if (listener != null) {
			listener.onEndOfSpeech();
		}
	}

	/**
	 * Maps the {@link SpeechRecognizer} error code to a human-readable log
	 * message, resets the state machine, and forwards the code to the
	 * registered listener.
	 *
	 * @param error one of the SpeechRecognizer.ERROR_* codes
	 */
	@Override
	public void onError(int error) {
		status = STATUS_None;
		StringBuilder sb = new StringBuilder();
		switch (error) {
			case SpeechRecognizer.ERROR_AUDIO :
				sb.append("音频问题");
				break;
			case SpeechRecognizer.ERROR_SPEECH_TIMEOUT :
				sb.append("没有语音输入");
				break;
			case SpeechRecognizer.ERROR_CLIENT :
				sb.append("其它客户端错误");
				break;
			case SpeechRecognizer.ERROR_INSUFFICIENT_PERMISSIONS :
				sb.append("权限不足");
				break;
			case SpeechRecognizer.ERROR_NETWORK :
				sb.append("网络问题");
				break;
			case SpeechRecognizer.ERROR_NO_MATCH :
				sb.append("没有匹配的识别结果");
				break;
			case SpeechRecognizer.ERROR_RECOGNIZER_BUSY :
				sb.append("引擎忙");
				break;
			case SpeechRecognizer.ERROR_SERVER :
				sb.append("服务端错误");
				break;
			case SpeechRecognizer.ERROR_NETWORK_TIMEOUT :
				sb.append("连接超时");
				break;
		}
		sb.append(":" + error);
		LogUtils.i(TAG, sb.toString());
		if (listener != null) {
			listener.onError(error);
		}
	}

	/**
	 * Handles engine-specific events: custom error reports (EVENT_ERROR) and
	 * online/offline engine switches.
	 *
	 * @param eventType event code from the Baidu recognition service
	 * @param params event payload; keys depend on the event type
	 */
	@Override
	public void onEvent(int eventType, Bundle params) {
		switch (eventType) {
			case EVENT_ERROR :
				String reason = params.get("reason") + "";
				LogUtils.e(TAG, "EVENT_ERROR, " + reason);
				break;
			case VoiceRecognitionService.EVENT_ENGINE_SWITCH :
				// engine_type 0 = online, otherwise offline.
				int type = params.getInt("engine_type");
				LogUtils.e(TAG, "*引擎切换至" + (type == 0 ? "在线" : "离线"));
				break;
		}
	}

	/**
	 * Logs interim (partial) recognition hypotheses. The listener is notified
	 * with {@code null} because no parsed model exists for partial results.
	 *
	 * @param partialResults bundle containing RESULTS_RECOGNITION candidates
	 */
	@Override
	public void onPartialResults(Bundle partialResults) {
		ArrayList<String> nbest = partialResults
				.getStringArrayList(SpeechRecognizer.RESULTS_RECOGNITION);
		// BUGFIX: guard against a null list before dereferencing it.
		if (nbest != null && nbest.size() > 0) {
			LogUtils.i(TAG,
					"~临时识别结果：" + Arrays.toString(nbest.toArray(new String[0])));
		}
		if (listener != null) {
			listener.onPartialResults(null);
		}
	}

	/**
	 * The engine is ready to receive speech; forwards the event to the
	 * registered listener.
	 *
	 * @param arg0 engine parameters (unused)
	 */
	@Override
	public void onReadyForSpeech(Bundle arg0) {
		status = STATUS_Ready;
		LogUtils.i(TAG, "准备就绪，可以开始说话");
		if (listener != null) {
			listener.onReadyForSpeech();
		}
	}

	/**
	 * Final-result callback. Parses the engine's "origin_result" JSON, extracts
	 * the NLU payload ("content" → "json_res"), deserializes it into a
	 * {@link BaiduVoiceResponeModel}, and delivers it to the listener. On any
	 * parse failure (or missing listener-deliverable model) the listener
	 * receives {@code null}.
	 *
	 * @param results bundle with RESULTS_RECOGNITION and "origin_result"
	 */
	@Override
	public void onResults(Bundle results) {
		status = STATUS_None;
		ArrayList<String> nbest = results
				.getStringArrayList(SpeechRecognizer.RESULTS_RECOGNITION);
		// BUGFIX: getStringArrayList may return null — guard before use.
		if (nbest != null) {
			LogUtils.i(
					TAG,
					"识别成功："
							+ Arrays.toString(nbest.toArray(new String[nbest.size()])));
		}

		String json = results.getString("origin_result");
		try {
			// BUGFIX: parse the JSON once instead of twice.
			JSONObject obj = new JSONObject(json);
			LogUtils.i(TAG, obj.toString(4));
			String content = obj.getString("content");
			// BUGFIX: original condition used "||" and was always true;
			// "&&" is what was intended (non-null AND non-empty).
			if (content != null && !"".equals(content)) {
				JSONObject contentobj = new JSONObject(content);
				String json_res = contentobj.getString("json_res");
				// Consistency: use LogUtils instead of System.out.println.
				LogUtils.i(TAG, json_res);
				BaiduVoiceResponeModel model = GsonUtils.json2bean(json_res,
						BaiduVoiceResponeModel.class);
				// BUGFIX: null-check BEFORE dereferencing; the original called
				// model.setRaw_json() first, which NPE'd when Gson returned null.
				if (model != null) {
					model.setRaw_json(json_res);
					if (listener != null) {
						listener.onResults(model);
						return;
					}
				}
			}
		} catch (JSONException e) {
			LogUtils.e(TAG, "origin_result=[warning: bad json]\n" + json);
			e.printStackTrace();
		}

		// Fallback: no deliverable model — tell the listener with null.
		if (listener != null) {
			listener.onResults(null);
		}
	}

	/** Audio level callback — intentionally unused. */
	@Override
	public void onRmsChanged(float rmsdB) {

	}

	/**
	 * Registers the callback sink that receives recognition lifecycle events
	 * and parsed results.
	 *
	 * @param listener the listener to notify; may be null to detach
	 */
	@Override
	public void setVoiceListener(VoiceListener<BaiduVoiceResponeModel> listener) {
		this.listener = listener;
	}

}
