package com.infisight.hudprojector.kdxfspeech;

import org.json.JSONException;
import org.json.JSONObject;

import android.content.Context;
import android.content.Intent;
import android.media.AudioManager;
import android.media.MediaRecorder;
import android.media.SoundPool;
import android.os.Bundle;
import android.os.Environment;
import android.util.Log;
import android.widget.Toast;

import com.google.gson.Gson;
import com.iflytek.cloud.ErrorCode;
import com.iflytek.cloud.InitListener;
import com.iflytek.cloud.RecognizerListener;
import com.iflytek.cloud.RecognizerResult;
import com.iflytek.cloud.SpeechConstant;
import com.iflytek.cloud.SpeechError;
import com.iflytek.cloud.SpeechRecognizer;
import com.iflytek.cloud.SpeechUtility;
import com.iflytek.cloud.VoiceWakeuper;
import com.iflytek.cloud.WakeuperListener;
import com.iflytek.cloud.WakeuperResult;
import com.iflytek.cloud.util.ResourceUtil;
import com.iflytek.cloud.util.ResourceUtil.RESOURCE_TYPE;
import com.infisight.hudprojector.MainActivity;
import com.infisight.hudprojector.R;
import com.infisight.hudprojector.data.SpeechVoiceData;
import com.infisight.hudprojector.data.VoiceDataClass;
import com.infisight.hudprojector.util.CommonUtil;
import com.infisight.hudprojector.util.Constants;
import com.infisight.hudprojector.util.ToastUtil;

/**
 * Speech recognition front-end: drives the iFlytek speech recognizer (dictation)
 * and the local voice wake-up engine, and broadcasts recognized voice commands
 * to the rest of the app.
 *
 * @author hao
 */
public class SpeechVoiceRecognition implements WakeuperListener {

	public String TAG = "SpeechVoiceRecognition";
	// Cloud dictation recognizer (iat); created lazily in init().
	SpeechRecognizer mIat = null;
	Context con;
	String content;
	Gson gson;
	// Shared TTS/compound-speech helper and process-wide singleton instance.
	// NOTE(review): static references holding a Context can leak the Activity;
	// consider using the application Context here.
	public static SpeechVoiceCompound svc;
	public static SpeechVoiceRecognition svrManager;
	// Parsed command extracted from the last recognition result.
	VoiceDataClass vdc;
	// Local voice wake-up engine.
	private VoiceWakeuper mIvw;
	int score = 0;
	private SoundPool soundPool;
	int isGoback = 0;
	// FIXME: instantiating an Activity with `new` is invalid on Android — this
	// object has no Context and must never be used. Kept only so the field
	// remains present for any package-local callers; remove once confirmed unused.
	MainActivity main = new MainActivity();

	/**
	 * Builds the recognizer: loads the confirmation sound, starts the local
	 * wake-up engine, binds the compound-speech helper and begins listening.
	 *
	 * @param mContext caller context, retained for broadcasts
	 * @param str      unused legacy parameter (kept for signature compatibility)
	 * @param tm       compound-speech helper; when null the singleton is used
	 */
	public SpeechVoiceRecognition(Context mContext, String str,
			SpeechVoiceCompound tm) {
		soundPool = new SoundPool(10, AudioManager.STREAM_SYSTEM, 5);
		// First (and only) load — sound id 1 is played on successful commands.
		soundPool.load(mContext, R.raw.push_bell_b, 1);
		initWake(mContext);
		con = mContext;
		if (tm != null) {
			svc = tm;
		} else {
			svc = SpeechVoiceCompound.getInstance(con, "", null);
		}
		init(mContext);
	}

	/**
	 * Starts the offline wake-up engine and configures it for continuous
	 * ("keep alive") wake-up detection. No-op if already initialized.
	 */
	void initWake(Context mContext) {
		if (mIvw == null) {
			StringBuffer param = new StringBuffer();
			String resPath = ResourceUtil.generateResourcePath(mContext,
					RESOURCE_TYPE.assets, "ivw/" + "558139a8" + ".jet");
			param.append(ResourceUtil.IVW_RES_PATH + "=" + resPath);
			param.append("," + ResourceUtil.ENGINE_START + "="
					+ SpeechConstant.ENG_IVW);
			boolean ret = SpeechUtility.getUtility().setParameter(
					ResourceUtil.ENGINE_START, param.toString());
			if (!ret) {
				Log.d("wake", "启动本地引擎失败！");
			}
			// Create the wake-up engine once; the original code called
			// createWakeuper twice back-to-back, which is redundant and can
			// race the (null) init listener.
			mIvw = VoiceWakeuper.createWakeuper(mContext, null);
			if (mIvw == null) {
				mIvw = VoiceWakeuper.getWakeuper();
			}
			if (mIvw != null) {
				// Clear all previous parameters.
				mIvw.setParameter(SpeechConstant.PARAMS, null);
				mIvw.setParameter(SpeechConstant.IVW_SST, "wakeup");
				// Keep listening for wake-up words continuously.
				mIvw.setParameter(SpeechConstant.KEEP_ALIVE, "1");
			}
		}
	}

	/**
	 * Lazily creates the dictation recognizer and (re)starts listening.
	 */
	void init(Context c) {
		if (mIat == null) {
			mIat = SpeechRecognizer.createRecognizer(c, mInitListener);
			setParam();
		}
		mIat.startListening(recognizerListener);
	}

	/**
	 * Singleton accessor.
	 *
	 * @param svr unused; the current static {@link #svc} helper is passed to
	 *            the constructor instead (kept for signature compatibility)
	 */
	public static SpeechVoiceRecognition getInstance(Context context,
			String str, SpeechVoiceRecognition svr) {
		if (svrManager == null) {
			svrManager = new SpeechVoiceRecognition(context, str, svc);
		}
		return svrManager;
	}

	/**
	 * Recognizer-engine init listener: logs success/failure only.
	 */
	private InitListener mInitListener = new InitListener() {

		@Override
		public void onInit(int code) {
			if (code != ErrorCode.SUCCESS) {
				Log.d("mContext", "SpeechRecognizer init() code = " + code);
			} else {
				Log.d("mContext", "引擎初始化成功");
			}
		}
	};

	/**
	 * Dictation listener: turns recognition results into broadcast commands
	 * and hands control back to the wake-up engine on end/error.
	 */
	private RecognizerListener recognizerListener = new RecognizerListener() {

		@Override
		public void onBeginOfSpeech() {
			Log.i("mContext", "开始说话");
			Log.d("TIMECOUNT", "开始说话:" + CommonUtil.getCurrentMinute());
			CommonUtil.processBroadcast(con, Constants.MAIN_ICON_RUI_STATE, 1);
		}

		@Override
		public void onError(SpeechError error) {
			Log.i("mContext",
					error.getPlainDescription(true) + "听写监听器"
							+ error.getErrorCode());
			// On dictation failure, fall back to wake-up listening.
			if (mIvw != null) {
				mIvw.startListening(SpeechVoiceRecognition.this);
			}
			if (mIat != null) {
				mIat.stopListening();
			}
		}

		@Override
		public void onEndOfSpeech() {
			Log.i("mContext", "结束说话");
			Log.d("TIMECOUNT", "结束说话:" + CommonUtil.getCurrentMinute());
			// Announce the "going back" prompt, then return to wake-up mode.
			svc.startSpeaking(Constants.F_C_RUI_GO_BACK);
			CommonUtil.processBroadcast(con, Constants.MAIN_ICON_RUI_STATE, 4);
			if (mIat != null) {
				mIat.stopListening();
			}
			if (mIvw != null) {
				mIvw.startListening(SpeechVoiceRecognition.this);
			}
		}

		@Override
		public void onResult(RecognizerResult results, boolean isLast) {
			gson = new Gson();
			String text = JsonParser.parseIatResult(results.getResultString());
			if (text != null) {
				Log.i(TAG, text + "----------------");
				Log.d("TIMECOUNT", "说话结果:" + CommonUtil.getCurrentMinute());
				vdc = HandleSpeechData.handleDataFunction(con, text);
				if (vdc == null) {
					// Text recognized but no command matched — ignore.
					Log.i(TAG, "text is null");
				} else {
					// Matched a command: play confirmation, update UI state
					// and broadcast the command payload.
					soundPool.play(1, 1, 1, 0, 0, 1);
					CommonUtil.processBroadcast(con,
							Constants.MAIN_ICON_RUI_STATE, 2);
					SpeechVoiceData svData = new SpeechVoiceData();
					svData.setCommand(vdc.getCommand());
					svData.setValue(gson.toJson(vdc));
					processBroadcast(con, svData);

					svc = SpeechVoiceCompound.getInstance(con,
							vdc.getPromptKey(), SpeechVoiceRecognition.this);
				}
			}
		}

		@Override
		public void onVolumeChanged(int volume) {
			Log.i("mContext", "当前正在说话，音量大小：" + volume);
			// Forward the live volume level to UI listeners.
			Intent intentAction = new Intent("onVolumeChanged");
			intentAction.putExtra("onVolumeChanged", volume);
			con.sendBroadcast(intentAction);
		}

		@Override
		public void onEvent(int eventType, int arg1, int arg2, Bundle obj) {

			Log.i(TAG, "onEvent");
		}
	};

	/**
	 * Configures the dictation recognizer: cloud engine, Mandarin Chinese,
	 * VAD endpoints, timeouts, punctuation off, mic source and audio dump path.
	 */
	public void setParam() {
		// Clear all previous parameters.
		mIat.setParameter(SpeechConstant.PARAMS, null);
		// Use the cloud recognition engine.
		mIat.setParameter(SpeechConstant.ENGINE_TYPE, SpeechConstant.TYPE_CLOUD);
		// Language: Simplified Chinese, Mandarin accent.
		mIat.setParameter(SpeechConstant.LANGUAGE, "zh_cn");
		mIat.setParameter(SpeechConstant.ACCENT, "mandarin");
		// vad_bos: leading-silence timeout before speech is expected;
		// vad_eos: trailing-silence duration after which recording stops.
		mIat.setParameter(SpeechConstant.VAD_BOS, "2000");
		mIat.setParameter(SpeechConstant.VAD_EOS, "8000");
		// Network connection timeout (ms).
		mIat.setParameter(SpeechConstant.NET_TIMEOUT, "15000");
		// Disable automatic punctuation in results.
		mIat.setParameter(SpeechConstant.ASR_PTT, "0");

		mIat.setParameter(SpeechConstant.VOLUME, "70");
		// Capture from the device microphone.
		mIat.setParameter(SpeechConstant.AUDIO_SOURCE,
				MediaRecorder.AudioSource.MIC + "");
		// Dump raw recorded audio to external storage for debugging.
		mIat.setParameter(SpeechConstant.ASR_AUDIO_PATH,
				Environment.getExternalStorageDirectory()
						+ "/infisight/wavaudios.pcm");
	}

	/**
	 * Cancels any in-flight recognition and releases references.
	 */
	public void destroy() {
		if (mIat != null) {
			mIat.cancel();
			mIat = null;
		}
		con = null;
	}

	/**
	 * Broadcasts the recognized command so UI components can react.
	 * The action and extra key are both the command string from {@link #vdc}.
	 */
	private void processBroadcast(Context context, SpeechVoiceData info) {
		gson = new Gson();
		Intent intentAction = new Intent(vdc.getCommand());
		intentAction.putExtra(vdc.getCommand(), gson.toJson(info));
		context.sendBroadcast(intentAction);
		Log.i(TAG, vdc.getCommand() + gson.toJson(info));
	}

	/**
	 * Wake-up hit: parse the confidence score and, if positive, stop wake-up
	 * listening, start dictation and play the prompt.
	 */
	@Override
	public void onResult(WakeuperResult result) {
		Log.i("wake", "语音唤醒开始了");
		String text = result.getResultString();
		Log.i("wake", text);
		JSONObject object;
		try {
			object = new JSONObject(text);
			// optInt avoids the NumberFormatException the original
			// Integer.parseInt(optString(...)) threw when "score" was absent
			// (optString returns "" in that case).
			score = object.optInt("score", 0);
			Log.i("wake", score + ":score");
			if (score >= 1) {
				if (con != null) {
					if (mIvw != null) {
						mIvw.stopListening();
					}
					init(con);
					soundPool.play(1, 1, 1, 0, 0, 1);
					svc.startSpeaking(Constants.F_C_WHAT_YOU_DO_WAKE);
					CommonUtil.processBroadcast(con,
							Constants.MAIN_ICON_RUI_STATE, 0);
					isGoback = 0;
				}
			}
		} catch (JSONException e) {
			Log.e("wake", "wakeup result is not valid JSON: " + text, e);
		}
	}

	@Override
	public void onError(SpeechError error) {
		Log.i("wake", error.getPlainDescription(true));
	}

	@Override
	public void onBeginOfSpeech() {
		Log.i("wake", "语音唤醒开始了");
	}

	@Override
	public void onEvent(int eventType, int isLast, int arg2, Bundle obj) {
		Log.i("wake", "语音唤醒开始了----onEvent");
	}
}
