/*
 * Copyright (C) 2016-2017 AnStudio
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not
 * use this file except in compliance with the License. You may obtain a copy of
 * the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations under
 * the License.
 */

package com.astudio.lhasa.workers.recognizers;

import android.content.Context;
import android.os.Bundle;
import android.util.Log;

import com.astudio.lhasa.MyApplication;
import com.astudio.lhasa.constants.Languages;
import com.astudio.lhasa.interfaces.IInterpreterCallback;
import com.astudio.lhasa.utils.CollectionHelper;
import com.iflytek.cloud.InitListener;
import com.iflytek.cloud.RecognizerListener;
import com.iflytek.cloud.RecognizerResult;
import com.iflytek.cloud.SpeechConstant;
import com.iflytek.cloud.SpeechError;
import com.iflytek.cloud.SpeechRecognizer;

import org.json.JSONArray;
import org.json.JSONException;
import org.json.JSONObject;
import org.json.JSONTokener;

import java.security.InvalidParameterException;
import java.util.ArrayList;
import java.util.List;

/**
 * {@link SpeechInputRecognizer} converts speech to text, i.e. it acts as
 *      S(peech)T(o)T(ext).
 *  It is designed as a singleton: use <code>startListening</code> to start an STT session, and
 *      <code>stopListening</code> to end the session while keeping the words still being
 *      recognized, or <code>cancelRecognizer</code> to drop the in-flight words instead.
 */
public class SpeechInputRecognizer {
    private static final String TAG = SpeechInputRecognizer.class.getSimpleName();
    /** Singleton instance; volatile so double-checked locking in {@link #getInstance()} is safe. */
    private static volatile SpeechInputRecognizer mInstance = null;
    private Context mContext = MyApplication.getTIApplication();
    /** Real recognizer on Java layer */
    private SpeechRecognizer mRecognizer = null;
    /** Callback which is used to notify client that recognizer state changes */
    private IInterpreterCallback mCallback = null;
    /** Certain language should be defined here, mandarin by default */
    private int mLanguage = Languages.INTERPRETER_LANGUAGE_CHINESE_MANDARIN;
    /** Handle identifying the current recognize session; -1 until the first session starts. */
    private int mCurHandler = -1;

    /** iFlytek internal SDK callback */
    private RecognizerListener mRecognizerListener = new RecognizerListener() {
        @Override
        public void onResult(RecognizerResult results, boolean isLast) {
            if (mCallback != null) {
                // Flatten the server's JSON payload into the recognized words
                // before forwarding them to the client.
                mCallback.onRecognizeDone(mCurHandler, CollectionHelper.ListToString(
                        parseJSON2Str(results.getResultString())), isLast);
            }
        }

        @Override
        public void onError(SpeechError error) {
            Log.e(TAG, error.getPlainDescription(true));
            if (mCallback != null) {
                mCallback.onInterpreterError(mCurHandler, error.getErrorCode()
                        , error.getPlainDescription(true));
            }
        }

        @Override
        public void onVolumeChanged(int volume, byte[] data) {
            if (mCallback != null) {
                mCallback.onVolumeChange(mCurHandler, volume, data);
            }
        }

        @Override
        public void onBeginOfSpeech() {
            //TODO: Add lines to handle speech begins
        }

        @Override
        public void onEndOfSpeech() {
            //TODO: Add lines to handle speech ends
        }

        @Override
        public void onEvent(int eventType, int arg1, int arg2, Bundle obj) {
            //TODO: Add lines to handle speech events
        }
    };

    private SpeechInputRecognizer() {
        //Recognizer initialize; the init result code is only logged.
        mRecognizer = SpeechRecognizer.createRecognizer(mContext, new InitListener() {
            @Override
            public void onInit(int code) {
                // FIX: previously logged with Log.e under a leftover debug tag
                // ("aifeng"); the init code is informational, so log it as
                // debug output under the class TAG.
                Log.d(TAG, "onInit: " + code);
            }
        });
        mRecognizer.setParameter(SpeechConstant.DOMAIN, "iat");
    }

    /**
     * Update the recognizer language.
     * @param lang language id, one of the {@code Languages.INTERPRETER_LANGUAGE_*} constants
     */
    private void updateRecognizeLanguage(int lang) {
        mLanguage = lang;
        // LANGUAGE stays "zh_cn" for every supported language; the concrete
        // variant is selected through the ACCENT parameter below.
        // NOTE(review): assumes all supported languages are Chinese variants — confirm.
        mRecognizer.setParameter(SpeechConstant.LANGUAGE, "zh_cn");
        mRecognizer.setParameter(SpeechConstant.ACCENT, Languages.getLanguageNameByType(
                Languages.LANGUAGE_NAME_TYPE_FOR_SPEECH_RECOGNIZER, mLanguage));
        if (mLanguage == Languages.INTERPRETER_LANGUAGE_CHINESE_TIBETAN) {
            //For tibetan, a 16K sample rate is needed
            mRecognizer.setParameter(SpeechConstant.SAMPLE_RATE, "16000");
        }
    }

    /**
     * Since the result from server is formatted in JSON, translate it to a list of the
     * recognized words (the "w" entries). Best-effort: on a malformed payload the words
     * collected so far are returned instead of throwing.
     * @param jStr JSON result string from the recognizer server
     * @return the recognized words, possibly empty, never {@code null}
     */
    private List<String> parseJSON2Str(String jStr) {
        List<String> ret = new ArrayList<>();
        /*
         * Expected payload shape:
         * {"sn":1,"ls":false,"bg":0,"ed":0,"ws":
         * [{"bg":0,"cw":[{"sc":0.00,"w":"你"}]},{"bg":0,"cw":[{"sc":0.00,"w":"们"}]}]}
         */
        try {
            JSONTokener jsonParser = new JSONTokener(jStr);
            JSONObject result = (JSONObject) jsonParser.nextValue();

            //[{"bg":0,"cw":[{"sc":0.00,"w":"你"}]},{"bg":0,"cw":[{"sc":0.00,"w":"们"}]}]
            JSONArray ws = result.getJSONArray("ws");
            for (int i = 0; i < ws.length(); i++) {
                JSONObject jo = ws.getJSONObject(i);
                //[{"sc":0.00,"w":"你"}]
                JSONArray cw = jo.getJSONArray("cw");
                for (int j = 0; j < cw.length(); j++) {
                    //"你"
                    ret.add(cw.getJSONObject(j).getString("w"));
                }
            }
        } catch (JSONException | ClassCastException ex) {
            // ClassCastException covers a top-level payload that is not a JSON
            // object. The original code returned from a finally block, which
            // silently swallowed every throwable from the try block — keep the
            // best-effort contract but make the handling explicit and logged.
            Log.e(TAG, "Failed to parse recognizer result", ex);
        }
        return ret;
    }

    /**
     * Get instance of this recognizer.
     * @return  instance if exists, otherwise new an instance
     */
    public static SpeechInputRecognizer getInstance() {
        // FIX: proper double-checked locking. The original version did not
        // re-check inside the synchronized block, so a second thread waiting
        // on the lock would create (and publish) a second instance.
        SpeechInputRecognizer instance = mInstance;
        if (instance == null) {
            synchronized (SpeechInputRecognizer.class) {
                instance = mInstance;
                if (instance == null) {
                    instance = new SpeechInputRecognizer();
                    mInstance = instance;
                }
            }
        }
        return instance;
    }

    /**
     * Start speech recognize.
     * @param handler client-chosen session handle, echoed back in every callback
     * @param callback receiver for recognize results/errors; must not be {@code null}
     * @param language language id, see {@code Languages.INTERPRETER_LANGUAGE_*}
     * @return always {@code true}
     * @throws InvalidParameterException if the recognizer is not initialized or
     *         {@code callback} is {@code null}
     */
    public boolean startListening(int handler, IInterpreterCallback callback, int language)
            throws InvalidParameterException {
        Log.d(TAG, "Start Speech Recognizer");
        if (mInstance == null || mRecognizer == null || callback == null) {
            throw new InvalidParameterException("Use recognizer without initialization");
        }

        mCallback = callback;
        updateRecognizeLanguage(language);

        mRecognizer.startListening(mRecognizerListener);
        mCurHandler = handler;

        // callback is guaranteed non-null here (validated above), so the
        // original redundant null check was removed. Let's Go.
        mCallback.onRecognizeStart(handler);
        return true;
    }

    /**
     * End recognize session and DROP the rest part which is under recognizing.
     * @return always {@code true}
     */
    public boolean cancelRecognizer() {
        mRecognizer.cancel();
        return true;
    }

    /**
     * End recognize session and wait for the rest part which is under recognizing.
     * @return always {@code true}
     */
    public boolean stopListening() {
        mRecognizer.stopListening();
        Log.d(TAG, "Stop Speech Recognizer");
        return true;
    }

    /**
     * Check recognizer state.
     * @return {@code true} while a recognize session is active
     */
    public boolean isRecognizing() {
        return mRecognizer.isListening();
    }
}
