﻿using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Data;
using System.Drawing;
using System.Linq;
using System.Text;
using System.Windows.Forms;
using SpeechLib;
using System.Diagnostics;
using System.Net.Sockets;
using System.Net;

namespace Speak
{
    /// <summary>
    /// Dictation front-end: runs an in-process SAPI recognizer and broadcasts each
    /// recognized phrase over UDP (big-endian UTF-32) to port 50000 on the local subnet.
    /// </summary>
    public partial class Form1 : Form
    {
        // SAPI COM objects are kept as fields so their references stay alive
        // for the lifetime of the form (events stop firing if they are collected).
        //private SpeechLib.SpSharedRecoContext objRecoContext;
        private SpeechLib.SpInProcRecoContext objRecoContext;
        private SpeechLib.ISpeechRecoGrammar grammar;
        private SpeechLib.SpObjectTokenCategory category;
        private SpeechLib.SpObjectToken token;
        private string strData = "No recording yet";

        // UDP port the recognized text is broadcast on.
        private const int BroadcastPort = 50000;

        private UdpClient uc;
        private Encoding utf32be;
        // Cached so we do not re-parse the broadcast address on every recognition event.
        private IPEndPoint broadcastEndPoint;

        public Form1()
        {
            InitializeComponent();

            uc = new UdpClient();
            uc.EnableBroadcast = true;
            // Big-endian UTF-32 with BOM — presumably what the receiving peer
            // expects; NOTE(review): confirm against the listener's decoder.
            utf32be = new UTF32Encoding(true, true);
            broadcastEndPoint = new IPEndPoint(IPAddress.Broadcast, BroadcastPort);
            start_dictation();
        }

        /// <summary>
        /// Lazily creates the SAPI recognition context, wires its events, loads a
        /// static dictation grammar, and (re)activates dictation. Safe to call
        /// repeatedly: initialization happens only on the first call.
        /// </summary>
        private void start_dictation()
        {
            try
            {
                if (objRecoContext == null)
                {
                    objRecoContext = new SpeechLib.SpInProcRecoContext();
                    //objRecoContext = new SpeechLib.SpSharedRecoContext();
                    // Point the in-process recognizer at the default audio-input device.
                    category = new SpeechLib.SpObjectTokenCategory();
                    token = new SpeechLib.SpObjectToken();
                    category.SetId(SpeechLib.SpeechStringConstants.SpeechCategoryAudioIn, false);
                    token.SetId(category.Default, category.Id, false);
                    objRecoContext.Recognizer.AudioInput = token;
                    objRecoContext.Recognition += new _ISpeechRecoContextEvents_RecognitionEventHandler(RecoContext_Recognition);
                    objRecoContext.AudioLevel += new _ISpeechRecoContextEvents_AudioLevelEventHandler(RecoContext_AudioLevel);
                    grammar = objRecoContext.CreateGrammar(1);
                    grammar.DictationLoad("", SpeechLoadOption.SLOStatic);
                }
                grammar.DictationSetState(SpeechRuleState.SGDSActive);
                objRecoContext.State = SpeechRecoContextState.SRCS_Enabled;
            }
            catch (Exception ex)
            {
                System.Windows.Forms.MessageBox.Show("Exception caught when initializing SAPI." + " This application may not run correctly.\r\n\r\n" + ex.ToString(), "Error");
            }

        }

        // "Start" button: (re)activate dictation.
        private void button1_Click(object sender, System.EventArgs e)
        {
            start_dictation();
        }

        // "Stop" button: deactivate dictation.
        private void button2_Click(object sender, System.EventArgs e)
        {
            // Guard: grammar stays null if SAPI initialization failed in
            // start_dictation(); without this check the click would throw.
            if (grammar != null)
            {
                grammar.DictationSetState(SpeechRuleState.SGDSInactive);
            }
        }

        /// <summary>
        /// SAPI recognition callback: shows the recognized phrase in the text box
        /// and broadcasts it over UDP. The broadcast is best-effort — failures are
        /// logged but never disturb recognition.
        /// </summary>
        public void RecoContext_Recognition(int StreamNumber, object StreamPosition, SpeechRecognitionType RecognitionType, ISpeechRecoResult Result)
        {
            strData = Result.PhraseInfo.GetText(0, -1, true);
            Debug.WriteLine("Recognition: " + strData + ", " + StreamNumber + ", " + StreamPosition);
            textBox1.Text = strData;

            try
            {
                byte[] payload = utf32be.GetBytes(strData);
                uc.Send(payload, payload.Length, broadcastEndPoint);
            }
            catch (Exception ex)
            {
                // Previously an empty catch; keep best-effort semantics but
                // record the failure instead of silently swallowing it.
                Debug.WriteLine("UDP broadcast failed: " + ex.Message);
            }
        }

        /// <summary>SAPI audio-level callback; UI feedback currently disabled.</summary>
        public void RecoContext_AudioLevel(int StreamNumber, object StreamPosition, int e)
        {
            //progressBar1.Value = e;
        }
    }
}
