﻿using System;
using System.Collections.Generic;
using System.Globalization;
using System.IO;
using System.Linq;
using System.Text;
using static System.Windows.Forms.AxHost;

namespace SerialPort_Assistant
{
    /// <summary>
    /// Tabular Q-learning agent trained against a <see cref="Form1"/> environment that
    /// exposes a Gym-style API: <c>Reset</c>, <c>Step</c>, <c>Random_Action</c>,
    /// <c>GetStateIdx</c>, <c>Observation_Space_N</c> and <c>Action_Space_N</c>.
    /// </summary>
    partial class Q_Learning
    {
        /// <summary>Maximum number of steps allowed in one training episode.</summary>
        public int Max_Steps_Per_Episode { get; set; }

        /// <summary>Alpha: how strongly each new sample overwrites the stored Q-value.</summary>
        public double Learning_Rate { get; set; }

        /// <summary>Gamma: discount factor applied to estimated future rewards.</summary>
        public double Discount_Rate { get; set; }

        /// <summary>Epsilon: current probability of choosing a random (exploratory) action.</summary>
        public double Exploration_Rate { get; set; }

        /// <summary>Upper bound of the epsilon decay schedule.</summary>
        public double Max_Exploration_Rate { get; set; }

        /// <summary>Lower bound of the epsilon decay schedule.</summary>
        public double Min_Exploration_Rate { get; set; }

        /// <summary>Decay constant of the exponential epsilon schedule.</summary>
        public double Exploration_Decay_Rate { get; set; }

        // NOTE(review): static field assigned from an instance constructor — the most
        // recently constructed agent silently wins. Kept static because other parts of
        // this partial class may reference it; confirm before making it an instance field.
        public static Form1 env;

        private double[,] q_Table;       // [state, action] -> learned action value
        private double[] state_rewards;  // scratch buffer holding one row of q_Table

        /// <summary>
        /// Creates an agent bound to <paramref name="environment"/> with the given
        /// hyper-parameters. Call <see cref="Update"/> to (re)train the Q-table.
        /// </summary>
        public Q_Learning(
            Form1 environment,
            int Max_Steps_Per_Episode = 100,
            double Learning_Rate = 0.1,
            double Discount_Rate = 0.99,
            double Exploration_Rate = 1,
            double Max_Exploration_Rate = 1,
            double Min_Exploration_Rate = 0.01,
            double Exploration_Decay_Rate = 0.001
            )
        {
            env = environment;
            this.Max_Steps_Per_Episode = Max_Steps_Per_Episode;
            this.Learning_Rate = Learning_Rate;
            this.Discount_Rate = Discount_Rate;
            this.Exploration_Rate = Exploration_Rate;
            this.Max_Exploration_Rate = Max_Exploration_Rate;
            this.Min_Exploration_Rate = Min_Exploration_Rate;
            this.Exploration_Decay_Rate = Exploration_Decay_Rate;
        }

        /// <summary>
        /// Greedy policy: returns the index of the highest-valued action for
        /// <paramref name="state"/> (first index on ties).
        /// </summary>
        /// <exception cref="InvalidOperationException">
        /// Thrown when <see cref="Update"/> has not been called yet, so the Q-table
        /// does not exist. (Previously this surfaced as a bare NullReferenceException.)
        /// </exception>
        public int TakeAction(int state)
        {
            if (q_Table == null)
                throw new InvalidOperationException(
                    "Call Update() before TakeAction(): the Q-table has not been initialized.");

            for (int i = 0; i < state_rewards.Length; i++)
                state_rewards[i] = q_Table[state, i];

            return Array.IndexOf(state_rewards, state_rewards.Max());
        }

        /// <summary>
        /// Returns the Q-values of every action for <paramref name="state"/>.
        /// The returned array is a shared scratch buffer that is overwritten by the
        /// next call — copy it if you need to keep the values.
        /// </summary>
        /// <exception cref="InvalidOperationException">
        /// Thrown when <see cref="Update"/> has not been called yet.
        /// </exception>
        public double[] rewards(int state)
        {
            if (q_Table == null)
                throw new InvalidOperationException(
                    "Call Update() before rewards(): the Q-table has not been initialized.");

            for (int i = 0; i < state_rewards.Length; i++)
                state_rewards[i] = q_Table[state, i];
            return state_rewards;
        }

        /// <summary>
        /// Trains the Q-table from scratch for <paramref name="num_episodes"/> episodes
        /// using epsilon-greedy action selection with exponential epsilon decay, and
        /// prints averaged progress roughly <paramref name="num_console_outputs"/> times.
        /// </summary>
        /// <param name="num_episodes">Total number of training episodes to run.</param>
        /// <param name="num_console_outputs">How many progress lines to print.</param>
        public void Update(int num_episodes = 10_000, int num_console_outputs = 10)
        {
            // Fresh table; C# arrays are zero-initialized on allocation, so the
            // previous explicit Array.Clear was redundant and has been dropped.
            q_Table = new double[env.Observation_Space_N, env.Action_Space_N];
            state_rewards = new double[q_Table.GetLength(1)];
            Random rand = new Random();

            double reward, exploration_rate_threshold;
            bool done;
            int action;
            (int, int) state, new_state;
            int state_idx, new_state_idx;
            // Integer reporting interval, clamped to >= 1. The original computed an
            // integer division into a float and used float modulo, and divided by
            // zero when num_console_outputs == 0.
            int report_interval = Math.Max(1, num_episodes / Math.Max(1, num_console_outputs));

            List<double> rewards_all_episodes = new List<double>();
            List<double> exploration_rate_all_episodes = new List<double>();

            for (int episode = 0; episode < num_episodes; episode++)
            {
                state = env.Reset();
                state_idx = env.GetStateIdx(state);
                done = false;
                double rewards_current_episode = 0;
                for (int step = 0; step < Max_Steps_Per_Episode; step++)
                {
                    // Epsilon-greedy: exploit when the draw beats epsilon, else explore.
                    exploration_rate_threshold = rand.NextDouble();
                    if (exploration_rate_threshold > Exploration_Rate)
                    {
                        action = TakeAction(state_idx);
                    }
                    else
                    {
                        action = env.Random_Action();
                    }
                    (new_state, reward, done) = env.Step(action);
                    new_state_idx = env.GetStateIdx(new_state);
                    // Standard Q-learning update:
                    // Q(s,a) <- (1-alpha)*Q(s,a) + alpha*(r + gamma*max_a' Q(s',a'))
                    q_Table[state_idx, action] = q_Table[state_idx, action] * (1 - Learning_Rate) +
                        Learning_Rate * (reward + Discount_Rate * rewards(new_state_idx).Max());
                    state = new_state;
                    // BUG FIX: the original never advanced state_idx, so every update
                    // within an episode kept reading/writing the initial state's row.
                    state_idx = new_state_idx;
                    rewards_current_episode += reward;
                    if (done)
                        break;
                }

                // Exponentially decay epsilon toward Min_Exploration_Rate.
                Exploration_Rate = Min_Exploration_Rate + (Max_Exploration_Rate - Min_Exploration_Rate) * Math.Exp(-Exploration_Decay_Rate * episode);

                rewards_all_episodes.Add(rewards_current_episode);
                exploration_rate_all_episodes.Add(Exploration_Rate);
                if (episode % report_interval == 0)
                {
                    double avg_exploration_rate = Math.Round(exploration_rate_all_episodes.Average(), 2);
                    double avg_reward = Math.Round(rewards_all_episodes.Average(), 2);
                    Console.WriteLine(episode + " episodes | exploration rate = " + avg_exploration_rate + " | average reward = " + avg_reward);
                    rewards_all_episodes.Clear();
                    exploration_rate_all_episodes.Clear();
                }
            }
        }
    }
}