﻿using System;
using System.Collections.Generic;
using System.Data.SqlClient;
using System.Text;
using Microsoft.SharePoint.Administration;
using Microsoft.SharePoint.Administration.Health;

namespace SPHealth.SharePoint.HealthRules.SP.Rules.Performance
{
    public class WarnDatabaseCapacity : SPRepairableHealthAnalysisRule
    {
        #region Locals
        // Keys used to look up this rule's settings in the configuration list (see GetDefaultSettings()).
        private const string CapacityThreshold = "CapacityThreshold";
        private const string BeginHour = "BeginHour";
        private const string EndHour = "EndHour";
        private const string GrowBy = "GrowBy";
        // Per-database statistics harvested by GetContentDatabases(), keyed by database name.
        private SortedDictionary<string, DatabaseStat> dbStatistics;
        // Title used to locate this rule's configuration list item.
        private const string ruleTitle = "WarnDatabaseCapacity";
        // Accumulated per-database failure lines, surfaced through the Explanation property.
        private StringBuilder _failingContentDatabases;
        // Count of databases that failed the capacity check or could not be queried/grown.
        private int _numFailingDatabases = 0;
        // Lazily built schedule settings returned by AutomaticExecutionParameters.
        private SPHealthAnalysisRuleAutomaticExecutionParameters _executionParameters;

        #region SQL queries
        // Returns a single row of sizing statistics for the current database; the column
        // aliases deliberately match the DatabaseStat property names read in
        // GetContentDatabaseStatistics(). "unallocated" is the percentage of the data
        // size that is still free. The CATCH block still returns one row on error, with
        // reserved = -100 and the error number/severity/state/message folded into the
        // other columns so the caller never sees an empty result set.
        // NOTE(review): this reads dbo.sysfiles (status & 64 marks log files), a
        // SQL Server 2000-era compatibility view - TODO confirm behaviour against
        // sys.database_files on newer SQL Server versions.
        private const string GET_DATABASE_REPORT =
                "begin try " +
                "declare @dbsize bigint " +
                "declare @logsize bigint " +
                "declare @database_size_mb float " +
                "declare @unallocated_space_mb float " +
                "declare @reserved_mb float " +
                "declare @data_mb float " +
                "declare @log_size_mb float " +
                "declare @index_mb float " +
                "declare @unused_mb float " +
                "declare @reservedpages bigint " +
                "declare @pages bigint " +
                "declare @usedpages bigint " +
                " " +
                "select @dbsize = sum(convert(bigint,case when status & 64 = 0 then size else 0 end)) " +
                ",@logsize = sum(convert(bigint,case when status & 64 != 0 then size else 0 end)) " +
                "from dbo.sysfiles " +
                " " +
                "select @reservedpages = sum(a.total_pages) " +
                ",@usedpages = sum(a.used_pages) " +
                ",@pages = sum(CASE " +
                "WHEN it.internal_type IN (202,204) THEN 0 " +
                "WHEN a.type != 1 THEN a.used_pages " +
                "WHEN p.index_id < 2 THEN a.data_pages " +
                "ELSE 0 " +
                "END) " +
                "from sys.partitions p " +
                "join sys.allocation_units a on p.partition_id = a.container_id " +
                "left join sys.internal_tables it on p.object_id = it.object_id " +
                " " +
                "select @database_size_mb = (convert(dec (19,2),@dbsize) + convert(dec(19,2),@logsize)) * 8192 / 1048576.0 " +
                "select @unallocated_space_mb =(case " +
                "when @dbsize >= @reservedpages then (convert (dec (19,2),@dbsize) - convert (dec (19,2),@reservedpages)) * 8192 / 1048576.0 " +
                "else 0 " +
                "end) " +
                " " +
                "select  @reserved_mb = @reservedpages * 8192 / 1048576.0 " +
                "select  @data_mb = @pages * 8192 / 1048576.0 " +
                "select  @log_size_mb = convert(dec(19,2),@logsize) * 8192 / 1048576.0 " +
                "select  @index_mb = (@usedpages - @pages) * 8192 / 1048576.0 " +
                "select  @unused_mb = (@reservedpages - @usedpages) * 8192 / 1048576.0 " +
                " " +
                "select " +
                "@database_size_mb as database_size_mb " +
                ",       @reserved_mb as reserved_mb " +
                ",       @unallocated_space_mb as unallocated_space_mb " +
                ",       (@reserved_mb + @unallocated_space_mb) as data_size " +
                ",       @log_size_mb as transaction_log_size " +
                ",       cast(@unallocated_space_mb*100.0/(@reserved_mb + @unallocated_space_mb) as decimal(10,2))as  unallocated " +
                ",       cast(@reserved_mb*100/(@reserved_mb + @unallocated_space_mb) as decimal(10,2))as reserved " +
                ",       cast(@data_mb*100/(@reserved_mb + @unallocated_space_mb) as decimal(10,2))as data " +
                ",       cast(@index_mb*100/(@reserved_mb + @unallocated_space_mb) as decimal(10,2)) as index_1 " +
                ",       cast(@unused_mb*100/(@reserved_mb + @unallocated_space_mb) as decimal(10,2))as unused; " +
                " " +
                "end try " +
                "begin catch " +
                "select " +
                "1 as database_size_mb " +
                ",       ERROR_NUMBER() as reserved_mb " +
                ",       ERROR_SEVERITY() as unallocated_space_mb " +
                ",       ERROR_STATE() as data_size " +
                ",       1 as transaction_log_size " +
                ",       ERROR_MESSAGE() as unallocated " +
                ",       -100 as reserved " +
                ",       1 as data " +
                ",       1 as index_1 " +
                ",       1 as unused " +
                "end catch";
        // T-SQL template to pre-grow a database file: {0} = database name, {1} = logical
        // file name, {2} = new size (ALTER DATABASE ... SIZE defaults to MB when no unit
        // suffix is given). Names are bracket-quoted so SharePoint content database names
        // containing hyphens, GUIDs or spaces do not break the statement.
        private const string GROW_DATABASE = "alter database [{0}] modify file ( Name = [{1}], Size = {2})";

        #endregion

        #endregion

        #region Constructor
        // Initializes the failure accumulators; database statistics themselves are
        // harvested lazily when Check() runs.
        public WarnDatabaseCapacity()
        {
            _failingContentDatabases = new StringBuilder();
            _numFailingDatabases = 0; // redundant with the field initializer, kept for clarity
        }
        #endregion

        #region Fields

        /// <summary>
        /// Short description shown in the Health Analyzer report list when this rule fails.
        /// </summary>
        public override string Summary
        {
            get { return "One or more Content Databases are filling up and may trigger an autogrowth."; }
        }

        /// <summary>
        /// Detailed failure text: lists each database (with fill percentage) accumulated
        /// by Check()/Repair(), with singular/plural wording based on the failure count.
        /// </summary>
        public override string Explanation
        {
            get
            {
                // Singular by default; switch to "N ... Databases are" when several failed.
                string countPrefix = "";
                string verbSuffix = " is";
                if (_numFailingDatabases > 1)
                {
                    countPrefix = _numFailingDatabases.ToString() + " ";
                    verbSuffix = "s are";
                }

                return String.Format("The following {0}Content Database{1} getting full:\r\n\r\n{2}",
                    countPrefix, verbSuffix, _failingContentDatabases.ToString());
            }
        }

        /// <summary>
        /// Remedy guidance shown to the administrator. Fixes the typos in the original
        /// user-facing text ("managment", "is recommend").
        /// </summary>
        public override string Remedy
        {
            get { return "Proactive management of database growth is recommended, including as much as possible the pre-growth of all data and log files to their anticipated final size. See Storage and SQL Server capacity planning and configuration (SharePoint Server 2010) for further details: http://technet.microsoft.com/en-us/library/cc298801.aspx"; }
        }

        /// <summary>
        /// Failures from this rule surface as warnings, not errors.
        /// </summary>
        public override SPHealthCheckErrorLevel ErrorLevel
        {
            get { return SPHealthCheckErrorLevel.Warning; }
        }

        /// <summary>
        /// This rule is listed under the Performance category in the Health Analyzer.
        /// </summary>
        public override SPHealthCategory Category
        {
            get { return SPHealthCategory.Performance; }
        }

        /// <summary>
        /// Scheduling settings for the Health Analyzer: run hourly, on any server,
        /// under the farm timer service, and never repair automatically (database
        /// growth only happens through an explicit Repair()).
        /// </summary>
        public override SPHealthAnalysisRuleAutomaticExecutionParameters AutomaticExecutionParameters
        {
            get
            {
                // Build once and cache for subsequent calls.
                if (_executionParameters != null)
                {
                    return _executionParameters;
                }

                SPHealthAnalysisRuleAutomaticExecutionParameters parameters = new SPHealthAnalysisRuleAutomaticExecutionParameters();
                parameters.Schedule = SPHealthCheckSchedule.Hourly;
                parameters.Scope = SPHealthCheckScope.Any;
                parameters.ServiceType = typeof(SPTimerService); //Tell SP if we want our own timer job or can we run with others...
                parameters.RepairAutomatically = false;
                _executionParameters = parameters;
                return _executionParameters;
            }
        }

        #endregion

        #region Methods

        /// <summary>
        /// Provides the configuration list item title for this rule.
        /// </summary>
        public static string GetTitle()
        {
            // return the title for the rule
            return ruleTitle;
        }

        /// <summary>
        /// Provides the default configuration settings used to seed this rule's
        /// configuration list item. "CommentN" entries document the real settings
        /// (CapacityThreshold, BeginHour, EndHour, GrowBy) for the administrator.
        /// </summary>
        public static IDictionary<string, string> GetDefaultSettings()
        {
            return new Dictionary<string, string>
            {
                { "Comment1", "Enter the database capacity percentage that is used to trigger" },
                { "Comment2", "a warning and potentially a scheduled database expansion. Values" },
                { "Comment3", "should be between 0.0 and 1.0." },
                { CapacityThreshold, "0.8" },
                { "Comment4", "Enter the BeginHour for the time period in which database" },
                { "Comment5", "expansions should occur." },
                { BeginHour, "1" },
                { "Comment6", "Enter the EndHour for the time period in which database" },
                { "Comment7", "expansions should occur." },
                { EndHour, "3" },
                { "Comment8", "Enter the percentage of growth the database should undertake" },
                { "Comment9", "during an expansion. Values should be between 0.0 and 1.0." },
                { GrowBy, "0.3" }
            };
        }

        /// <summary>
        /// Harvests statistics for every online content database and fails if any
        /// database is over its configured capacity threshold.
        /// </summary>
        /// <returns>Passed when no database exceeds its threshold; otherwise Failed.</returns>
        /// <exception cref="InvalidOperationException">The local server is not joined to a SharePoint farm.</exception>
        public override SPHealthCheckStatus Check()
        {
            if (!SPFarm.Joined)
            {
                throw new InvalidOperationException("The local server is not joined to a SharePoint farm.");
            }

            // Reset the accumulators so a repeated Check() on the same instance does not
            // double-report (they were previously only reset in the constructor and Repair()).
            _failingContentDatabases = new StringBuilder();
            _numFailingDatabases = 0;

            // load the stats for the content databases (harvesting failures are
            // recorded in the accumulators as they occur)
            dbStatistics = GetContentDatabases();

            // analyze the db stats
            foreach (DatabaseStat dbStatistic in dbStatistics.Values)
            {
                // is the database over its warning threshold
                if (dbStatistic.IsOverThreshold)
                {
                    // "unallocated" is the free percentage, so 100 - unallocated is the fill percentage
                    _failingContentDatabases.AppendLine(string.Format("{0} is {1}% full.", dbStatistic.Name, ((double)100.00 - dbStatistic.unallocated).ToString("0.#")));
                    _numFailingDatabases++;
                }
            }

            return _numFailingDatabases == 0 ? SPHealthCheckStatus.Passed : SPHealthCheckStatus.Failed;
        }

        /// <summary>
        /// Attempts to pre-grow every database that is over its capacity threshold.
        /// Growth only occurs inside the configured BeginHour/EndHour window
        /// (see GrowDatabase).
        /// </summary>
        /// <returns>Succeeded when no failures were recorded; otherwise Failed.</returns>
        /// <exception cref="InvalidOperationException">The local server is not joined to a SharePoint farm.</exception>
        public override SPHealthRepairStatus Repair()
        {
            if (!SPFarm.Joined)
            {
                throw new InvalidOperationException("The local server is not joined to a SharePoint farm.");
            }

            // reset our counters before re-evaluating
            _failingContentDatabases = new StringBuilder();
            _numFailingDatabases = 0;

            // Statistics are normally loaded by a preceding Check(); guard against the
            // framework invoking Repair() on a fresh instance (previously a
            // NullReferenceException).
            if (dbStatistics == null)
            {
                dbStatistics = GetContentDatabases();
            }

            // grow the databases where needed; GrowDatabase records any failures
            foreach (DatabaseStat dbStatistic in dbStatistics.Values)
            {
                if (dbStatistic.IsOverThreshold)
                    GrowDatabase(dbStatistic);
            }

            // Did we manage to grow all the databases?
            return _numFailingDatabases == 0 ? SPHealthRepairStatus.Succeeded : SPHealthRepairStatus.Failed;
        }

        /// <summary>
        /// Attempts to pre-grow a single over-threshold database. Growth only happens
        /// inside the configured maintenance window and only while the new size stays
        /// under the farm's recommended maximum content database size. Failures are
        /// recorded in _failingContentDatabases / _numFailingDatabases.
        /// </summary>
        /// <param name="dbStatistic">Statistics snapshot for the database to grow.</param>
        private void GrowDatabase(DatabaseStat dbStatistic)
        {
            // Read the window settings once instead of once per comparison.
            int beginHour = Convert.ToInt32(common.GetRuleSetting(BeginHour, ruleTitle));
            int endHour = Convert.ToInt32(common.GetRuleSetting(EndHour, ruleTitle));
            int currentHour = DateTime.Now.Hour;

            // Maintenance window check. A window that wraps midnight (e.g. 22 -> 2) is
            // honoured by inverting the comparison; the begin < end case is unchanged
            // from the original behaviour.
            bool inWindow = beginHour <= endHour
                ? (currentHour >= beginHour && currentHour < endHour)
                : (currentHour >= beginHour || currentHour < endHour);
            if (!inWindow)
                return;

            // the database is over the capacity limit so lets attempt to grow it
            try
            {
                using (SqlConnection connection = new SqlConnection(dbStatistic.connectionString))
                {
                    // Grow to the current size plus the configured fraction (GrowBy, e.g. 0.3 = +30%).
                    int newSize = Convert.ToInt32(dbStatistic.database_size_mb * ((double)1.0 + Convert.ToDouble(common.GetRuleSetting(GrowBy, ruleTitle))));

                    // check the new db size isn't over the maximum recommended size for a content database
                    int maxSizeGB = Convert.ToInt32(common.GetRuleSetting("MaxRecomendedContentDBSizeGB",
                        SPHealth.SharePoint.HealthRules.SP.Rules.Configuration.WarnContentDatabaseSize.GetTitle()));

                    if ((newSize / 1024) > maxSizeGB)
                    {
                        _numFailingDatabases++;
                        _failingContentDatabases.AppendLine(string.Format("Database {0} should not be grown above the maximum recommend Content Database Size of {1}GB."
                            , dbStatistic.Name, maxSizeGB));
                    }
                    else
                    {
                        connection.Open();
                        // NOTE(review): this assumes the logical data file name equals the
                        // database name, which is typical for SharePoint-created content
                        // databases but not guaranteed - TODO confirm against sys.database_files.
                        string commandText = string.Format(GROW_DATABASE, dbStatistic.Name, dbStatistic.Name, newSize.ToString());
                        using (SqlCommand command = new SqlCommand { Connection = connection, CommandText = commandText })
                        {
                            // ALTER DATABASE returns no result set, so ExecuteNonQuery is
                            // the correct call (the original used ExecuteScalar and ignored
                            // the always-null result).
                            command.ExecuteNonQuery();
                        }
                    }
                }
            }
            catch (Exception ex)
            {
                _numFailingDatabases++;
                _failingContentDatabases.AppendLine(string.Format("Error growing database {0}: {1}", dbStatistic.Name, ex.Message));
            }
        }

        /// <summary>
        /// Harvests sizing statistics for every content database attached to an online
        /// web application. Databases that cannot be queried are recorded as failures
        /// rather than aborting the scan.
        /// </summary>
        /// <returns>Statistics keyed (and sorted) by database name.</returns>
        private SortedDictionary<string, DatabaseStat> GetContentDatabases()
        {
            SortedDictionary<string, DatabaseStat> harvested = new SortedDictionary<string, DatabaseStat>();

            foreach (SPWebApplication webApplication in SPWebService.ContentService.WebApplications)
            {
                // skip web applications that are not online
                if (webApplication.Status != SPObjectStatus.Online)
                    continue;

                foreach (SPContentDatabase contentDatabase in webApplication.ContentDatabases)
                {
                    try
                    {
                        DatabaseStat stat = GetContentDatabaseStatistics(contentDatabase);
                        if (stat != null)
                        {
                            harvested.Add(stat.Name, stat);
                        }
                    }
                    catch (Exception ex)
                    {
                        _numFailingDatabases++;
                        _failingContentDatabases.AppendLine(string.Format("Error harvesting database statistics for {0}: {1}", contentDatabase.Name, ex.Message));
                    }
                }
            }

            return harvested;
        }

        /// <summary>
        /// Runs GET_DATABASE_REPORT against a single content database and maps the
        /// result row onto a DatabaseStat.
        /// </summary>
        /// <param name="contentDatabase">The content database to query.</param>
        /// <returns>The harvested statistics, or null when no row could be read.</returns>
        private DatabaseStat GetContentDatabaseStatistics(SPContentDatabase contentDatabase)
        {
            // Null until a row is successfully read; the caller's "stat != null" check
            // then skips it. (Previously a failure returned a blank DatabaseStat whose
            // null Name made the caller's dictionary Add() throw.)
            DatabaseStat stat = null;
            try
            {
                using (SqlConnection connection = new SqlConnection(contentDatabase.DatabaseConnectionString))
                {
                    connection.Open();
                    using (SqlCommand command = new SqlCommand { Connection = connection, CommandText = GET_DATABASE_REPORT })
                    using (SqlDataReader reader = command.ExecuteReader()) // previously leaked: never disposed
                    {
                        if (reader.Read())
                        {
                            stat = new DatabaseStat
                            {
                                Name = contentDatabase.Name,
                                Date = DateTime.Now,
                                connectionString = contentDatabase.DatabaseConnectionString,
                                database_size_mb = Convert.ToDouble(reader["database_size_mb"]),
                                capacityThreshold = Convert.ToDouble(common.GetRuleSetting(CapacityThreshold, ruleTitle)),
                                reserved_mb = Convert.ToDouble(reader["reserved_mb"]),
                                unallocated_space_mb = Convert.ToDouble(reader["unallocated_space_mb"]),
                                data_size = Convert.ToDouble(reader["data_size"]),
                                transaction_log_size = Convert.ToDouble(reader["transaction_log_size"]),
                                unallocated = Convert.ToDouble(reader["unallocated"]),
                                reserved = Convert.ToDouble(reader["reserved"]),
                                data = Convert.ToDouble(reader["data"]),
                                index_1 = Convert.ToDouble(reader["index_1"]),
                                unused = Convert.ToDouble(reader["unused"])
                            };
                        }
                    }
                }
            }
            catch (Exception ex)
            {
                // Best endeavours: record the failure (the original swallowed it and the
                // error only surfaced indirectly via the caller's null-key exception).
                _numFailingDatabases++;
                _failingContentDatabases.AppendLine(string.Format("Error harvesting database statistics for {0}: {1}", contentDatabase.Name, ex.Message));
            }

            return stat;
        }

        /// <summary>
        /// Snapshot of a single content database's size statistics. Property names
        /// deliberately mirror the GET_DATABASE_REPORT column aliases so the mapping
        /// in GetContentDatabaseStatistics reads one-to-one.
        /// </summary>
        private class DatabaseStat
        {
            public string Name { get; set; }
            public DateTime Date { get; set; }            // when the snapshot was taken
            public string connectionString { get; set; }
            public double capacityThreshold { get; set; } // fraction (0.0-1.0) from the rule settings
            public double database_size_mb { get; set; }  // data + log size in MB
            public double reserved_mb { get; set; }
            public double unallocated_space_mb { get; set; }
            public double data_size { get; set; }
            public double transaction_log_size { get; set; }

            // Percentage of the data size that is still unallocated (free).
            public double unallocated { get; set; }

            // Derived on demand rather than latched inside the unallocated setter; this
            // removes the original hidden dependency on object-initializer ordering
            // (capacityThreshold had to be assigned before unallocated for the flag to
            // be computed correctly).
            public bool IsOverThreshold
            {
                get { return ((double)100.00 - unallocated) > (capacityThreshold * (double)100.00); }
            }

            public double reserved { get; set; }
            public double data { get; set; }
            public double index_1 { get; set; }
            public double unused { get; set; }
            public double log_size_mb { get; set; }
            public double log_space_used { get; set; }
            public double log_space_unsed { get; set; }   // [sic] "unsed" - never populated by this rule

        }

        #endregion

    }
}
