/*
 * SPDX-License-Identifier: Apache-2.0
 *
 * The OpenSearch Contributors require contributions made to
 * this file be licensed under the Apache-2.0 license or a
 * compatible open source license.
 *
 * Modifications Copyright OpenSearch Contributors. See
 * GitHub history for details.
 */

/*
 * Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License").
 * You may not use this file except in compliance with the License.
 * A copy of the License is located at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * or in the "license" file accompanying this file. This file is distributed
 * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied. See the License for the specific language governing
 * permissions and limitations under the License.
 */

package org.opensearch.performanceanalyzer.rca.store.rca.hotheap;


import java.util.HashMap;
import java.util.Iterator;
import java.util.concurrent.TimeUnit;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.jooq.Record;
import org.jooq.Result;
import org.opensearch.performanceanalyzer.metricsdb.MetricsDB;
import org.opensearch.performanceanalyzer.rca.framework.api.Metric;
import org.opensearch.performanceanalyzer.rca.framework.api.flow_units.MetricFlowUnit;
import org.opensearch.performanceanalyzer.rca.store.rca.hotshard.IndexShardKey;

/**
 * We've seen a huge performance impact when collecting node stats across all shards on a data
 * node. So the Performance Analyzer writer will only try to collect node stats from a small
 * portion of shards at a time to reduce the performance impact. This class on the reader side
 * allows us to keep track of node stats from all previous batches and calculate their sum.
 */
public class NodeStatAggregator {

    private static final Logger LOG = LogManager.getLogger(NodeStatAggregator.class);
    // purge the hash table every 30 mins
    private static final int PURGE_HASH_TABLE_INTERVAL_IN_MINS = 30;

    private final Metric nodeStatMetric;
    private final HashMap<IndexShardKey, NodeStatValue> shardKeyMap;
    // running sum of the per-shard metric values currently tracked in shardKeyMap
    private int sum;
    // timestamp (epoch millis) of the last hash-table purge; gates how often we rescan the map
    private long lastPurgeTimestamp;

    public NodeStatAggregator(Metric nodeStatMetric) {
        this.nodeStatMetric = nodeStatMetric;
        this.sum = 0;
        this.lastPurgeTimestamp = 0L;
        this.shardKeyMap = new HashMap<>();
    }

    /**
     * Whether this NodeStatAggregator contains valid node stats from the writer. This is to avoid
     * reading stale data when the node stats have already been disabled on the writer side.
     *
     * @return true if no shard-level node stats are currently tracked
     */
    public boolean isEmpty() {
        return shardKeyMap.isEmpty();
    }

    /**
     * get the name of node stat metric. i.e. Norms_Memory, Cache_FieldData_Size, etc.
     *
     * @return name of node stat metric
     */
    public String getName() {
        return nodeStatMetric.name();
    }

    /**
     * get the sum of node stat metric across all shards on this node
     *
     * @return sum of node stat metric
     */
    public int getSum() {
        return this.sum;
    }

    /**
     * call this method to collect node stats of each shard from node stat metric and calculate its
     * sum. For each shard seen in the current batch, the previously recorded value (if any) is
     * replaced and the running sum is adjusted by the delta, so re-reported shards are not
     * double-counted.
     *
     * @param timestamp current timestamp when collecting from metricDB
     */
    public void collect(final long timestamp) {
        for (MetricFlowUnit metric : nodeStatMetric.getFlowUnits()) {
            if (metric.isEmpty()) {
                continue;
            }
            Result<Record> result = metric.getData();
            for (Record record : result) {
                try {
                    IndexShardKey shardKey = IndexShardKey.buildIndexShardKey(record);
                    Integer value = record.getValue(MetricsDB.MAX, Integer.class);
                    NodeStatValue oldNodeStatValue =
                            shardKeyMap.getOrDefault(shardKey, new NodeStatValue(0, 0));
                    shardKeyMap.put(shardKey, new NodeStatValue(value, timestamp));
                    // add only the delta so a shard reported in multiple batches counts once
                    this.sum += (value - oldNodeStatValue.getValue());
                } catch (Exception e) {
                    // pass the throwable to the logger so the parse-failure cause is not lost
                    LOG.error("Fail to parse node stats {}", this.getName(), e);
                }
            }
        }
        // purge the hashtable at most once per interval
        if (TimeUnit.MILLISECONDS.toMinutes(timestamp - this.lastPurgeTimestamp)
                > PURGE_HASH_TABLE_INTERVAL_IN_MINS) {
            purgeHashTable(timestamp);
        }
    }

    // shards can be deleted from OpenSearch while still remaining in this hashtable
    // or we might disable the Node Stats collector on writer to stop sending node stats to reader
    // in either case, we need to clean up this hashtable on reader periodically
    // to remove node stats of inactive shards
    private void purgeHashTable(final long timestamp) {
        Iterator<NodeStatValue> iterator = this.shardKeyMap.values().iterator();
        while (iterator.hasNext()) {
            NodeStatValue value = iterator.next();
            long timestampDiff = timestamp - value.getTimestamp();
            // entries not refreshed within the interval belong to inactive shards
            if (TimeUnit.MILLISECONDS.toMinutes(timestampDiff)
                    > PURGE_HASH_TABLE_INTERVAL_IN_MINS) {
                this.sum -= value.getValue();
                iterator.remove();
            }
        }
        // record the purge time; without this the purge would rescan the whole map on
        // every collect() call once the first interval since startup had elapsed
        this.lastPurgeTimestamp = timestamp;
    }

    /** Immutable holder pairing a shard's metric value with the time it was last observed. */
    private static class NodeStatValue {
        private final int value;
        private final long timestamp;

        public NodeStatValue(int value, long timestamp) {
            this.value = value;
            this.timestamp = timestamp;
        }

        public int getValue() {
            return this.value;
        }

        public long getTimestamp() {
            return this.timestamp;
        }
    }
}
