# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: tu@tulrich.com (Thatcher Ulrich)

"""Statistics utility.
"""

import math


class Distro:
  """Incrementally estimate the distribution of a streaming dataset.

  Insert data via distro.push_data().

  Read the incremental values any time (though data is processed in
  batches, so before the first batch is processed you basically get
  meaningless defaults).

  Call distro.update_estimates() before you read the final values.

  Measures min, max exactly, and estimates the quantiles [0.01, 0.25,
  0.5, 0.75, 0.99].

  Using EWSA == "exponentially weighted stochastic approximation".

  Algo from:
  _Incremental Quantile Estimation for Massive Tracking_
  Fei Chen, Diane Lambert, and Jose C. Pinheiro (2000)
  """

  def __init__(self, m=100):
    self.reset(m)

  def reset(self, m=100):
    """Clear all state.  m is the batch size between estimate updates."""
    self.has_estimates = 0   # becomes 1 once the first batch is processed
    self.total_data_count = 0
    self.min_val = 0
    self.max_val = 0
    self.w = 0.05  # EWSA weight; the paper recommends 0.05
    self.q = [0.01, 0.25, 0.50, 0.75, 0.99]  # quantiles we track
    self.S = [0, 0, 0, 0, 0]  # current estimate for each quantile in q
    self.f = [0, 0, 0, 0, 0]  # density estimate near each quantile
    self.M = m  # batch size
    self.data = []  # samples not yet folded into the estimates

  def serialize(self):
    """Return a flat list of numbers that encapsulates our state.

    Layout: [version(=1), has_estimates, total_data_count, min, max,
             q[0:5], S[0:5], f[0:5], w, M, pending data...]

    TODO: needs a unit test!
    """
    out = [1,  # version
           self.has_estimates,
           self.total_data_count,
           self.min_val,
           self.max_val]
    out.extend(self.q)
    out.extend(self.S)
    out.extend(self.f)
    out.append(self.w)
    out.append(self.M)
    out.extend(self.data)
    return out

  def deserialize(self, state):
    """Re-initialize our state from a list produced by serialize().

    Falls back to reset() defaults when the state is too short or has
    an unknown version number.

    TODO: needs a unit test!
    """
    if len(state) < 22:
      self.reset()
      return

    (version,
     self.has_estimates,
     self.total_data_count,
     self.min_val,
     self.max_val,
     ) = state[0:5]
    if version != 1:
      self.reset()
      return
    self.q = state[5:10]
    self.S = state[10:15]
    self.f = state[15:20]
    self.w = state[20]
    self.M = state[21]
    self.data = state[22:]

  def push_data(self, x):
    """Add one sample; estimates update automatically every M samples."""
    self.data.append(float(x))
    # >= (not ==) so we still flush if the buffer ever ends up past M
    # (e.g. after deserialize() or a change of M).
    if len(self.data) >= self.M:
      self.update_estimates()

  def get_data_count(self):
    """Return the number of data elements aggregated so far.

    Samples still buffered (not yet folded in by update_estimates())
    are not counted.
    """
    return self.total_data_count

  def get_quantile(self, i):
    """Return the current estimate for quantile self.q[i], i in 0..4."""
    return self.S[i]

  def get_min(self):
    """Exact minimum of all aggregated data."""
    return self.min_val

  def get_max(self):
    """Exact maximum of all aggregated data."""
    return self.max_val

  def update_estimates(self):
    """Fold any buffered samples into the estimates."""
    if len(self.data) == 0:
      return

    if self.has_estimates == 0:
      self.initial_step()
    else:
      self.incremental_step()

  def initial_step(self):
    """Bootstrap the estimates from the first batch.

    Uses exact sample quantiles of the batch, then seeds the density
    estimates f[] needed by incremental_step().
    """
    M = len(self.data)
    assert M > 0
    c = 0
    for i in range(M):
      c += 1 / math.sqrt(i + 1)
    # Exact sample quantiles for this batch.
    self.data.sort()
    self.min_val = self.data[0]
    self.max_val = self.data[-1]
    for j in range(len(self.q)):
      self.S[j] = self.data[int(M * self.q[j])]
    # Bandwidth c_prev derived from the interquartile range; floored at
    # 0.001 so it stays positive even for constant data.
    r = max(0.001, self.S[3] - self.S[1])
    c_prev = r / M * c

    # Density estimate: fraction of samples within c_prev of each
    # quantile estimate.  Uses <= to match incremental_step() and the
    # paper's formula (the original used <, inconsistently).
    count_x_abs_diff_le_c = [0, 0, 0, 0, 0]
    for x in self.data:
      for j in range(len(self.q)):
        if abs(x - self.S[j]) <= c_prev:
          count_x_abs_diff_le_c[j] += 1
    for j in range(len(self.q)):
      # max(1, ...) keeps f[j] > 0, so incremental_step() never
      # divides by zero.
      self.f[j] = 1 / (2 * c_prev * M) * max(1, count_x_abs_diff_le_c[j])

    self.total_data_count += M
    self.data = []
    self.has_estimates = 1

  def incremental_step(self):
    """EWSA update of the estimates from one batch of buffered data."""
    # Compute the bandwidth c_prev for this batch.
    M = len(self.data)
    assert M > 0
    c = 0
    for i in range(M):
      c += 1 / math.sqrt(M + i + 1)
    r_prev = max(0.001, self.S[3] - self.S[1])
    c_prev = r_prev / M * c

    # Scan the batch: exact min/max, plus per-quantile counts of
    # samples at-or-below the estimate and samples near the estimate.
    count_x_lessequal_s = [0, 0, 0, 0, 0]
    count_x_abs_diff_le_c = [0, 0, 0, 0, 0]
    for x in self.data:
      self.min_val = min(self.min_val, x)
      self.max_val = max(self.max_val, x)
      for j in range(len(self.q)):
        if x <= self.S[j]:
          count_x_lessequal_s[j] += 1
        if abs(x - self.S[j]) <= c_prev:
          count_x_abs_diff_le_c[j] += 1

    # Stochastic-approximation update (see the formulas in the comment
    # block at the end of this file).
    S1 = [0, 0, 0, 0, 0]
    f1 = [0, 0, 0, 0, 0]
    for i in range(len(self.S)):
      S1[i] = self.S[i] + self.w / self.f[i] * (
          self.q[i] - float(count_x_lessequal_s[i]) / M)
      f1[i] = ((1 - self.w) * self.f[i]
               + self.w / (2 * c_prev * M) * count_x_abs_diff_le_c[i])
      S1[i] = max(self.min_val, min(S1[i], self.max_val))  # clamp

    self.S = S1
    self.f = f1
    self.total_data_count += M
    self.data = []


#  # EWSA == exponentially weighted stochastic approximation
#
#  S(0) = qth sample quantile of X(0,0) ... X(0,M)
#  r(0) = difference of 0.75 and 0.25 sample quantiles
#  c(0) = r(0)/M * sum(i=1..M, 1/sqrt(i))
#  f(0) = 1/(2*c(0)*M) * max(count(|X(0,i) - S(0)| <= c(0)), 1)
#
#  # for next batch of M observations:
#  c = sum(i=M+1..2M, 1/sqrt(i) / M)
#  S(n) = S(n-1) + w/f(n-1) * (q - count(X(n,i) <= S(n-1)) / M)
#  f(n) = (1 - w) * f(n-1) + w / (2*c(n-1)*M) * count(|X(n,i) - S(n-1)| <= c(n-1))
#
#  r(n) = S[q=0.75](n-1) - S[q=0.25](n-1)
#  c(n) = r(n) * c
#
#  w is a tuning parameter.  Paper says 0.05 is good.
