#!/usr/bin/env python3
# Author: Armit
# Create Time: 2023/03/14 

import torch
from torch.optim import Adam
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns

from torch import Tensor

device = 'cuda' if torch.cuda.is_available() else 'cpu'


# Load the data matrix. All columns after the first are taken as numeric data;
# the transpose makes each CSV column one sample row of X.
df = pd.read_csv('Fl_L1_ISR.csv')
X = torch.from_numpy(df[df.columns[1:]].to_numpy().T).float().to(device)
print('X.shape:', X.shape)

# NOTE: `not 'log'` is always False — this branch is a deliberately disabled
# toggle. Change the condition (e.g. to `if 'log':`) to enable log-scaling.
if not 'log':
  X = (X + 1e-5).log()

# A non-empty string literal is always truthy — used here as a named section marker.
if 'model (linear scoring)':
  N, D = X.shape
  W = torch.arange(1, D+1) / 10   # init weight vector as [0.1, 0.2, ..., 1.0]
  W.unsqueeze_(dim=-1)            # [D, 1]
  W = W.to(device)
  H = X @ W     # [N, D] x [D, 1] => [N, 1]


# ref: https://blog.51cto.com/u_15351682/3729915
def get_loss(data:Tensor):
  """Measure how far the distribution of `data` deviates from a Gaussian.

  Standardizes the data to z-scores, then combines:
    - skewness: the 3rd standardized moment, 0 for a symmetric distribution;
    - excess kurtosis: the 4th standardized moment minus 3, 0 for a Gaussian
      (values > 0 mean a sharper peak than the normal distribution).

  Returns the absolute value of their sum as a scalar tensor.
  """
  centered = data - data.mean()
  sigma = centered.pow(2.0).mean().pow(0.5)
  z = centered / sigma

  # Skewness: direction and degree of asymmetry of the distribution.
  skew = (z ** 3.0).mean()

  # Excess kurtosis: peakedness relative to the normal distribution.
  excess_kurt = (z ** 4.0).mean() - 3.0

  return (skew + excess_kurt).abs()


# Learn an additive residual DW so that the scores X @ (W + DW) are as
# Gaussian as possible (per get_loss), while keeping DW itself small.
lr = 0.0001
steps = 8000

DW = torch.zeros_like(W, device=W.device)     # additive residual on top of the fixed init weights W
DW.requires_grad = True
optimizer = Adam([DW], lr, weight_decay=10)   # strong L2 penalty keeps the residual's magnitude small

last_loss = 100.0
for i in range(steps):
  Y = X @ (W + DW)

  optimizer.zero_grad()
  loss = get_loss(Y)
  loss.backward()
  optimizer.step()

  # Every 10 steps: report progress, and early-stop once the loss plateaus
  # (change smaller than 1e-6 between consecutive checkpoints).
  if i % 10 == 0:
    with torch.no_grad():
      print(f'>> loss: {loss.item():.7f}, weights: {[round(x, 3) for x in (W + DW).detach().squeeze().cpu().numpy().tolist()]}')
      if abs(loss.item() - last_loss) < 1e-6: break 
      last_loss = loss.item()

DW.requires_grad = False


def to_matrix(X: Tensor, shape: tuple = (20, 20)) -> np.ndarray:
  """Convert a flat score tensor to a 2-D numpy matrix for heatmap plotting.

  Generalized: the grid shape is now a parameter instead of a hard-coded
  (20, 20); the default preserves the original behavior for existing callers.

  Args:
    X: tensor holding `prod(shape)` elements (singleton dims are squeezed).
    shape: target 2-D grid shape; defaults to the (20, 20) layout used by
      this script's CSV data.

  Returns:
    A detached numpy array of the given shape, moved to the CPU.
  """
  X = X.squeeze().detach()
  X = X.reshape(shape)
  return X.cpu().numpy()


if 'show hist':   # non-empty string is always truthy — acts as a section marker
  Y = X @ (W + DW)     # [N, D] x [D, 1] => [N, 1]

  # Histograms: raw features X vs. the naive score H vs. the learned score Y.
  plt.clf()
  plt.subplot(311) ; plt.hist(X.flatten().cpu().numpy(), bins=50) ; plt.title('X')
  plt.subplot(312) ; plt.hist(H.flatten().cpu().numpy(), bins=50) ; plt.title('naive score')
  plt.subplot(313) ; plt.hist(Y.flatten().cpu().numpy(), bins=50) ; plt.title('learned score')
  plt.tight_layout()
  plt.show()

  # Heatmaps of the scores arranged on the 20x20 grid, plus their difference.
  plt.clf()
  mH = to_matrix(H)
  mY = to_matrix(Y)
  mD = mY - mH
  plt.subplot(131) ; sns.heatmap(mH) ; plt.title('naive score')
  plt.subplot(132) ; sns.heatmap(mY) ; plt.title('learned score')
  plt.subplot(133) ; sns.heatmap(mD) ; plt.title('score diff')
  plt.show()
