package cloud.tianai.neuron.regression;

import cloud.tianai.neuron.common.Matrix;
import cloud.tianai.neuron.common.NumPy;
import cloud.tianai.neuron.common.util.NormalizeData;
import cloud.tianai.neuron.common.util.RegressionUtils;
import cloud.tianai.neuron.optimizer.OptimizerEnum;
import lombok.Getter;

import java.io.Serializable;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ThreadLocalRandom;
import java.util.function.Consumer;

import static cloud.tianai.neuron.common.util.RegressionUtils.NONE_NORMALIZE;

/**
 * Logistic regression classifier trained by gradient descent.
 *
 * @author 天爱有情
 * @since 2022/12/11 18:28
 */
@Getter
@Getter
public class LogisticRegression implements Serializable {


    private static final long serialVersionUID = -9123709617784927140L;
    /** Learned parameter vector theta (bias weight first, then one weight per feature). */
    Matrix thetas;

    /** Degree of polynomial feature expansion applied before training (0 = none). */
    int polynomialDegree;
    /** Degree of sinusoid feature expansion applied before training (0 = none). */
    int sinusoidDegress;

    /** Normalization weights captured at fit time and replayed at predict time. */
    Matrix normalizationWeight1;
    Matrix normalizationWeight2;
    Matrix x2NormalizationWeight1;
    Matrix x2NormalizationWeight2;
    /** Normalization type: 0 = min-max, 1 = standard deviation, -1 = none. */
    int normalizeType;

    public LogisticRegression() {
    }

    /**
     * Runs {@code numIterations} optimizer steps starting from {@code thetas}.
     *
     * @param thetas        initial parameter vector
     * @param datas         training features (bias column already prepended)
     * @param labels        0/1 target labels
     * @param rate          learning rate
     * @param numIterations number of optimizer steps
     * @param optimizer     which descent variant to use (BGD is the fallback)
     * @param loss          optional per-iteration loss sink; may be null
     * @return the final parameter vector
     */
    public Matrix gradientDescent(Matrix thetas, Matrix datas, Matrix labels, double rate, int numIterations, OptimizerEnum optimizer, Consumer<Double> loss) {
        long start = System.currentTimeMillis();
        Matrix currentThetas = thetas;
        for (int i = 0; i < numIterations; i++) {
            if (OptimizerEnum.MBGD.equals(optimizer)) {
                // Mini-batch gradient descent
                currentThetas = MBGD(datas, labels, currentThetas, rate);
            } else if (OptimizerEnum.SGD.equals(optimizer)) {
                // Stochastic gradient descent
                currentThetas = SGD(datas, labels, currentThetas, rate);
            } else {
                // Batch gradient descent (default)
                currentThetas = BGD(datas, labels, currentThetas, rate);
            }
            // Track the cross-entropy loss for this iteration
            double calcLoss = costFunction(datas, labels, currentThetas);
            System.out.println("迭代第[" + (i + 1) + "]次, loss=" + calcLoss);
            if (loss != null) {
                loss.accept(calcLoss);
            }

        }
        long end = System.currentTimeMillis();
        System.out.println("训练完成, 耗时:" + (end - start) + "ms");
        return currentThetas;
    }

    /**
     * Fits with no feature expansion and no normalization.
     *
     * @see #fit(Matrix, Matrix, double, int, int, int, int, OptimizerEnum)
     */
    public List<Double> fit(Matrix datas, Matrix labels, double rate, int maxNum) {
        return fit(datas, labels, rate, maxNum, 0, 0, NONE_NORMALIZE);
    }

    /**
     * Fits using batch gradient descent.
     *
     * @see #fit(Matrix, Matrix, double, int, int, int, int, OptimizerEnum)
     */
    public List<Double> fit(Matrix datas, Matrix labels, double rate, int maxNum, int polynomialDegree, int sinusoidDegress, int normalizeType) {
        return fit(datas, labels, rate, maxNum, polynomialDegree, sinusoidDegress, normalizeType, OptimizerEnum.BGD);
    }

    /**
     * Trains the model: expands/normalizes features, initializes theta, then
     * runs gradient descent for {@code maxNum} iterations.
     *
     * @param datas            raw training features, one row per example
     * @param labels           0/1 target labels, one row per example
     * @param rate             learning rate
     * @param maxNum           number of iterations
     * @param polynomialDegree polynomial feature-expansion degree (0 = none)
     * @param sinusoidDegress  sinusoid feature-expansion degree (0 = none)
     * @param normalizeType    0 = min-max, 1 = standard deviation, -1 = none
     * @param optimizer        descent variant to use
     * @return loss value per iteration, in order
     */
    public List<Double> fit(Matrix datas, Matrix labels, double rate, int maxNum, int polynomialDegree, int sinusoidDegress, int normalizeType, OptimizerEnum optimizer) {
        // 1. Normalize / expand features, and remember the weights for predict()
        NormalizeData normalizeData = RegressionUtils.prepareForTraining(datas, polynomialDegree, sinusoidDegress, normalizeType);
        this.normalizationWeight1 = normalizeData.normalizationWeight1;
        this.normalizationWeight2 = normalizeData.normalizationWeight2;
        this.x2NormalizationWeight1 = normalizeData.x2NormalizationWeight1;
        this.x2NormalizationWeight2 = normalizeData.x2NormalizationWeight2;
        this.polynomialDegree = polynomialDegree;
        this.sinusoidDegress = sinusoidDegress;
        this.normalizeType = normalizeType;
        datas = normalizeData.featuresNormalized;

        // Prepend a column of ones so the bias term folds into the dot product
        datas = NumPy.hstack(new Matrix(datas.getX(), 1, 1d), datas);
        // 2. Initialize theta as a column vector, one entry per feature (incl. bias)
        Matrix thetas = new Matrix(datas.getY(), 1);
        // Every weight starts at 1 except the last, which starts at -0.5
        for (int x = 0; x < thetas.size(); x++) {
            if (x < thetas.size() - 1) {
                thetas.set(x, 0, 1d);
            } else {
                thetas.set(x, 0, -0.5d);
            }
        }

        // 3. Optimize theta
        List<Double> loss = new ArrayList<>();
        thetas = gradientDescent(thetas, datas, labels, rate, maxNum, optimizer, loss::add);
        this.thetas = thetas;
        // 4. Return the per-iteration loss history
        return loss;
    }

    /**
     * Predicts the probability for a single example.
     *
     * @param sourceData raw (un-normalized) feature values
     * @return sigmoid output in (0, 1)
     */
    public double predict(double[] sourceData) {
        return predict(NumPy.ofRow(sourceData)).get(0, 0);
    }

    /**
     * Predicts probabilities for a batch of examples, replaying the
     * normalization and feature expansion captured at fit time.
     *
     * @param sourceData raw feature matrix, one row per example
     * @return column of sigmoid outputs
     */
    public Matrix predict(Matrix sourceData) {
        // Apply the same normalization / expansion used during training
        NormalizeData normalizeData = RegressionUtils.prepareForTraining(sourceData,
                normalizationWeight1,
                normalizationWeight2,
                x2NormalizationWeight1,
                x2NormalizationWeight2,
                polynomialDegree,
                sinusoidDegress,
                normalizeType);
        sourceData = normalizeData.featuresNormalized;
        return predict(sourceData, this.thetas);
    }

    /**
     * Predicts with an explicit parameter vector (features already normalized).
     *
     * @param sourceData normalized feature matrix, WITHOUT the bias column
     * @param thetas     parameter vector to use
     * @return column of sigmoid outputs
     */
    public Matrix predict(Matrix sourceData, Matrix thetas) {
        // Prepend the bias column of ones, matching the layout used in fit()
        sourceData = NumPy.hstack(new Matrix(sourceData.getX(), 1, 1d), sourceData);
        return hypothesis(sourceData, thetas);
    }

    /**
     * One gradient-ascent-form step: theta + rate * X^T (y - h).
     * Numerically identical to {@link #BGD} (which uses theta - rate * X^T (h - y)).
     *
     * @param data   feature matrix (bias column included)
     * @param labels 0/1 target labels
     * @param theta  current parameters
     * @param rate   learning rate
     * @return updated parameters
     */
    public Matrix gradientStep(Matrix data, Matrix labels, Matrix theta, double rate) {
        Matrix c = hypothesis(data, theta);
        Matrix labelDiff = labels.sub(c);
        // Full-batch update in ascent form
        Matrix calcDelta = data.T().dot(labelDiff).multiply(rate);
        return theta.plus(calcDelta);
    }

    /**
     * Batch gradient descent: one update over the full data set.
     *
     * @param data   feature matrix (bias column included)
     * @param labels 0/1 target labels
     * @param theta  current parameters
     * @param rate   learning rate
     * @return updated parameters
     */
    public Matrix BGD(Matrix data, Matrix labels, Matrix theta, double rate) {
        Matrix h = hypothesis(data, theta);
        Matrix error = h.sub(labels);
        // theta <- theta - rate * X^T (h - y)
        Matrix calcDelta = data.T().dot(error).multiply(rate);
        return theta.sub(calcDelta);
    }

    /**
     * Stochastic gradient descent: one update from a single random example.
     *
     * @param data   feature matrix (bias column included)
     * @param labels 0/1 target labels
     * @param theta  current parameters
     * @param rate   learning rate
     * @return updated parameters
     */
    public Matrix SGD(Matrix data, Matrix labels, Matrix theta, double rate) {
        int m = data.getX();
        // Pick one example uniformly at random
        int rand = ThreadLocalRandom.current().nextInt(0, m);
        List<Double> xi = data.get(rand);
        List<Double> yi = labels.get(rand);
        Matrix xm = NumPy.ofRow(xi);
        Matrix ym = NumPy.ofRow(yi);
        Matrix h = hypothesis(xm, theta);
        Matrix error = h.sub(ym);
        Matrix calcDelta = xm.T().dot(error).multiply(rate);
        return theta.sub(calcDelta);
    }

    /**
     * Mini-batch gradient descent: one update from a contiguous batch of up to
     * 10 examples starting at a random row (batches near the end are smaller).
     *
     * @param data   feature matrix (bias column included)
     * @param labels 0/1 target labels
     * @param theta  current parameters
     * @param rate   learning rate
     * @return updated parameters
     */
    public Matrix MBGD(Matrix data, Matrix labels, Matrix theta, double rate) {
        int m = data.getX();
        int batchNum = 10;
        int rand = ThreadLocalRandom.current().nextInt(0, m);
        // Clamp so the batch never reads past the last row
        int end = Math.min(rand + batchNum, m);
        Matrix xm = new Matrix(0, 0);
        Matrix ym = new Matrix(0, 0);
        for (int i = rand; i < end; i++) {
            xm.add(data.get(i));
            ym.add(labels.get(i));
        }

        Matrix h = hypothesis(xm, theta);
        Matrix error = h.sub(ym);
        Matrix calcDelta = xm.T().dot(error).multiply(rate);
        return theta.sub(calcDelta);
    }


    /**
     * Cross-entropy cost:
     * -(1/m) * [ sum over y=1 of log(h) + sum over y=0 of log(1 - h) ].
     * Predictions are partitioned by label so each side only computes the log
     * term that applies to it; an empty side contributes 0 to the cost.
     *
     * @param datas  feature matrix (bias column included)
     * @param labels 0/1 target labels
     * @param thetas parameters to evaluate
     * @return mean cross-entropy loss
     */
    public double costFunction(Matrix datas, Matrix labels, Matrix thetas) {
        int numExamples = datas.size();
        Matrix predictions = hypothesis(datas, thetas);
        List<Double> oneDatas = new ArrayList<>();
        List<Double> oneLabels = new ArrayList<>();

        List<Double> zeroDatas = new ArrayList<>();
        List<Double> zeroLabels = new ArrayList<>();
        for (int i = 0; i < predictions.size(); i++) {
            for (int y = 0; y < predictions.get(i).size(); y++) {
                if (labels.get(i, y) == 1d) {
                    oneDatas.add(predictions.get(i, y));
                    oneLabels.add(labels.get(i, y));
                } else {
                    zeroDatas.add(predictions.get(i, y));
                    zeroLabels.add(labels.get(i, y));
                }
            }
        }

        // Guard both partitions: a batch of all-positive (or all-negative)
        // labels would otherwise index get(0, 0) on an empty matrix.
        double yIsSetCost = 0d;
        if (!oneDatas.isEmpty()) {
            Matrix oneData = NumPy.ofColumn(oneDatas);
            Matrix oneLabel = NumPy.ofColumn(oneLabels);
            // NOTE(review): log(h) is -Inf when a prediction saturates to exactly 0;
            // consider clamping predictions if that occurs in practice.
            yIsSetCost = NumPy.dot(oneLabel.T(), NumPy.log(oneData)).get(0, 0);
        }
        double yIsNotSetCost = 0d;
        if (!zeroDatas.isEmpty()) {
            Matrix zeroData = NumPy.ofColumn(zeroDatas);
            Matrix zeroLabel = NumPy.ofColumn(zeroLabels);
            yIsNotSetCost = NumPy.dot(NumPy.sub(1, zeroLabel.T()), NumPy.log(NumPy.sub(1, zeroData))).get(0, 0);
        }

        return (-1d / numExamples) * (yIsSetCost + yIsNotSetCost);
    }

    /**
     * The logistic hypothesis: sigmoid(X . theta).
     *
     * @param data  feature matrix (bias column included)
     * @param theta parameter vector
     * @return matrix of probabilities in (0, 1)
     */
    public static Matrix hypothesis(Matrix data, Matrix theta) {
        return NumPy.sigmoid(data.dot(theta));
    }
}
