package gbench.sandbox.matlib;

import org.junit.jupiter.api.Test;

import static gbench.common.matlib.MatlibCanvas.println;
import static gbench.common.matlib.algebra.lisp.MyRecord.REC;
import static gbench.common.matlib.matrix.MatrixOps.mmult;
import static gbench.common.matlib.matrix.MatrixOps.transpose;
import static java.util.Arrays.asList;

import java.util.LinkedList;
import java.util.function.BiFunction;
import java.util.function.Function;
import java.util.function.Predicate;
import java.util.function.Supplier;
import java.util.function.UnaryOperator;
import java.util.stream.Stream;

import gbench.common.matlib.algebra.num4j.XVec;
import gbench.common.matlib.algebra.num4j.NVec;
import gbench.common.matlib.algebra.lisp.DFrame;
import gbench.common.matlib.algebra.lisp.IRecord;

/**
 * Sandbox experiment: a toy fully-connected feed-forward network built on the
 * project's matrix/record primitives ({@code XVec}, {@code NVec}, {@code IRecord},
 * {@code DFrame}). It wires a 1-2-3-3-1 layer model, simulates linear training
 * data (y = x · thetas), runs one forward pass per row and one backward (delta)
 * pass per input — printing intermediate results rather than actually updating
 * weights (see the unconditional {@code break} near the end of the loop).
 */
public class JunitCell2 {

    /**
     * Builds the layered model, generates simulated training data, and runs a
     * single forward/backward iteration, printing diagnostics along the way.
     * NOTE(review): this is exploratory sandbox code — the training loop is cut
     * short by an unconditional break, so no weight update is ever applied.
     */
    @Test
    public void foo() {

        final UnaryOperator<Double> sigmoid = x -> 1 / (1 + Math.exp(-x)); // sigmoid activation: 1 / (1 + e^-x)
        final UnaryOperator<Double> d_sigmoid = x -> sigmoid.apply(x) * (1 - sigmoid.apply(x)); // derivative of sigmoid: s(x) * (1 - s(x))
        // Forward-pass step for one layer. The stack accumulates activations layer by
        // layer; the stack top always holds the previous layer's output vector.
        final BiFunction<LinkedList<NVec>, XVec<IRecord>, LinkedList<NVec>> iterate_of = (stack, layer) -> {
            final var a_l1 = stack.peek().insert(0, 1d); // previous layer's activations with a bias term (1.0) prepended at index 0
            // Weight matrix for this layer: each cell's "weights" row, defaulting to an
            // all-ones vector (sized for input + bias) when absent.
            final var wts_l = layer.fmap(cell -> cell.computeIfAbsent("weights", NVec.of(a_l1.size(), i -> 1)).data()).data();
            final var z_l = NVec.of(mmult(wts_l, a_l1.data())); // weighted input z = W * a_l1
            final var a_l = z_l.fmapN(sigmoid); // activations a = sigmoid(z)

            // Record per-cell intermediate results on the layer cells; the backward pass
            // later reads these "z" values through d_sigmoid.
            Stream.iterate(0, i -> i + 1).limit(layer.size()).forEach(i -> layer.get(i).add("z", z_l.get(i), "a", a_l.get(i)));
            stack.push(a_l); // push this layer's output so it becomes the next step's input

            return stack;
        }; // iterate_of
        // Sequence model: 10 cells (id 0..9) partitioned into layers of sizes 1,2,3,3,1.
        final XVec<XVec<IRecord>> sequenceModel = XVec.OF(10, i -> REC("id", i)).sliceX(1, 2, 3, 3, 1);

        // Model initialization: for each adjacent (previous, current) layer pair, wire
        // the current layer's cells to their inputs and give them initial weights.
        sequenceModel.slidingS(2, 1, true).forEach(layers -> {
            final var previous = layers.car(); // input side of the pair
            final var current = layers.get(1); // computing side of the pair
            final var weights = NVec.of(previous.size() + 1, i -> 1); // all-ones weights; +1 slot for the bias term
            previous.forEach(cell -> cell.computeIfAbsent("layer", 0)); // default the first layer's index to 0
            final var layerno = previous.car().i4("layer") + 1; // this layer's index = previous layer's index + 1
            current.forEach(cell -> cell.add("id", cell.get("id"), "layer", layerno, "inputs", previous, "weights", weights));
        });

        // Training-data preparation
        final var rb = IRecord.rb("x1,x2,x3,x4,y"); // record builder for the simulated row schema
        final var thetas = NVec.of(1, 2, 3, 4); // ground-truth parameters used to synthesize y
        final var datasource = Stream.iterate(0, i -> i + 1).limit(100) // generate 100 sample rows
                .map(i -> NVec.of(4, j -> Math.random())) // random features in [0, 1)
                .map(line -> line.append(line.dot(thetas)).mutate(rb::get)) // label y = x · thetas, packed as a record
                .collect(DFrame.dfmclc); // full data source
        final var trainset = datasource.rowS().filter(e -> Math.random() > 0.9).collect(DFrame.dfmclc); // training set: random ~10% sample
        final var trainset_xs = trainset.rowS().map(line -> line.filterNot("y").arrayOf(IRecord.obj2dbl(), NVec::new))
                .collect(XVec.xveclc()); // feature vectors (all columns except y)
        final var trainset_ys = trainset.rowS().map(line -> line.filter("y")).collect(DFrame.dfmclc); // label column (y) only
        
        println(trainset_ys);
        println("y", trainset_ys.colS("y").collect(NVec.nveclc(IRecord.obj2dbl())));
        println(datasource);

        
        // Training
        final var eps = 1E-3; // error tolerance
        final var eql = (Predicate<NVec>) errors -> eps > errors.mul(errors).mean(); // stop when mean squared error < eps
        while (true) {
            // Forward pass over every training row, collecting the per-row error.
            final var errors = trainset.rowS().map(line -> { // one training row
                final var xs = line.filterNot("y").arrayOf(IRecord.obj2dbl(), NVec::new); // extract the x features
                final var ys = sequenceModel.reduce(new LinkedList<NVec>(asList(xs)), iterate_of); // forward pass: activations stacked per layer
                final var y_label = NVec.of(line.dbl("y")); // expected label
                final var y = ys.pop(); // network output (top of the activation stack)
                final var error = y_label.sub(y); // error component: label - prediction

                println("----------------------------------------");
                println("结果队列:", ys, "\ny_label:", y_label, "error", y_label.sub(y));

                return error.get(0);
            }).collect(NVec.nveclc()); // collect per-row errors (squared later inside eql)
            
            println("errors", errors);

            if (eql.test(errors)) { // error within tolerance
                break; // stop: the error condition is satisfied
            } else { // weight adjustment (backpropagation)
                final NVec grad_cost = errors.mul(-1); // partial derivative of the cost with respect to y
                final Function<IRecord, Double> dsigma_dz_of = cell -> d_sigmoid.apply(cell.dbl("z")); // sigmoid' evaluated at the cell's stored z

                // Backward pass: compute per-layer deltas from the output layer inward.
                // NOTE(review): reads the "z" values written by the most recent forward
                // pass, and grad_cost spans all training rows — confirm intended scope.
                final var backpass = (Supplier<LinkedList<NVec>>) () -> {
                    final var delta_L = sequenceModel.last().map(dsigma_dz_of).collect(NVec.nveclc()).mul(grad_cost); // delta for the output layer
                    final var delta_ls = sequenceModel.reverse().slidingX(2, 1, true)
                            .reduce(new LinkedList<NVec>(asList(delta_L)), (stack, layers) -> {
                                final var delta_l1 = stack.peek(); // delta of the next (later) layer
                                final var layer_l1 = layers.car(); // the next (later) layer — model is reversed here
                                final var wts_l1 = layer_l1.fmap(cell -> cell.get("weights", NVec.class).data()).data(); // next layer's weight matrix

                                final var layer_l = layers.get(1); // current layer
                                final var dsigma_dz_l = layer_l.fmap(dsigma_dz_of).mutate(NVec::new); // sigmoid' at the current layer's z values
                                final var deta_l = NVec.of(mmult(transpose(wts_l1), delta_l1.data())).mul(dsigma_dz_l); // current layer's delta: (W^T * delta) ⊙ sigmoid'(z)

                                stack.push(deta_l);

                                return stack;
                            });
                    return delta_ls;
                }; // backpass

                trainset_xs.forEach(xs -> {
                    final var ys = sequenceModel.reduce(new LinkedList<NVec>(asList(xs)), iterate_of); // one forward pass (refreshes per-cell z/a)
                    final var delta_ls = backpass.get(); // one backward pass (per-layer deltas)
                    
                    println("---------------------------");
                    println("ys", ys.size(), ys);
                    println("deltas", delta_ls.size(), delta_ls);
                    println("---------------------------");
                }); // forEach

                // NOTE(review): unconditional break — the loop performs at most one
                // iteration and no weight update is applied; dev scaffolding, presumably.
                break;
            } // else
            
        } // while true

    }

}
