<!DOCTYPE html>
<html lang="en">

<head>
    <meta charset="UTF-8">
    <meta name="viewport" content="width=device-width, initial-scale=1.0">
    <title>EMNIST Recognition</title>
    <script src="https://cdn.jsdelivr.net/npm/@tensorflow/tfjs@2.8.6/dist/tf.min.js"></script>
    <script async src="https://docs.opencv.org/4.5.0/opencv.js" onload="onOpenCvReady();"></script>
    <style>
        /* Page layout and widget styles */
        body {
            font-family: Arial, sans-serif;
            display: flex;
            flex-direction: column;
            align-items: center;
            padding: 20px;
        }

        #canvas {
            border: 2px solid black;
            cursor: crosshair;
        }

        button {
            margin: 10px;
            padding: 10px 20px;
            font-size: 16px;
        }

        #result {
            font-size: 24px;
            margin-top: 20px;
        }

        #status {
            color: #888;
            margin-bottom: 10px;
        }
    </style>
</head>

<body>
    <div id="status">Loading model and OpenCV...</div>
    <canvas id="canvas" width="280" height="280" style="background-color: #ccc;"></canvas>
    <div>
        <button id="clear">Clear</button>
        <button id="predict">Predict</button>
    </div>
    <div id="result"></div>

    <script>
        // Global state shared by the drawing and prediction handlers.
        const canvas = document.getElementById('canvas');   // drawing surface
        const ctx = canvas.getContext('2d');                // 2D context used for strokes
        const statusDiv = document.getElementById('status');
        const resultDiv = document.getElementById('result');
        let isDrawing = false;    // true while a mouse/touch stroke is in progress
        let model;                // tf.LayersModel, assigned by loadModel()
        let isModelReady = false; // set once the model has loaded
        let isOpenCvReady = false; // set by the opencv.js onload callback

        // TensorFlow.js initialization logging.
        // The original file waited on tf.ready() twice — once via a bare
        // .then() chain and again inside initTF() — producing duplicate
        // log lines for the same event. Consolidated into one helper.
        async function initTF() {
            await tf.ready();
            console.log('TensorFlow.js initialized. Backend:', tf.getBackend());
        }

        initTF();

        // Model loading: fetch the converted Keras model served next to
        // this page, flip the ready flag and refresh the status line.
        // Failures are logged and surfaced to the user via #status.
        function loadModel() {
            return tf.loadLayersModel('web_model/model.json')
                .then((loadedModel) => {
                    model = loadedModel;
                    console.log('Model loaded successfully');
                    isModelReady = true;
                    updateStatus();
                })
                .catch((error) => {
                    console.error('Error loading the model:', error);
                    statusDiv.textContent = 'Error loading the model. Please check console for details.';
                });
        }

        // Select the WebGL backend, then load the model.
        // The original call had no failure handling: on a browser without
        // WebGL the rejected promise aborted the chain and the model was
        // never loaded. Fall back to the CPU backend instead.
        async function initializeTensorFlow() {
            try {
                await tf.setBackend('webgl');
            } catch (error) {
                console.warn('WebGL backend unavailable, falling back to CPU:', error);
                await tf.setBackend('cpu');
            }
            console.log('Backend set to:', tf.getBackend());
            await loadModel();
        }

        // Callback fired by the opencv.js <script> tag's onload attribute
        // once the library is available as the global `cv`.
        function onOpenCvReady() {
            isOpenCvReady = true;
            updateStatus();
        }

        // Announce readiness once BOTH async dependencies have loaded;
        // called from each loader's completion path.
        function updateStatus() {
            if (!isModelReady || !isOpenCvReady) return;
            statusDiv.textContent = 'Model and OpenCV are ready. You can start drawing.';
        }

        // Kick off backend selection and model download immediately.
        initializeTensorFlow();

        // Freehand drawing: wire mouse events to the stroke handlers.
        // mouseout also ends the stroke so leaving the canvas mid-drag
        // does not keep drawing on re-entry.
        canvas.addEventListener('mousedown', startDrawing);
        canvas.addEventListener('mousemove', draw);
        canvas.addEventListener('mouseup', stopDrawing);
        canvas.addEventListener('mouseout', stopDrawing);

        // Begin a stroke and immediately paint the initial point under
        // the cursor (draw() is a no-op unless isDrawing is set).
        function startDrawing(e) {
            isDrawing = true;
            draw(e);
        }

        // Extend the current stroke to the pointer position. Accepts either
        // a MouseEvent or a Touch point — both expose clientX/clientY.
        function draw(e) {
            if (!isDrawing) return;

            // Stroke appearance: thick white rounded line on the canvas.
            ctx.lineWidth = 20;
            ctx.lineCap = 'round';
            ctx.strokeStyle = 'white';

            // Convert viewport coordinates into canvas-local coordinates.
            const bounds = canvas.getBoundingClientRect();
            const px = e.clientX - bounds.left;
            const py = e.clientY - bounds.top;

            // Draw a segment to the new point, then restart the path there
            // so the next mousemove continues from the current position.
            ctx.lineTo(px, py);
            ctx.stroke();
            ctx.beginPath();
            ctx.moveTo(px, py);
        }

        // End the stroke and reset the path so the next stroke does not
        // connect to the end of this one.
        function stopDrawing() {
            isDrawing = false;
            ctx.beginPath();
        }

        // Prediction: preprocess the canvas with OpenCV (resize, mirror,
        // rotate, grayscale — matching EMNIST's transposed storage format),
        // then run the model and display the top class.
        document.getElementById('predict').addEventListener('click', async () => {
            if (!isModelReady || !isOpenCvReady) {
                alert('Please wait for the model and OpenCV to load.');
                return;
            }

            // OpenCV Mats live on the WASM heap and are NOT garbage
            // collected — every one allocated here is freed in `finally`.
            // (The original leaked mat, mirroredMat and rotatedMat, and
            // skipped endScope() when an error was thrown.)
            let mat = null;
            let resizedMat = null;
            let mirroredMat = null;
            let rotatedMat = null;
            let rotationMatrix = null;
            let grayMat = null;

            try {
                // Scope all tensors created below so endScope() disposes
                // them in one shot.
                tf.engine().startScope();

                console.log("--- Starting new prediction ---");

                let imgData = ctx.getImageData(0, 0, canvas.width, canvas.height);

                // Wrap the canvas RGBA pixels in an OpenCV Mat.
                mat = cv.matFromImageData(imgData);

                // Downscale to the 28x28 input the model expects.
                resizedMat = new cv.Mat();
                cv.resize(mat, resizedMat, new cv.Size(28, 28), 0, 0, cv.INTER_AREA);

                // EMNIST images are stored transposed relative to screen
                // orientation: mirror horizontally, then rotate 90 degrees
                // counter-clockwise.
                mirroredMat = new cv.Mat();
                cv.flip(resizedMat, mirroredMat, 1); // 1 = horizontal flip

                rotatedMat = new cv.Mat();
                let center = new cv.Point(resizedMat.cols / 2, resizedMat.rows / 2);
                rotationMatrix = cv.getRotationMatrix2D(center, 90, 1); // positive angle = counter-clockwise
                cv.warpAffine(mirroredMat, rotatedMat, rotationMatrix, new cv.Size(28, 28));

                // Debug aid: log the preprocessed image as a data URL so
                // it can be opened and inspected from the console.
                let canvas1 = document.createElement('canvas');
                cv.imshow(canvas1, rotatedMat);
                let dataUrl = canvas1.toDataURL();
                console.log("rotatedMat的DataURL:", dataUrl);

                // Collapse RGBA to a single grayscale channel.
                grayMat = new cv.Mat();
                cv.cvtColor(rotatedMat, grayMat, cv.COLOR_RGBA2GRAY);

                let canvas2 = document.createElement('canvas');
                cv.imshow(canvas2, grayMat);
                let dataUr2 = canvas2.toDataURL();
                console.log("dataUr2的DataURL:", dataUr2);

                // Build a [1, 28, 28, 1] float tensor normalized to [0, 1].
                let imgDataTensor = tf.tensor(grayMat.data).reshape([1, 28, 28, 1]).toFloat().div(tf.scalar(255));

                let prediction = model.predict(imgDataTensor);
                prediction.print();

                let index = prediction.argMax(1).dataSync()[0];
                console.log("Predicted index:", index);

                let predictedChar = interpretEMNISTOutput(index);
                resultDiv.textContent = `Predicted: ${predictedChar}`;

                // Log the five most likely classes for debugging.
                let topK = tf.topk(prediction, 5);
                let topKIndices = topK.indices.dataSync();
                let topKValues = topK.values.dataSync();

                console.log("Top 5 predictions:");
                for (let i = 0; i < 5; i++) {
                    console.log(`${interpretEMNISTOutput(topKIndices[i])}: ${topKValues[i].toFixed(4)}`);
                }
            } catch (error) {
                console.error('Error during prediction:', error);
                resultDiv.textContent = 'Error during prediction. Please try again.';
            } finally {
                // Free every Mat that was actually allocated, then dispose
                // all scoped tensors.
                for (const m of [mat, resizedMat, mirroredMat, rotatedMat, rotationMatrix, grayMat]) {
                    if (m) m.delete();
                }
                tf.engine().endScope();
            }
        });

        // Map a class index from the EMNIST ByClass head (62 classes) to
        // its character: digits 0-9, then A-Z, then a-z. Out-of-range
        // indices map to "Unknown".
        function interpretEMNISTOutput(index) {
            const mapping = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz';
            if (index < mapping.length) {
                return mapping[index];
            }
            return "Unknown";
        }

        // Map a class index from an EMNIST Letters-style lowercase head
        // (26 classes) to 'a'..'z'; out-of-range indices map to "Unknown".
        // NOTE(review): not referenced anywhere in this file — presumably
        // kept for a lowercase-only model variant.
        function interpretLowercaseEMNISTOutput(index) {
            const letters = 'abcdefghijklmnopqrstuvwxyz';
            if (index < letters.length) {
                return letters[index];
            }
            return "Unknown";
        }

        // Touch support: route touch events into the same stroke handlers
        // used for the mouse. A Touch object exposes clientX/clientY,
        // which is all draw() reads from its argument.
        canvas.addEventListener('touchstart', handleTouchStart, false);
        canvas.addEventListener('touchmove', handleTouchMove, false);
        canvas.addEventListener('touchend', handleTouchEnd, false);

        function handleTouchStart(e) {
            // preventDefault keeps the page from scrolling while drawing.
            e.preventDefault();
            startDrawing(e.touches[0]);
        }

        function handleTouchMove(e) {
            e.preventDefault();
            draw(e.touches[0]);
        }

        function handleTouchEnd(e) {
            e.preventDefault();
            stopDrawing();
        }



        // Wipe the drawing surface and any previously shown prediction.
        document.getElementById('clear').addEventListener('click', () => {
            ctx.clearRect(0, 0, canvas.width, canvas.height);
            resultDiv.textContent = '';
        });
    </script>
</body>

</html>