<!doctype html>
<html lang="en">

<head>
    <meta charset="UTF-8">
    <meta name="viewport" content="width=device-width, initial-scale=1">
    <title>Interpreting Hand Gestures and Sign Language in the Webcam with AI using TensorFlow.js</title>
    <script src="https://cdn.jsdelivr.net/npm/@tensorflow/tfjs@2.0.0/dist/tf.min.js"></script>
    <script src="https://cdn.jsdelivr.net/npm/@tensorflow/tfjs-data@2.0.0/dist/tf-data.min.js"></script>
    <style>
        img,
        video {
            object-fit: cover;
            margin:0 auto;
        }
    </style>
</head>

<body>
    <video autoplay playsinline muted id="webcam" width="600" height="600"></video>
    <div id="buttons">
        <button onclick="captureSample(0)">None</button>
        <button onclick="captureSample(1)">✊ (Rock)</button>
        <button onclick="captureSample(2)">🖐 (Paper)</button>
        <button onclick="captureSample(3)">✌️ (Scissors)</button>
        <button onclick="captureSample(4)">👌 (Letter D)</button>
        <button onclick="captureSample(5)">👍 (Thumb Up)</button>
        <button onclick="captureSample(6)">🖖 (Vulcan)</button>
        <button onclick="captureSample(7)">🤟 (ILY - I Love You)</button>
        <button onclick="trainModel()">Train</button>
    </div>
    <h1 id="status">Loading...</h1>
    <script>
        // Captured samples awaiting training. Each entry is
        // { image: tf.Tensor [224,224,3] normalized frame, category: label index }.
        let trainingData = [];

        // Display names for each gesture class; the index doubles as the
        // class id used for one-hot targets and prediction lookup.
        const labels = [
            "None",
            "✊ (Rock)",
            "🖐 (Paper)",
            "✌️ (Scissors)",
            "👌 (Letter D)",
            "👍 (Thumb Up)",
            "🖖 (Vulcan)",
            "🤟 (ILY - I Love You)"
        ];

        // Show a status/result message in the page's <h1> banner.
        function setText(text) {
            const banner = document.getElementById("status");
            banner.innerText = text;
        }

        // Classify the current webcam frame and display the winning label.
        // No-op until the transfer model has been trained at least once.
        async function predictImage() {
            if (!hasTrained) { return; }
            const frame = await getWebcamImage();
            // tidy() frees the batched input tensor; only the prediction survives.
            const scores = tf.tidy(() => {
                const batched = frame.reshape([1, 224, 224, 3]);
                return model.predict(batched);
            });
            frame.dispose();
            const probabilities = await scores.data();
            scores.dispose();
            // Pick the class with the highest softmax probability.
            const id = probabilities.indexOf(Math.max(...probabilities));
            setText(labels[id]);
        }

        // Turn pre-trained MobileNet into a transfer-learning model:
        // truncate it at the "dropout" layer (just before the pre-trained
        // conv_pred classification layer), freeze the convolutional base,
        // and attach a fresh trainable classification head sized to `labels`.
        function createTransferModel(model) {
            const featureLayer = model.getLayer("dropout");
            const featureExtractor = tf.model({
                inputs: model.inputs,
                outputs: featureLayer.output
            });
            // Freeze the base so only the new head's weights train.
            featureExtractor.layers.forEach(layer => {
                layer.trainable = false;
            });

            // New head: flatten -> dense relu stack -> softmax over classes.
            const head = tf.sequential();
            head.add(tf.layers.flatten({
                inputShape: featureExtractor.outputs[0].shape.slice(1)
            }));
            head.add(tf.layers.dense({ units: 100, activation: 'relu' }));
            head.add(tf.layers.dense({ units: 100, activation: 'relu' }));
            head.add(tf.layers.dense({ units: 10, activation: 'relu' }));
            head.add(tf.layers.dense({
                units: labels.length,
                kernelInitializer: 'varianceScaling',
                useBias: false,
                activation: 'softmax'
            }));

            // Stitch the head onto the frozen base and return the combined model.
            const headOutput = head.apply(featureExtractor.outputs[0]);
            return tf.model({ inputs: featureExtractor.inputs, outputs: headOutput });
        }

        // Train the transfer model on the samples captured so far.
        // Fixes vs. original: guards against an empty sample set
        // (tf.stack([]) throws), uses categoricalCrossentropy — the correct
        // loss for a softmax classifier (meanSquaredError trains poorly on
        // one-hot targets) — and disposes the one-hot, xs and ys tensors,
        // which previously leaked on every training run.
        async function trainModel() {
            if (trainingData.length === 0) {
                setText("Capture some samples before training");
                return;
            }
            hasTrained = false;
            setText("Training...");

            // Build the training batch: stack captured frames into xs and
            // one-hot encode each sample's category index into ys.
            const imageSamples = [];
            const targetSamples = [];
            trainingData.forEach(sample => {
                imageSamples.push(sample.image);
                const oneHot = labels.map((_, c) => (c === sample.category ? 1 : 0));
                targetSamples.push(tf.tensor1d(oneHot));
            });
            const xs = tf.stack(imageSamples);
            const ys = tf.stack(targetSamples);
            // The per-sample one-hot tensors are copied into ys by stack().
            targetSamples.forEach(t => t.dispose());

            try {
                model.compile({ loss: "categoricalCrossentropy", optimizer: "adam", metrics: ["acc"] });

                await model.fit(xs, ys, {
                    epochs: 30,
                    shuffle: true,
                    callbacks: {
                        onEpochEnd: (epoch, logs) => {
                            console.log("Epoch #", epoch, logs);
                        }
                    }
                });
                hasTrained = true;
            } finally {
                // Free the batch tensors so repeated training runs don't leak memory.
                xs.dispose();
                ys.dispose();
            }
        }

        // Mobilenet v1 0.25 224x224 model
        const mobilenet = "https://storage.googleapis.com/tfjs-models/tfjs/mobilenet_v1_0.25_224/model.json";

        // Holds raw MobileNet during load, then the transfer model.
        let model = null;
        // Gate for the prediction loop; flipped to true after trainModel() succeeds.
        let hasTrained = false;

        // Attach the user's camera stream to the <video id="webcam"> element.
        // Resolves once the first frame of data is available; rejects with an
        // Error describing the failure (the original rejected with undefined).
        // Uses the standard promise-based navigator.mediaDevices.getUserMedia
        // first — the prefixed navigator.getUserMedia variants are deprecated
        // and removed in modern browsers — keeping the legacy path as fallback.
        async function setupWebcam() {
            const webcamElement = document.getElementById("webcam");
            let stream;
            if (navigator.mediaDevices && navigator.mediaDevices.getUserMedia) {
                stream = await navigator.mediaDevices.getUserMedia({ video: true });
            } else {
                const navigatorAny = navigator;
                const legacyGetUserMedia = navigator.getUserMedia ||
                    navigatorAny.webkitGetUserMedia || navigatorAny.mozGetUserMedia ||
                    navigatorAny.msGetUserMedia;
                if (!legacyGetUserMedia) {
                    throw new Error("getUserMedia is not supported in this browser");
                }
                stream = await new Promise((resolve, reject) => {
                    legacyGetUserMedia.call(navigator, { video: true }, resolve, reject);
                });
            }
            webcamElement.srcObject = stream;
            return new Promise(resolve => {
                webcamElement.addEventListener("loadeddata", resolve, { once: true });
            });
        }

        // Grab one frame from the webcam iterator and normalize pixel values
        // from [0, 255] to [-1, 1], the input range MobileNet was trained on.
        // Fixes vs. original: div(127) mapped pixels to [-1, 1.008] instead
        // of [-1, 1] (127.5 is the correct half-range), and both the raw
        // capture tensor and the toFloat() intermediate leaked on every call
        // (the prediction loop runs 5x/second, so this exhausted memory fast).
        // Caller owns (and must dispose) the returned tensor.
        async function getWebcamImage() {
            const frame = await webcam.capture();
            // tidy() frees the float intermediate; the raw frame was created
            // outside the tidy scope, so dispose it explicitly.
            const normalized = tf.tidy(() => frame.toFloat().div(127.5).sub(1));
            frame.dispose();
            return normalized;
        }

        // Snapshot the current webcam frame and queue it for training,
        // paired with the gesture label index of the button clicked.
        async function captureSample(category) {
            const image = await getWebcamImage();
            trainingData.push({ image, category });
            setText("Captured: " + labels[category]);
        }

        // Async iterator over webcam frames (set during startup).
        let webcam = null;

        // Bootstrap: load MobileNet, swap in the transfer-learning head,
        // start the camera, then begin the live prediction loop.
        async function start() {
            model = await tf.loadLayersModel(mobilenet);
            model = createTransferModel(model);
            await setupWebcam();
            webcam = await tf.data.webcam(document.getElementById("webcam"));
            // Classify the live feed five times per second.
            setInterval(predictImage, 200);
        }

        start();
    </script>
</body>

</html>