๋”ฅ๋Ÿฌ๋‹๐Ÿค–

Training the Model

by @ENFJ 2022. 11. 15.

https://teachablemachine.withgoogle.com/ (the model is trained on this site)

https://github.com/googlecreativelab/teachablemachine-community/tree/master/libraries/image
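The HTML below assumes the model was exported from the Teachable Machine export panel (Tensorflow.js → download) and unzipped into a my_model/ folder next to the page, so model.json and metadata.json sit under ./my_model/. Those files are fetched over HTTP, so the page generally has to be served from a local web server rather than opened straight from disk. As a quick sanity check, something like the following hypothetical sketch can be run in the browser console (checkModelFiles and its messages are not part of the original code):

async function checkModelFiles() {
    // Confirm the Teachable Machine export is reachable before clicking Start.
    for (const file of ["model.json", "metadata.json"]) {
        const res = await fetch("./my_model/" + file);
        if (!res.ok) {
            console.warn(file + " not found - did you unzip the export into ./my_model/?");
        }
    }
}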

<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="UTF-8">
    <meta http-equiv="X-UA-Compatible" content="IE=edge">
    <meta name="viewport" content="width=device-width, initial-scale=1.0">
    <title>๋ถˆ๋Ÿ‰ ์‚ฌ๊ณผ(๊ฐ์ฒด) ๊ฐ์ง€ ์‹œ์Šคํ…œ</title>
</head>
<body>
    <div>๋ถˆ๋Ÿ‰ ์‚ฌ๊ณผ(๊ฐ์ฒด) ๊ฐ์ง€ ์‹œ์Šคํ…œ_์ฒด๋ฆฌํ”ผ์ปค</div>
<button type="button" onclick="init()">Start</button>
<div id="webcam-container"></div>
<div id="label-container"></div>
<div id="label-container2"></div>
<script src="https://cdn.jsdelivr.net/npm/@tensorflow/tfjs@1.3.1/dist/tf.min.js"></script>
<script src="https://cdn.jsdelivr.net/npm/@teachablemachine/image@0.8/dist/teachablemachine-image.min.js"></script>
<script type="text/javascript">
    // More API functions here:
    // https://github.com/googlecreativelab/teachablemachine-community/tree/master/libraries/image

    // the link to your model provided by Teachable Machine export panel
    const URL = "./my_model/";

    let model, webcam, labelContainer, labelContainer2, maxPredictions;

    // Load the image model and setup the webcam
    async function init() {
        const modelURL = URL + "model.json";
        const metadataURL = URL + "metadata.json";

        // load the model and metadata
        // Refer to tmImage.loadFromFiles() in the API to support files from a file picker
        // or files from your local hard drive
        // Note: the image library adds a "tmImage" object to your window (window.tmImage)
        model = await tmImage.load(modelURL, metadataURL);
        maxPredictions = model.getTotalClasses();

        // Convenience function to setup a webcam
        const flip = true; // whether to flip the webcam
        webcam = new tmImage.Webcam(200, 200, flip); // width, height, flip
        await webcam.setup(); // request access to the webcam
        await webcam.play();
        window.requestAnimationFrame(loop);

        // append elements to the DOM
        document.getElementById("webcam-container").appendChild(webcam.canvas);
        labelContainer = document.getElementById("label-container");
        for (let i = 0; i < maxPredictions; i++) { // and class labels
            labelContainer.appendChild(document.createElement("div"));
        }
        labelContainer2 = document.getElementById("label-container2");
    }
    let cnt = 0; // running count of defective-apple detections
    async function loop() {
        webcam.update(); // update the webcam frame
        await predict();
        window.requestAnimationFrame(loop);
    }

    // run the webcam image through the image model
    async function predict() {
        // predict can take in an image, video or canvas html element
        const prediction = await model.predict(webcam.canvas);

        // prediction[0] is the "apple" class and prediction[1] is the
        // "rotten_apple" class, in the order defined in Teachable Machine.
        if (prediction[0].className == "apple" && prediction[0].probability.toFixed(2) === "1.00") {
            labelContainer.childNodes[0].innerHTML = "This is a normal apple.";
        } else if (prediction[1].className == "rotten_apple" && prediction[1].probability.toFixed(2) === "1.00") {
            cnt++;
            labelContainer2.innerHTML = "Defective apples detected: " + cnt;
            labelContainer.childNodes[0].innerHTML = "This is a defective apple.";
            const audio = new Audio('rotten.mp3');
            audio.play();
        } else {
            labelContainer.childNodes[0].innerHTML = "Unable to classify.";
        }

        // Default example code: show every class with its probability.
        // for (let i = 0; i < maxPredictions; i++) {
        //     const classPrediction =
        //         prediction[i].className + ": " + prediction[i].probability.toFixed(2);
        //     labelContainer.childNodes[i].innerHTML = classPrediction;
        // }
    }
</script>


    
</body>
</html>
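For reference, the prediction returned by model.predict() is an array of { className, probability } objects, so the hard-coded index checks above can be replaced by picking the most likely class. A minimal sketch, assuming the same model and webcam objects as above (the helper name predictTopClass is made up for illustration):

// Hypothetical helper, not part of the original code: return the single most
// likely class instead of checking fixed positions in the prediction array.
async function predictTopClass() {
    const prediction = await model.predict(webcam.canvas);
    let top = prediction[0];
    for (const p of prediction) {
        if (p.probability > top.probability) top = p;
    }
    return top; // e.g. { className: "rotten_apple", probability: 0.99 }
}

With a helper like this, the if/else in predict() only needs to look at top.className and top.probability, and it keeps working if more classes are added in Teachable Machine later.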

 

๋ถˆ๋Ÿ‰ ํƒ์ง€์‹œ mp3ํŒŒ์ผ (https://clova.ai/voice)

๋ถˆ๋Ÿ‰+๊ฐ์ฒด+ํƒ์ง€+์‹œ์Šคํ…œ.mp3
0.04MB
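One thing to keep in mind: predict() runs once per animation frame, so while a defective apple stays in view the counter keeps climbing and a new Audio object is created on every frame. A small guard like the sketch below (reusing one Audio object and checking its paused flag; playAlert is a hypothetical name, not part of the original code) avoids overlapping playback:

// Hypothetical guard: reuse one Audio object and only restart the alert
// when it is not already playing.
const alertAudio = new Audio('rotten.mp3');
function playAlert() {
    if (alertAudio.paused) {        // true when the clip is stopped or has finished
        alertAudio.currentTime = 0; // rewind to the start
        alertAudio.play();
    }
}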