拿 ml5 来练习 p5.js (二)

介绍

首先介绍什麽是 p5.js,
p5.js 是基於 Processing 在浏览器中提供友善的画布 (canvas) 使用介面。(https://creativecoding.in/2020/04/24/p5-js-%E5%BF%AB%E9%80%9F%E4%B8%8A%E6%89%8B/)

p5.js 与 ml5 的共同点是它们都可以透过浏览器使用 GPU 及其专属记忆体。

备料

接着备料,

  1. hello-ml5 里新增两个档案,一档名为 face_p5.html,另一档名为 face_nop5.html
  2. 在 face_p5.html 及 face_nop5.html 分别输入以下程序码。

face_p5.html 的程序码如下—

<!DOCTYPE html>
<html lang="en">

<head>
    <meta charset="UTF-8">
    <title>FaceApi Landmarks Demo With p5.js</title>

    <!-- p5.js core and its DOM addon, then the ml5 machine-learning library -->
    <script src="https://cdnjs.cloudflare.com/ajax/libs/p5.js/0.9.0/p5.min.js"></script>
    <script src="https://cdnjs.cloudflare.com/ajax/libs/p5.js/0.9.0/addons/p5.dom.min.js"></script>
    <script src="https://unpkg.com/ml5@latest/dist/ml5.min.js"></script>
</head>

<body>
    <h1>FaceApi Landmarks Demo With p5.js</h1>
    <!-- The sketch runs in p5 "global mode": setup() is called automatically -->
    <script src="sketch_face_p5.js"></script>
</body>

</html>

face_nop5.html 的程序码如下—

<!DOCTYPE html>
<html lang="en">

<head>
    <meta charset="UTF-8">
    <title>FaceApi Landmarks Demo With no p5.js</title>

    <!-- Only ml5 is loaded; the sketch re-implements p5's helpers itself -->
    <script src="https://unpkg.com/ml5@latest/dist/ml5.min.js" type="text/javascript"></script>
</head>

<body>
    <h1>FaceApi Landmarks Demo With no p5.js</h1>
    <script src="sketch_face_nop5.js"></script>
</body>

</html>
  3. hello-ml5 里新增两个档案,一档名为 sketch_face_p5.js,另一档名为 sketch_face_nop5.js
    且分别输入以下程序码。

sketch_face_p5.js 的程序码如下—

// ml5 faceApi model handle, webcam capture, and latest detection results.
// These stay `let` because setup()/gotResults() reassign them.
let faceapi;
let video;
let detections;

// Detection options: landmarks on, descriptors off (only outlines are drawn).
const detection_options = {
    withLandmarks: true,
    withDescriptors: false
};

// Canvas / video dimensions in pixels — never reassigned, so `const`.
const width = 360;
const height = 280;

// p5.js entry point: create the canvas, start the webcam capture,
// and hand the video stream to ml5's faceApi model.
function setup() {
    createCanvas(width, height);

    // load up your video
    video = createCapture(VIDEO);
    video.size(width, height);
    video.hide(); // Hide the video element, and just show the canvas

    // modelReady is invoked once the faceApi model finishes loading.
    faceapi = ml5.faceApi(video, detection_options, modelReady);
}

// Called by ml5 once the faceApi model has loaded; starts the first
// detection pass (gotResults re-schedules itself afterwards).
function modelReady() {
    console.log('ready!');
    faceapi.detect(gotResults);
}

// Detection callback: paint the current video frame, overlay any detected
// face landmarks, then immediately schedule the next detection pass.
function gotResults(err, result) {
    // Bail out on error; note this also stops the detect/draw loop.
    if (err) {
        console.log(err);
        return;
    }

    detections = result;

    // Draw the latest webcam frame onto the canvas.
    image(video, 0, 0, width, height);

    // Overlay landmarks only when at least one face was found.
    if (detections && detections.length > 0) {
        drawLandmarks(detections);
    }

    faceapi.detect(gotResults);
}

// Draw the bounding box and the six facial-feature outlines for every
// detected face.
function drawLandmarks(detections) {
    // Purple stroke for the box and all feature outlines.
    stroke(161, 95, 251);

    for (const detection of detections) {
        // Bounding box of the aligned face.
        const { _x: x, _y: y, _width: boxWidth, _height: boxHeight } = detection.alignedRect._box;
        rect(x, y, boxWidth, boxHeight);

        // Eyes and mouth are closed outlines; nose and brows stay open.
        const { mouth, nose, leftEye, rightEye, leftEyeBrow, rightEyeBrow } = detection.parts;
        drawPart(mouth, true);
        drawPart(nose, false);
        drawPart(leftEye, true);
        drawPart(leftEyeBrow, false);
        drawPart(rightEye, true);
        drawPart(rightEyeBrow, false);
    }
}

// Draw one facial feature as a polyline; when `closed` is true the last
// point is joined back to the first (used for eyes and the mouth).
function drawPart(feature, closed) {
    beginShape();
    for (const point of feature) {
        vertex(point._x, point._y);
    }
    if (closed === true) {
        endShape(CLOSE);
    } else {
        endShape();
    }
}

sketch_face_nop5.js 的程序码如下—

// ml5 faceApi model handle, webcam capture, and latest detection results.
// These stay `let` because setup()/gotResults() reassign them.
let faceapi;
let video;
let detections;

// Detection options: landmarks on, descriptors off (only outlines are drawn).
const detection_options = {
    withLandmarks: true,
    withDescriptors: false
};

// Canvas / video dimensions in pixels — never reassigned, so `const`.
const width = 360;
const height = 280;

// Without p5's automatic bootstrapping, run setup() ourselves once the
// DOM is ready.
window.addEventListener('DOMContentLoaded', () => {
    setup();
});
// Entry point for the no-p5 version: build the canvas, grab the webcam,
// then hand the video element to ml5's faceApi model.
async function setup() {
    createCanvas(width, height);

    // load up your video
    video = await getVideo();

    // modelReady is invoked once the faceApi model finishes loading.
    faceapi = ml5.faceApi(video, detection_options, modelReady);
}

// Shared drawing surface, created once by createCanvas().
let canvas;
let ctx;

// Hand-rolled replacement for p5's createCanvas(): build a <canvas> of the
// given size, attach it to the page, and cache its 2D rendering context.
function createCanvas(w, h) {
    const el = document.createElement("canvas");
    el.width = w;
    el.height = h;
    document.body.appendChild(el);
    canvas = el;
    ctx = el.getContext('2d');
}

// Create a hidden <video> element, attach the webcam stream to it, and
// return the element once playback has been requested.
// NOTE(review): videoElement.play() returns a Promise that is not awaited
// here; if autoplay is blocked the rejection goes unhandled — consider
// awaiting it. Left as-is to preserve behavior.
async function getVideo() {
    const videoElement = document.createElement('video');
    videoElement.width = width;
    videoElement.height = height;
    videoElement.setAttribute("style", "display: none;"); // Hide the video element, and just show the canvas
    document.body.appendChild(videoElement);

    // Create a webcam capture
    const capture = await navigator.mediaDevices.getUserMedia({ video: true });
    videoElement.srcObject = capture;
    videoElement.play();

    return videoElement;
}

// Called by ml5 once the faceApi model has loaded; starts the first
// detection pass (gotResults re-schedules itself afterwards).
function modelReady() {
    console.log('ready!');
    faceapi.detect(gotResults);
}

// Detection callback: paint the current video frame onto the 2D context,
// overlay any detected landmarks, then schedule the next detection pass.
function gotResults(err, result) {
    // Bail out on error; note this also stops the detect/draw loop.
    if (err) {
        console.log(err);
        return;
    }

    detections = result;

    // Draw the latest webcam frame onto the canvas.
    ctx.drawImage(video, 0, 0, width, height);

    // Overlay landmarks only when at least one face was found.
    if (detections && detections.length > 0) {
        drawLandmarks(detections);
    }

    faceapi.detect(gotResults);
}

// Draw the bounding box and the six facial-feature outlines for every
// detected face, using the raw 2D canvas API instead of p5.
function drawLandmarks(detections) {
    // Purple stroke for the box and all feature outlines.
    ctx.strokeStyle = "#a15ffb";

    for (const detection of detections) {
        // White-filled bounding box around the aligned face.
        const { _x: x, _y: y, _width: boxWidth, _height: boxHeight } = detection.alignedRect._box;
        ctx.beginPath();
        ctx.rect(x, y, boxWidth, boxHeight);
        ctx.fillStyle = "white";
        ctx.fill();

        // Eyes and mouth are closed outlines; nose and brows stay open.
        const { mouth, nose, leftEye, rightEye, leftEyeBrow, rightEyeBrow } = detection.parts;
        drawPart(mouth, true);
        drawPart(nose, false);
        drawPart(leftEye, true);
        drawPart(leftEyeBrow, false);
        drawPart(rightEye, true);
        drawPart(rightEyeBrow, false);
    }
}

// Trace one facial feature as a single stroked path; when `closed` is true
// the path is closed so the last point joins back to the first.
function drawPart(feature, closed) {
    ctx.beginPath();
    for (const [index, point] of feature.entries()) {
        if (index === 0) {
            ctx.moveTo(point._x, point._y);
        } else {
            ctx.lineTo(point._x, point._y);
        }
    }
    if (closed === true) {
        ctx.closePath();
    }
    ctx.stroke();
}

执行

备料完成後,就可启动 Live Server,
在 VS Code 里的 face_p5.html 程序码按右键,在显示的内容选单里,点选 Open with Live Server
就可显示如下画面。(会先跳出询问是否让此网页有使用网路摄影机的权限,请按同意)
https://ithelp.ithome.com.tw/upload/images/20201028/20132156w825fEFlIQ.png
face_nop5.html 同上的步骤启动,
就可显示如下画面。
https://ithelp.ithome.com.tw/upload/images/20201028/20132156LsMuGLiKB9.png
可见两者的执行结果是一样的。

比较

使用 WinMerge 比较 sketch_face_p5.js 与 sketch_face_nop5.js 的差异,
就可以了解 p5.js 能做的, javascript 原本就能做到,
但 p5.js 真的把程序码变得更简洁,在这个例子几乎减少了四分之一的程序码。

所以, p5.js 及 ml5 都是让程序设计师在使用新技术时,可以更专注在
客制化的需求。


<<:  迈向CISSP成功之路(读书心得)

>>:  网路方面被问到的案例整理 - 你也可以成为网路高手

D4 第二周 (回忆篇)

今天会是比较划水的回忆篇,可以斟酌看看。 这周开始正式学习 javascript,然後那时候疫情还没...

[Day 5] 机器学习大补帖

机器学习大补帖 今日学习目标 了解机器学习是什麽 何谓机器学习? 人工智慧的范畴 什麽是人工智慧? ...

[Angular] Day20. Angular Routing

在现代网页中常会使用一种称为 single page application(SPA)的技术,可以通...

Annotation 介绍

Annotation 很常写 Java 或是 Kotlin 的朋友对 annotation 大概不陌...

Day10【Web】网路攻击:CSRF

CSRF / XSRF CSRF 全称 Cross Site Request Forgery, 中文...