js_face_recognition.html

<!DOCTYPE html>
<html>
<head>
<script async src="../../opencv.js" type="text/javascript"></script>
<script src="../../utils.js" type="text/javascript"></script>
<script type='text/javascript'>
var netDet = undefined, netRecogn = undefined;
var persons = {};
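
// The detector is a lightweight SSD (res10) face model. Its output is a flat
// sequence of 7-float detections: [batchId, classId, confidence, left, top,
// right, bottom], with box coordinates normalized to [0, 1].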
//! [Run face detection model]
function detectFaces(img) {
  // Mean values [104, 117, 123] are subtracted per BGR channel; no scaling.
  var blob = cv.blobFromImage(img, 1, {width: 192, height: 144}, [104, 117, 123, 0], false, false);
  netDet.setInput(blob);
  var out = netDet.forward();

  var faces = [];
  for (var i = 0, n = out.data32F.length; i < n; i += 7) {
    var confidence = out.data32F[i + 2];
    var left = out.data32F[i + 3] * img.cols;
    var top = out.data32F[i + 4] * img.rows;
    var right = out.data32F[i + 5] * img.cols;
    var bottom = out.data32F[i + 6] * img.rows;
    // Clamp the box to the image borders.
    left = Math.min(Math.max(0, left), img.cols - 1);
    right = Math.min(Math.max(0, right), img.cols - 1);
    bottom = Math.min(Math.max(0, bottom), img.rows - 1);
    top = Math.min(Math.max(0, top), img.rows - 1);

    if (confidence > 0.5 && left < right && top < bottom) {
      faces.push({x: left, y: top, width: right - left, height: bottom - top});
    }
  }
  blob.delete();
  out.delete();
  return faces;
};
//! [Run face detection model]
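
// The recognizer is the OpenFace nn4.small2.v1 model: a 96x96 RGB face crop
// goes in, a 128-dimensional L2-normalized embedding comes out.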
//! [Get 128 floating points feature vector]
function face2vec(face) {
  // Pixels are scaled to [0, 1] and channels swapped from BGR to RGB.
  var blob = cv.blobFromImage(face, 1.0 / 255, {width: 96, height: 96}, [0, 0, 0, 0], true, false);
  netRecogn.setInput(blob);
  var vec = netRecogn.forward();
  blob.delete();
  return vec;
};
//! [Get 128 floating points feature vector]
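
// Because the embeddings are unit-length, the dot product of two of them is
// their cosine similarity: 1 for identical faces, down to -1 for opposites.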
//! [Recognize]
function recognize(face) {
  var vec = face2vec(face);

  var bestMatchName = 'unknown';
  var bestMatchScore = 0.5;  // Matching threshold; scores range from -1 to 1.
  for (var name in persons) {
    var personVec = persons[name];
    var score = vec.dot(personVec);
    if (score > bestMatchScore) {
      bestMatchScore = score;
      bestMatchName = name;
    }
  }
  vec.delete();
  return bestMatchName;
};
//! [Recognize]
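
// utils.createFileFromUrl (from utils.js) downloads a URL and writes it into
// Emscripten's in-memory filesystem, where cv.readNetFromCaffe and
// cv.readNetFromTorch can then read it by name.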
function loadModels(callback) {
  var utils = new Utils('');
  var proto = 'https://raw.githubusercontent.com/opencv/opencv/4.x/samples/dnn/face_detector/deploy_lowres.prototxt';
  var weights = 'https://raw.githubusercontent.com/opencv/opencv_3rdparty/dnn_samples_face_detector_20180205_fp16/res10_300x300_ssd_iter_140000_fp16.caffemodel';
  var recognModel = 'https://raw.githubusercontent.com/pyannote/pyannote-data/master/openface.nn4.small2.v1.t7';
  utils.createFileFromUrl('face_detector.prototxt', proto, () => {
    document.getElementById('status').innerHTML = 'Downloading face_detector.caffemodel';
    utils.createFileFromUrl('face_detector.caffemodel', weights, () => {
      document.getElementById('status').innerHTML = 'Downloading OpenFace model';
      utils.createFileFromUrl('face_recognition.t7', recognModel, () => {
        document.getElementById('status').innerHTML = '';
        netDet = cv.readNetFromCaffe('face_detector.prototxt', 'face_detector.caffemodel');
        netRecogn = cv.readNetFromTorch('face_recognition.t7');
        callback();
      });
    });
  });
};
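
// main() wires everything together: it opens the camera, then runs a loop of
// grab frame -> detect faces -> compute embeddings -> draw boxes and names.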
function main() {
  // Create a camera object.
  var output = document.getElementById('output');
  var camera = document.createElement("video");
  camera.setAttribute("width", output.width);
  camera.setAttribute("height", output.height);

  // Ask the user for permission to use the camera. Note that getUserMedia
  // requires a secure context (HTTPS or localhost) in modern browsers.
  navigator.mediaDevices.getUserMedia({video: true, audio: false})
    .then(function(stream) {
      camera.srcObject = stream;
      camera.onloadedmetadata = function(e) {
        camera.play();
      };
    });
  //! [Open a camera stream]
  var cap = new cv.VideoCapture(camera);
  // Preallocate the RGBA capture frame and its BGR copy once, outside the loop.
  var frame = new cv.Mat(camera.height, camera.width, cv.CV_8UC4);
  var frameBGR = new cv.Mat(camera.height, camera.width, cv.CV_8UC3);
  //! [Open a camera stream]
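
  // Clicking "Add a person" takes the first face detected in the current
  // frame, stores its embedding under the entered name, and shows a 96x96
  // thumbnail in the table below the canvas.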
  //! [Add a person]
  document.getElementById('addPersonButton').onclick = function() {
    var rects = detectFaces(frameBGR);
    if (rects.length > 0) {
      var face = frameBGR.roi(rects[0]);

      var name = prompt('Say your name:');
      var cell = document.getElementById("targetNames").insertCell(0);
      cell.innerHTML = name;

      var vec = face2vec(face);
      persons[name] = vec.clone();
      vec.delete();  // The clone is kept; release the temporary output Mat.

      var canvas = document.createElement("canvas");
      canvas.setAttribute("width", 96);
      canvas.setAttribute("height", 96);
      var imgCell = document.getElementById("targetImgs").insertCell(0);
      imgCell.appendChild(canvas);

      var faceResized = new cv.Mat(canvas.height, canvas.width, cv.CV_8UC3);
      cv.resize(face, faceResized, {width: canvas.width, height: canvas.height});
      cv.cvtColor(faceResized, faceResized, cv.COLOR_BGR2RGB);
      cv.imshow(canvas, faceResized);
      faceResized.delete();
      face.delete();  // Release the ROI view into frameBGR.
    }
  };
  //! [Add a person]
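
  // The processing loop: read an RGBA frame, convert it to BGR for the
  // networks, detect faces, label each one, render, then reschedule itself so
  // that iterations approximate the target FPS.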
  //! [Define frames processing]
  var isRunning = false;
  const FPS = 30;  // Target number of frames processed per second.
  function captureFrame() {
    var begin = Date.now();
    cap.read(frame);  // Read a frame from the camera.
    cv.cvtColor(frame, frameBGR, cv.COLOR_RGBA2BGR);

    var faces = detectFaces(frameBGR);
    faces.forEach(function(rect) {
      cv.rectangle(frame, {x: rect.x, y: rect.y}, {x: rect.x + rect.width, y: rect.y + rect.height}, [0, 255, 0, 255]);

      var face = frameBGR.roi(rect);
      var name = recognize(face);
      face.delete();  // Release the ROI view into frameBGR.
      cv.putText(frame, name, {x: rect.x, y: rect.y}, cv.FONT_HERSHEY_SIMPLEX, 1.0, [0, 255, 0, 255]);
    });
    cv.imshow(output, frame);

    // Loop this function, compensating for the time this frame took.
    if (isRunning) {
      var delay = 1000 / FPS - (Date.now() - begin);
      setTimeout(captureFrame, delay);
    }
  };
  //! [Define frames processing]
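
  // The Start/Stop button lazily loads the models the first time it is
  // pressed; "Add a person" is only enabled while the loop is running.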
  document.getElementById('startStopButton').onclick = function toggle() {
    if (isRunning) {
      isRunning = false;
      document.getElementById('startStopButton').innerHTML = 'Start';
      document.getElementById('addPersonButton').disabled = true;
    } else {
      function run() {
        isRunning = true;
        captureFrame();
        document.getElementById('startStopButton').innerHTML = 'Stop';
        document.getElementById('startStopButton').disabled = false;
        document.getElementById('addPersonButton').disabled = false;
      }
      if (netDet == undefined || netRecogn == undefined) {
        document.getElementById('startStopButton').disabled = true;
        loadModels(run);  // Load the models, then start the pipeline.
      } else {
        run();
      }
    }
  };

  document.getElementById('startStopButton').disabled = false;
};
</script>
</head>
<body onload="cv['onRuntimeInitialized']=()=>{ main() }">
<button id="startStopButton" type="button" disabled>Start</button>
<div id="status"></div>
<canvas id="output" width="640" height="480" style="max-width: 100%"></canvas>
<table>
  <tr id="targetImgs"></tr>
  <tr id="targetNames"></tr>
</table>
<button id="addPersonButton" type="button" disabled>Add a person</button>
</body>
</html>