<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Semantic Segmentation Example</title>
<link href="js_example_style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<h2>Semantic Segmentation Example</h2>
<p>
This tutorial shows you how to write a semantic segmentation example with OpenCV.js.<br>
To try the example, click the <b>modelFile</b> button (and the <b>configFile</b> button if needed) to upload the inference model.
You can find the model URLs and parameters in the <a href="#appendix">model info</a> section.
Then change the parameters in the first code snippet to match the uploaded model.
Finally, click the <b>Try it</b> button to see the result. You can also choose a different input image.<br>
</p>
<div class="control"><button id="tryIt" disabled>Try it</button></div>
<div>
<table cellpadding="0" cellspacing="0" width="0" border="0">
<tr>
<td>
<canvas id="canvasInput" width="400" height="400"></canvas>
</td>
<td>
<canvas id="canvasOutput" style="visibility: hidden;" width="400" height="400"></canvas>
</td>
</tr>
<tr>
<td>
<div class="caption">
canvasInput <input type="file" id="fileInput" name="file" accept="image/*">
</div>
</td>
<td>
<p id='status' align="left"></p>
</td>
</tr>
<tr>
<td>
<div class="caption">
modelFile <input type="file" id="modelFile" name="file">
</div>
</td>
</tr>
<tr>
<td>
<div class="caption">
configFile <input type="file" id="configFile">
</div>
</td>
</tr>
</table>
</div>
<div>
<p class="err" id="errorMessage"></p>
</div>
<div>
<h3>Helper functions</h3>
<p>1. The parameters for model inference, which you can modify to try other models.</p>
<textarea class="code" rows="5" cols="100" id="codeEditor" spellcheck="false"></textarea>
<p>2. The main loop, which reads the image from the canvas and runs inference once.</p>
<textarea class="code" rows="16" cols="100" id="codeEditor1" spellcheck="false"></textarea>
<p>3. Get a blob from the image as input for the net, standardized with <b>mean</b> and <b>std</b>.</p>
<textarea class="code" rows="17" cols="100" id="codeEditor2" spellcheck="false"></textarea>
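<!--
  A minimal sketch of what the getBlobFromImage helper (loaded at runtime from
  js_dnn_example_helper.js into the editor above) may look like. It assumes
  cv.blobFromImage is exposed by the opencv.js build and that std is a single
  scalar; the real helper may differ in detail.

  getBlobFromImage = function(inputSize, mean, std, swapRB, canvasId) {
      let img = cv.imread(canvasId);             // RGBA image read from the canvas
      let rgb = new cv.Mat();
      cv.cvtColor(img, rgb, cv.COLOR_RGBA2RGB);  // drop the alpha channel
      // blobFromImage resizes to inputSize and computes (pixel - mean) * std,
      // returning an NCHW blob ready for net.setInput().
      let blob = cv.blobFromImage(rgb, std, new cv.Size(inputSize[0], inputSize[1]),
                                  new cv.Scalar(mean[0], mean[1], mean[2]), swapRB);
      img.delete(); rgb.delete();
      return blob;
  };
-->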
<p>4. Fetch the model file and save it to Emscripten's file system once the input button is clicked.</p>
<textarea class="code" rows="17" cols="100" id="codeEditor3" spellcheck="false"></textarea>
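<!--
  A minimal sketch of what the loadModel helper (loaded at runtime from
  js_dnn_example_helper.js into the editor above) may look like. It assumes the
  uploaded bytes are written into Emscripten's in-memory file system with
  cv.FS_createDataFile so cv.readNet can open the file by name.

  loadModel = async function(e) {
      return new Promise((resolve) => {
          let file = e.target.files[0];
          let reader = new FileReader();
          reader.onload = function() {
              // write the raw bytes under '/' using the uploaded file's name
              let data = new Uint8Array(reader.result);
              cv.FS_createDataFile('/', file.name, data, true, false, false);
              resolve(file.name);
          };
          reader.readAsArrayBuffer(file);
      });
  };
-->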
<p>5. The post-processing, including generating colors for the different classes and an argmax to get the class of each pixel.</p>
<textarea class="code" rows="34" cols="100" id="codeEditor4" spellcheck="false"></textarea>
</div>
<div id="appendix">
<h2>Model Info:</h2>
</div>
<script src="utils.js" type="text/javascript"></script>
<script src="js_dnn_example_helper.js" type="text/javascript"></script>
<script id="codeSnippet" type="text/code-snippet">
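// inputSize: network input width and height; mean/std: the shift and scale
// applied to each pixel when building the blob; swapRB: whether to swap the
// R and B channels. Adjust these to match the uploaded model (see Model Info).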
inputSize = [513, 513];
mean = [127.5, 127.5, 127.5];
std = 0.007843;
swapRB = false;
</script>
<script id="codeSnippet1" type="text/code-snippet">
main = async function() {
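// preprocess the canvas image into a normalized NCHW blob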
const input = getBlobFromImage(inputSize, mean, std, swapRB, 'canvasInput');
let net = cv.readNet(configPath, modelPath);
net.setInput(input);
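// time a single forward pass of the network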
const start = performance.now();
const result = net.forward();
const time = performance.now()-start;
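// post-process: pick a color per class, then take the per-pixel argmax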
const colors = generateColors(result);
const output = argmax(result, colors);
updateResult(output, time);
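// free the Mats allocated on the Emscripten heap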
input.delete();
net.delete();
result.delete();
}
</script>
<script id="codeSnippet4" type="text/code-snippet">
generateColors = function(result) {
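// result has shape [1, numClasses, H, W]; build one RGB triple per class,
// averaging each random channel value with the previous class's value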
const numClasses = result.matSize[1];
let colors = [0,0,0];
while(colors.length < numClasses*3){
colors.push(Math.round((Math.random()*255 + colors[colors.length-3]) / 2));
}
return colors;
}
argmax = function(result, colors) {
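// result.data32F holds C planes of H x W scores; for every pixel keep the
// class with the highest score and emit that class's color as an RGBA pixel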
const C = result.matSize[1];
const H = result.matSize[2];
const W = result.matSize[3];
const resultData = result.data32F;
const imgSize = H*W;
let classId = [];
for (let i = 0; i < imgSize; ++i) {
let id = 0;
for (let j = 0; j < C; ++j) {
if (resultData[j*imgSize+i] > resultData[id*imgSize+i]) {
id = j;
}
}
classId.push(colors[id*3]);
classId.push(colors[id*3+1]);
classId.push(colors[id*3+2]);
classId.push(255);
}
let output = cv.matFromArray(H,W,cv.CV_8UC4,classId);
return output;
}
</script>
<script type="text/javascript">
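// populate the Model Info table from the JSON description and load the
// code snippets into the editable text areas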
let jsonUrl = "js_semantic_segmentation_model_info.json";
drawInfoTable(jsonUrl, 'appendix');
let utils = new Utils('errorMessage');
utils.loadCode('codeSnippet', 'codeEditor');
utils.loadCode('codeSnippet1', 'codeEditor1');
let getBlobFromImageCode = 'getBlobFromImage = ' + getBlobFromImage.toString();
document.getElementById('codeEditor2').value = getBlobFromImageCode;
let loadModelCode = 'loadModel = ' + loadModel.toString();
document.getElementById('codeEditor3').value = loadModelCode;
utils.loadCode('codeSnippet4', 'codeEditor4');
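// draw the default sample image onto the input canvas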
let canvas = document.getElementById('canvasInput');
let ctx = canvas.getContext('2d');
let img = new Image();
img.crossOrigin = 'anonymous';
img.src = 'roi.jpg';
img.onload = function() {
ctx.drawImage(img, 0, 0, canvas.width, canvas.height);
};
let tryIt = document.getElementById('tryIt');
tryIt.addEventListener('click', () => {
initStatus();
document.getElementById('status').innerHTML = 'Running function main()...';
utils.executeCode('codeEditor');
utils.executeCode('codeEditor1');
if (modelPath === "") {
document.getElementById('status').innerHTML = 'Running failed.';
utils.printError('Please upload a model file by clicking the button first.');
} else {
setTimeout(main, 1);
}
});
let fileInput = document.getElementById('fileInput');
fileInput.addEventListener('change', (e) => {
initStatus();
loadImageToCanvas(e, 'canvasInput');
});
let configPath = "";
let configFile = document.getElementById('configFile');
configFile.addEventListener('change', async (e) => {
initStatus();
configPath = await loadModel(e);
document.getElementById('status').innerHTML = `The config file '${configPath}' was created successfully.`;
});
let modelPath = "";
let modelFile = document.getElementById('modelFile');
modelFile.addEventListener('change', async (e) => {
initStatus();
modelPath = await loadModel(e);
document.getElementById('status').innerHTML = `The model file '${modelPath}' was created successfully.`;
configPath = "";
configFile.value = "";
});
utils.loadOpenCv(() => {
tryIt.removeAttribute('disabled');
});
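// placeholder definitions; they are overwritten when the code editors are executed below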
var main = async function() {};
var generateColors = function(result) {};
var argmax = function(result, colors) {};
utils.executeCode('codeEditor1');
utils.executeCode('codeEditor2');
utils.executeCode('codeEditor3');
utils.executeCode('codeEditor4');
function updateResult(output, time) {
try{
let canvasOutput = document.getElementById('canvasOutput');
canvasOutput.style.visibility = "visible";
let resized = new cv.Mat(canvasOutput.height, canvasOutput.width, cv.CV_8UC4);
cv.resize(output, resized, new cv.Size(canvasOutput.width, canvasOutput.height));
cv.imshow('canvasOutput', resized);
document.getElementById('status').innerHTML = `<b>Model:</b> ${modelPath}<br>
<b>Inference time:</b> ${time.toFixed(2)} ms`;
} catch(e) {
console.log(e);
}
}
function initStatus() {
document.getElementById('status').innerHTML = '';
document.getElementById('canvasOutput').style.visibility = "hidden";
utils.clearError();
}
</script>
</body>
</html>