123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243 |
- <!DOCTYPE html>
- <html>
- <head>
- <meta charset="utf-8">
- <title>Semantic Segmentation Example</title>
- <link href="js_example_style.css" rel="stylesheet" type="text/css" />
- </head>
- <body>
- <h2>Semantic Segmentation Example</h2>
- <p>
- This tutorial shows you how to write a semantic segmentation example with OpenCV.js.<br>
- To try the example you should click the <b>modelFile</b> button (and <b>configInput</b> button if needed) to upload the inference model.
- You can find the model URLs and parameters in the <a href="#appendix">model info</a> section.
- Then you should change the parameters in the first code snippet according to the uploaded model.
- Finally click <b>Try it</b> button to see the result. You can choose any other images.<br>
- </p>
- <div class="control"><button id="tryIt" disabled>Try it</button></div>
- <div>
- <table cellpadding="0" cellspacing="0" width="0" border="0">
- <tr>
- <td>
- <canvas id="canvasInput" width="400" height="400"></canvas>
- </td>
- <td>
- <canvas id="canvasOutput" style="visibility: hidden;" width="400" height="400"></canvas>
- </td>
- </tr>
- <tr>
- <td>
- <div class="caption">
- canvasInput <input type="file" id="fileInput" name="file" accept="image/*">
- </div>
- </td>
- <td>
- <p id='status' align="left"></p>
- </td>
- </tr>
- <tr>
- <td>
- <div class="caption">
- modelFile <input type="file" id="modelFile" name="file">
- </div>
- </td>
- </tr>
- <tr>
- <td>
- <div class="caption">
- configFile <input type="file" id="configFile">
- </div>
- </td>
- </tr>
- </table>
- </div>
- <div>
- <p class="err" id="errorMessage"></p>
- </div>
- <div>
- <h3>Help function</h3>
- <p>1.The parameters for model inference which you can modify to investigate more models.</p>
- <textarea class="code" rows="5" cols="100" id="codeEditor" spellcheck="false"></textarea>
- <p>2.Main loop in which will read the image from canvas and do inference once.</p>
- <textarea class="code" rows="16" cols="100" id="codeEditor1" spellcheck="false"></textarea>
- <p>3.Get blob from image as input for net, and standardize it with <b>mean</b> and <b>std</b>.</p>
- <textarea class="code" rows="17" cols="100" id="codeEditor2" spellcheck="false"></textarea>
- <p>4.Fetch model file and save to emscripten file system once click the input button.</p>
- <textarea class="code" rows="17" cols="100" id="codeEditor3" spellcheck="false"></textarea>
- <p>5.The post-processing, including generating colors for different classes and argmax to get the class for each pixel.</p>
- <textarea class="code" rows="34" cols="100" id="codeEditor4" spellcheck="false"></textarea>
- </div>
- <div id="appendix">
- <h2>Model Info:</h2>
- </div>
- <script src="utils.js" type="text/javascript"></script>
- <script src="js_dnn_example_helper.js" type="text/javascript"></script>
- <script id="codeSnippet" type="text/code-snippet">
- // Model input parameters: edit these to match the uploaded model
- // (see the model info table below for per-model values).
- inputSize = [513, 513];  // network input width/height in pixels
- mean = [127.5, 127.5, 127.5];  // per-channel mean subtracted from the blob
- std = 0.007843;  // scale applied after mean subtraction (~1/127.5)
- swapRB = false;  // swap red and blue channels (BGR<->RGB) when true
- </script>
- <script id="codeSnippet1" type="text/code-snippet">
- main = async function() {
- // Read the input canvas and convert it to a normalized blob.
- const input = getBlobFromImage(inputSize, mean, std, swapRB, 'canvasInput');
- // Load the net from the files uploaded to the Emscripten file system.
- let net = cv.readNet(configPath, modelPath);
- net.setInput(input);
- // Time a single forward pass.
- const start = performance.now();
- const result = net.forward();
- const time = performance.now()-start;
- // Post-process: per-class colors, then per-pixel argmax to an RGBA Mat.
- const colors = generateColors(result);
- const output = argmax(result, colors);
- updateResult(output, time);
- // Free the WASM-side Mats created above.
- input.delete();
- net.delete();
- result.delete();
- }
- </script>
- <script id="codeSnippet4" type="text/code-snippet">
- generateColors = function(result) {
- const numClasses = result.matSize[1];
- let colors = [0,0,0];
- while(colors.length < numClasses*3){
- colors.push(Math.round((Math.random()*255 + colors[colors.length-3]) / 2));
- }
- return colors;
- }
- argmax = function(result, colors) {
- const C = result.matSize[1];
- const H = result.matSize[2];
- const W = result.matSize[3];
- const resultData = result.data32F;
- const imgSize = H*W;
- let classId = [];
- for (i = 0; i<imgSize; ++i) {
- let id = 0;
- for (j = 0; j < C; ++j) {
- if (resultData[j*imgSize+i] > resultData[id*imgSize+i]) {
- id = j;
- }
- }
- classId.push(colors[id*3]);
- classId.push(colors[id*3+1]);
- classId.push(colors[id*3+2]);
- classId.push(255);
- }
- output = cv.matFromArray(H,W,cv.CV_8UC4,classId);
- return output;
- }
- </script>
- <script type="text/javascript">
- // Populate the "Model Info" appendix table from the bundled JSON file.
- let jsonUrl = "js_semantic_segmentation_model_info.json";
- drawInfoTable(jsonUrl, 'appendix');
- // Utils handles OpenCV.js loading, snippet eval and error display.
- let utils = new Utils('errorMessage');
- utils.loadCode('codeSnippet', 'codeEditor');
- utils.loadCode('codeSnippet1', 'codeEditor1');
- // Show the shared helper functions' source in the code editors.
- let getBlobFromImageCode = 'getBlobFromImage = ' + getBlobFromImage.toString();
- document.getElementById('codeEditor2').value = getBlobFromImageCode;
- let loadModelCode = 'loadModel = ' + loadModel.toString();
- document.getElementById('codeEditor3').value = loadModelCode;
- utils.loadCode('codeSnippet4', 'codeEditor4');
- // Draw the default sample image onto the input canvas.
- let canvas = document.getElementById('canvasInput');
- let ctx = canvas.getContext('2d');
- let img = new Image();
- img.crossOrigin = 'anonymous';
- img.src = 'roi.jpg';
- img.onload = function() {
- ctx.drawImage(img, 0, 0, canvas.width, canvas.height);
- };
- let tryIt = document.getElementById('tryIt');
- tryIt.addEventListener('click', () => {
- initStatus();
- document.getElementById('status').innerHTML = 'Running function main()...';
- utils.executeCode('codeEditor');
- utils.executeCode('codeEditor1');
- if (modelPath === "") {
- document.getElementById('status').innerHTML = 'Runing failed.';
- utils.printError('Please upload model file by clicking the button first.');
- } else {
- setTimeout(main, 1);
- }
- });
- let fileInput = document.getElementById('fileInput');
- fileInput.addEventListener('change', (e) => {
- initStatus();
- loadImageToCanvas(e, 'canvasInput');
- });
- // Paths of the uploaded files inside the Emscripten file system;
- // an empty string means "not uploaded yet".
- let configPath = "";
- let configFile = document.getElementById('configFile');
- configFile.addEventListener('change', async (e) => {
- initStatus();
- configPath = await loadModel(e);
- document.getElementById('status').innerHTML = `The config file '${configPath}' is created successfully.`;
- });
- let modelPath = "";
- let modelFile = document.getElementById('modelFile');
- modelFile.addEventListener('change', async (e) => {
- initStatus();
- modelPath = await loadModel(e);
- document.getElementById('status').innerHTML = `The model file '${modelPath}' is created successfully.`;
- // A newly uploaded model invalidates any previously uploaded config.
- configPath = "";
- configFile.value = "";
- });
- utils.loadOpenCv(() => {
- // Enable the "Try it" button once OpenCV.js has finished loading.
- tryIt.removeAttribute('disabled');
- });
- // Global stubs; the snippet evals below (and on each "Try it" click)
- // overwrite these bindings via sloppy-mode eval assignment.
- var main = async function() {};
- var generateColors = function(result) {};
- var argmax = function(result, colors) {};
- utils.executeCode('codeEditor1');
- utils.executeCode('codeEditor2');
- utils.executeCode('codeEditor3');
- utils.executeCode('codeEditor4');
- function updateResult(output, time) {
- try{
- let canvasOutput = document.getElementById('canvasOutput');
- canvasOutput.style.visibility = "visible";
- let resized = new cv.Mat(canvasOutput.width, canvasOutput.height, cv.CV_8UC4);
- cv.resize(output, resized, new cv.Size(canvasOutput.width, canvasOutput.height));
- cv.imshow('canvasOutput', resized);
- document.getElementById('status').innerHTML = `<b>Model:</b> ${modelPath}<br>
- <b>Inference time:</b> ${time.toFixed(2)} ms`;
- } catch(e) {
- console.log(e);
- }
- }
- function initStatus() {
- document.getElementById('status').innerHTML = '';
- document.getElementById('canvasOutput').style.visibility = "hidden";
- utils.clearError();
- }
- </script>
- </body>
- </html>
|