基于opencv.js实现二维码定位
通过分析OpenCV.js(官方下载地址为 https://docs.opencv.org/{版本号}/opencv.js,例如 https://docs.opencv.org/4.5.0/opencv.js)的白名单,我们可以了解目前官方PreBuild版本并没有实现QR识别。
# Classes and methods whitelist
core = {'': ['absdiff', 'add', 'addWeighted', 'bitwise_and', 'bitwise_not', 'bitwise_or', 'bitwise_xor', 'cartToPolar', \
             'compare', 'convertScaleAbs', 'copyMakeBorder', 'countNonZero', 'determinant', 'dft', 'divide', 'eigen', \
             'exp', 'flip', 'getOptimalDFTSize', 'gemm', 'hconcat', 'inRange', 'invert', 'kmeans', 'log', 'magnitude', \
             'max', 'mean', 'meanStdDev', 'merge', 'min', 'minMaxLoc', 'mixChannels', 'multiply', 'norm', 'normalize', \
             'perspectiveTransform', 'polarToCart', 'pow', 'randn', 'randu', 'reduce', 'repeat', 'rotate', 'setIdentity', 'setRNGSeed', \
             'solve', 'solvePoly', 'split', 'sqrt', 'subtract', 'trace', 'transform', 'transpose', 'vconcat'],
        'Algorithm': []}

imgproc = {'': ['Canny', 'GaussianBlur', 'Laplacian', 'HoughLines', 'HoughLinesP', 'HoughCircles', 'Scharr', 'Sobel', \
                'adaptiveThreshold', 'approxPolyDP', 'arcLength', 'bilateralFilter', 'blur', 'boundingRect', 'boxFilter', \
                'calcBackProject', 'calcHist', 'circle', 'compareHist', 'connectedComponents', 'connectedComponentsWithStats', \
                'contourArea', 'convexHull', 'convexityDefects', 'cornerHarris', 'cornerMinEigenVal', 'createCLAHE', \
                'createLineSegmentDetector', 'cvtColor', 'demosaicing', 'dilate', 'distanceTransform', 'distanceTransformWithLabels', \
                'drawContours', 'ellipse', 'ellipse2Poly', 'equalizeHist', 'erode', 'filter2D', 'findContours', 'fitEllipse', \
                'fitLine', 'floodFill', 'getAffineTransform', 'getPerspectiveTransform', 'getRotationMatrix2D', 'getStructuringElement', \
                'goodFeaturesToTrack', 'grabCut', 'initUndistortRectifyMap', 'integral', 'integral2', 'isContourConvex', 'line', \
                'matchShapes', 'matchTemplate', 'medianBlur', 'minAreaRect', 'minEnclosingCircle', 'moments', 'morphologyEx', \
                'pointPolygonTest', 'putText', 'pyrDown', 'pyrUp', 'rectangle', 'remap', 'resize', 'sepFilter2D', 'threshold', \
                'undistort', 'warpAffine', 'warpPerspective', 'warpPolar', 'watershed', \
                'fillPoly', 'fillConvexPoly'],
           'CLAHE': ['apply', 'collectGarbage', 'getClipLimit', 'getTilesGridSize', 'setClipLimit', 'setTilesGridSize']}

objdetect = {'': ['groupRectangles'],
             'HOGDescriptor': ['load', 'HOGDescriptor', 'getDefaultPeopleDetector', 'getDaimlerPeopleDetector', 'setSVMDetector', 'detectMultiScale'],
             'CascadeClassifier': ['load', 'detectMultiScale2', 'CascadeClassifier', 'detectMultiScale3', 'empty', 'detectMultiScale']}

video = {'': ['CamShift', 'calcOpticalFlowFarneback', 'calcOpticalFlowPyrLK', 'createBackgroundSubtractorMOG2', \
              'findTransformECC', 'meanShift'],
         'BackgroundSubtractorMOG2': ['BackgroundSubtractorMOG2', 'apply'],
         'BackgroundSubtractor': ['apply', 'getBackgroundImage']}

dnn = {'dnn_Net': ['setInput', 'forward'],
       '': ['readNetFromCaffe', 'readNetFromTensorflow', 'readNetFromTorch', 'readNetFromDarknet',
            'readNetFromONNX', 'readNet', 'blobFromImage']}

features2d = {'Feature2D': ['detect', 'compute', 'detectAndCompute', 'descriptorSize', 'descriptorType', 'defaultNorm', 'empty', 'getDefaultName'],
              'BRISK': ['create', 'getDefaultName'],
              'ORB': ['create', 'setMaxFeatures', 'setScaleFactor', 'setNLevels', 'setEdgeThreshold', 'setFirstLevel', 'setWTA_K', 'setScoreType', 'setPatchSize', 'getFastThreshold', 'getDefaultName'],
              'MSER': ['create', 'detectRegions', 'setDelta', 'getDelta', 'setMinArea', 'getMinArea', 'setMaxArea', 'getMaxArea', 'setPass2Only', 'getPass2Only', 'getDefaultName'],
              'FastFeatureDetector': ['create', 'setThreshold', 'getThreshold', 'setNonmaxSuppression', 'getNonmaxSuppression', 'setType', 'getType', 'getDefaultName'],
              'AgastFeatureDetector': ['create', 'setThreshold', 'getThreshold', 'setNonmaxSuppression', 'getNonmaxSuppression', 'setType', 'getType', 'getDefaultName'],
              'GFTTDetector': ['create', 'setMaxFeatures', 'getMaxFeatures', 'setQualityLevel', 'getQualityLevel', 'setMinDistance', 'getMinDistance', 'setBlockSize', 'getBlockSize', 'setHarrisDetector', 'getHarrisDetector', 'setK', 'getK', 'getDefaultName'],
              # 'SimpleBlobDetector': ['create'],
              'KAZE': ['create', 'setExtended', 'getExtended', 'setUpright', 'getUpright', 'setThreshold', 'getThreshold', 'setNOctaves', 'getNOctaves', 'setNOctaveLayers', 'getNOctaveLayers', 'setDiffusivity', 'getDiffusivity', 'getDefaultName'],
              'AKAZE': ['create', 'setDescriptorType', 'getDescriptorType', 'setDescriptorSize', 'getDescriptorSize', 'setDescriptorChannels', 'getDescriptorChannels', 'setThreshold', 'getThreshold', 'setNOctaves', 'getNOctaves', 'setNOctaveLayers', 'getNOctaveLayers', 'setDiffusivity', 'getDiffusivity', 'getDefaultName'],
              'DescriptorMatcher': ['add', 'clear', 'empty', 'isMaskSupported', 'train', 'match', 'knnMatch', 'radiusMatch', 'clone', 'create'],
              'BFMatcher': ['isMaskSupported', 'create'],
              '': ['drawKeypoints', 'drawMatches', 'drawMatchesKnn']}

photo = {'': ['createAlignMTB', 'createCalibrateDebevec', 'createCalibrateRobertson', \
              'createMergeDebevec', 'createMergeMertens', 'createMergeRobertson', \
              'createTonemapDrago', 'createTonemapMantiuk', 'createTonemapReinhard', 'inpaint'],
         'CalibrateCRF': ['process'],
         'AlignMTB': ['calculateShift', 'shiftMat', 'computeBitmaps', 'getMaxBits', 'setMaxBits', \
                      'getExcludeRange', 'setExcludeRange', 'getCut', 'setCut'],
         'CalibrateDebevec': ['getLambda', 'setLambda', 'getSamples', 'setSamples', 'getRandom', 'setRandom'],
         'CalibrateRobertson': ['getMaxIter', 'setMaxIter', 'getThreshold', 'setThreshold', 'getRadiance'],
         'MergeExposures': ['process'],
         'MergeDebevec': ['process'],
         'MergeMertens': ['process', 'getContrastWeight', 'setContrastWeight', 'getSaturationWeight', \
                          'setSaturationWeight', 'getExposureWeight', 'setExposureWeight'],
         'MergeRobertson': ['process'],
         'Tonemap': ['process', 'getGamma', 'setGamma'],
         'TonemapDrago': ['getSaturation', 'setSaturation', 'getBias', 'setBias', \
                          'getSigmaColor', 'setSigmaColor', 'getSigmaSpace', 'setSigmaSpace'],
         'TonemapMantiuk': ['getScale', 'setScale', 'getSaturation', 'setSaturation'],
         'TonemapReinhard': ['getIntensity', 'setIntensity', 'getLightAdaptation', 'setLightAdaptation', \
                             'getColorAdaptation', 'setColorAdaptation']
         }

aruco = {'': ['detectMarkers', 'drawDetectedMarkers', 'drawAxis', 'estimatePoseSingleMarkers', 'estimatePoseBoard', 'estimatePoseCharucoBoard', 'interpolateCornersCharuco', 'drawDetectedCornersCharuco'],
         'aruco_Dictionary': ['get', 'drawMarker'],
         'aruco_Board': ['create'],
         'aruco_GridBoard': ['create', 'draw'],
         'aruco_CharucoBoard': ['create', 'draw'],
         }

calib3d = {'': ['findHomography', 'calibrateCameraExtended', 'drawFrameAxes', 'estimateAffine2D', 'getDefaultNewCameraMatrix', 'initUndistortRectifyMap', 'Rodrigues']}

white_list = makeWhiteList([core, imgproc, objdetect, video, dnn, features2d, photo, aruco, calib3d])
但是我们仍然可以通过轮廓分析的相关方法,去实现“基于opencv.js实现二维码定位”,这就是本篇BLOG的主要内容。
一、基本原理
主要内容请参考《OpenCV使用FindContours进行二维码定位》,这里简要地回顾一下。
FindContours是直接寻找连通区域的函数。典型的运用在二维码上面:
对于它的3个定位点,这种“重复包含”的特性在整幅图上只出现三处,是具有排他性的。
那么轮廓识别的结果是如何展示的呢?比如在这幅图中(白色区域为有数据的区域,黑色为无数据),0,1,2是第一层,然后里面是3,3的里面是4和5。(2a表示是2的内部),它们的关系应该是这样的:
所以我们只需要寻找某一个轮廓“有无爷爷轮廓”,就可以判断出来它是否“重复包含”
值得参考的C++代码应该是这样的,其中注释部分已经说明的比较清楚。
<span>#</span><span>include</span><span> </span><span>"opencv2/highgui/highgui.hpp"</span><br /><span>#</span><span>include</span><span> </span><span>"opencv2/imgproc/imgproc.hpp"</span><br /><span>#</span><span>include</span><span> </span><span><</span><span>iostream</span><span>></span><br /><span>#</span><span>include</span><span> </span><span><</span><span>stdio.h</span><span>></span><br /><span>#</span><span>include</span><span> </span><span><</span><span>stdlib.h</span><span>></span><br /><span>#</span><span>include</span><span> </span><span><</span><span>math.h</span><span>></span><br /><span>using</span><span> </span><span>namespace</span><span> cv;</span><br /><span>using</span><span> </span><span>namespace</span><span> std;</span><br /><span>//找到所提取轮廓的中心点</span><br /><span>//在提取的中心小正方形的边界上每隔周长个像素提取一个点的坐标,求所提取四个点的平均坐标(即为小正方形的大致中心)</span><br /><span>Point Center_cal(vector</span><span><</span><span>vector</span><span><</span><span>Point</span><span>></span><span> </span><span>></span><span> contours,</span><span>int</span><span> i)</span><br /><span>{</span><br /><span> </span><span>int</span><span> centerx</span><span>=</span><span>0</span><span>,centery</span><span>=</span><span>0</span><span>,n</span><span>=</span><span>contours[i].size();</span><br /><span> centerx </span><span>=</span><span> (contours[i][n</span><span>/</span><span>4</span><span>].x </span><span>+</span><span> contours[i][n</span><span>*</span><span>2</span><span>/</span><span>4</span><span>].x </span><span>+</span><span> contours[i][</span><span>3</span><span>*</span><span>n</span><span>/</span><span>4</span><span>].x </span><span>+</span><span> contours[i][n</span><span>-</span><span>1</span><span>].x)</span><span>/</span><span>4</span><span>;</span><br /><span> centery </span><span>=</span><span> (contours[i][n</span><span>/</span><span>4</span><span>].y </span><span>+</span><span> contours[i][n</span><span>*</span><span>2</span><span>/</span><span>4</span><span>].y 
</span><span>+</span><span> contours[i][</span><span>3</span><span>*</span><span>n</span><span>/</span><span>4</span><span>].y </span><span>+</span><span> contours[i][n</span><span>-</span><span>1</span><span>].y)</span><span>/</span><span>4</span><span>;</span><br /><span> Point point1</span><span>=</span><span>Point(centerx,centery);</span><br /><span> </span><span>return</span><span> point1;</span><br /><span>}</span><br /><span>int</span><span> main( </span><span>int</span><span> argc, </span><span>char</span><span>*</span><span>*</span><span> argv[] )</span><br /><span>{</span><br /><span> Mat src </span><span>=</span><span> imread( </span><span>"e:/sandbox/qrcode.jpg"</span><span>, </span><span>1</span><span> );</span><br /><span> resize(src,src,Size(</span><span>800</span><span>,</span><span>600</span><span>));</span><span>//标准大小</span><br /><span> Mat src_gray;</span><br /><span> Mat src_all</span><span>=</span><span>src.clone();</span><br /><span> Mat threshold_output;</span><br /><span> vector</span><span><</span><span>vector</span><span><</span><span>Point</span><span>></span><span> </span><span>></span><span> contours,contours2;</span><br /><span> vector</span><span><</span><span>Vec4i</span><span>></span><span> hierarchy;</span><br /><span> </span><span>//预处理</span><br /><span> cvtColor( src, src_gray, CV_BGR2GRAY );</span><br /><span> blur( src_gray, src_gray, Size(</span><span>3</span><span>,</span><span>3</span><span>) ); </span><span>//模糊,去除毛刺</span><br /><span> threshold( src_gray, threshold_output, </span><span>100</span><span>, </span><span>255</span><span>, THRESH_OTSU );</span><br /><span> </span><span>//寻找轮廓 </span><br /><span> </span><span>//第一个参数是输入图像 2值化的</span><br /><span> </span><span>//第二个参数是内存存储器,FindContours找到的轮廓放到内存里面。</span><br /><span> </span><span>//第三个参数是层级,**[Next, Previous, First_Child, Parent]** 的vector</span><br /><span> </span><span>//第四个参数是类型,采用树结构</span><br /><span> </span><span>//第五个参数是节点拟合模式,这里是全部寻找</span><br /><span> 
findContours( threshold_output, contours, hierarchy, CV_RETR_TREE, CHAIN_APPROX_NONE, Point(</span><span>0</span><span>, </span><span>0</span><span>) );</span><br /><span> </span><span>//轮廓筛选</span><br /><span> </span><span>int</span><span> c</span><span>=</span><span>0</span><span>,ic</span><span>=</span><span>0</span><span>,area</span><span>=</span><span>0</span><span>;</span><br /><span> </span><span>int</span><span> parentIdx</span><span>=</span><span>-</span><span>1</span><span>;</span><br /><span> </span><span>for</span><span>( </span><span>int</span><span> i </span><span>=</span><span> </span><span>0</span><span>; i</span><span><</span><span> contours.size(); i</span><span>++</span><span> )</span><br /><span> {</span><br /><span> </span><span>//hierarchy[i][2] != -1 表示不是最外面的轮廓</span><br /><span> </span><span>if</span><span> (hierarchy[i][</span><span>2</span><span>] </span><span>!=</span><span> </span><span>-</span><span>1</span><span> </span><span>&&</span><span> ic</span><span>==</span><span>0</span><span>)</span><br /><span> {</span><br /><span> parentIdx </span><span>=</span><span> i; </span><br /><span> ic</span><span>++</span><span>;</span><br /><span> }</span><br /><span> </span><span>else</span><span> </span><span>if</span><span> (hierarchy[i][</span><span>2</span><span>] </span><span>!=</span><span> </span><span>-</span><span>1</span><span>)</span><br /><span> {</span><br /><span> ic</span><span>++</span><span>;</span><br /><span> }</span><br /><span> </span><span>//最外面的清0</span><br /><span> </span><span>else</span><span> </span><span>if</span><span>(hierarchy[i][</span><span>2</span><span>] </span><span>==</span><span> </span><span>-</span><span>1</span><span>)</span><br /><span> {</span><br /><span> ic </span><span>=</span><span> </span><span>0</span><span>;</span><br /><span> parentIdx </span><span>=</span><span> </span><span>-</span><span>1</span><span>;</span><br /><span> }</span><br /><span> </span><span>//找到定位点信息</span><br /><span> 
</span><span>if</span><span> ( ic </span><span>></span><span>=</span><span> </span><span>2</span><span>)</span><br /><span> {</span><br /><span> contours2.push_back(contours[parentIdx]);</span><br /><span> ic </span><span>=</span><span> </span><span>0</span><span>;</span><br /><span> parentIdx </span><span>=</span><span> </span><span>-</span><span>1</span><span>;</span><br /><span> }</span><br /><span> }</span><br /><span> </span><span>//填充定位点</span><br /><span> </span><span>for</span><span>(</span><span>int</span><span> i</span><span>=</span><span>0</span><span>; i</span><span><</span><span>contours2.size(); i</span><span>++</span><span>)</span><br /><span> drawContours( src_all, contours2, i, CV_RGB(</span><span>0</span><span>,</span><span>255</span><span>,</span><span>0</span><span>) , </span><span>-</span><span>1</span><span> );</span><br /><span> </span><span>//连接定位点</span><br /><span> Point point[</span><span>3</span><span>];</span><br /><span> </span><span>for</span><span>(</span><span>int</span><span> i</span><span>=</span><span>0</span><span>; i</span><span><</span><span>contours2.size(); i</span><span>++</span><span>)</span><br /><span> {</span><br /><span> point[i] </span><span>=</span><span> Center_cal( contours2, i );</span><br /><span> }</span><br /><span> </span><br /><span> line(src_all,point[</span><span>0</span><span>],point[</span><span>1</span><span>],Scalar(</span><span>0</span><span>,</span><span>0</span><span>,</span><span>255</span><span>),</span><span>2</span><span>);</span><br /><span> line(src_all,point[</span><span>1</span><span>],point[</span><span>2</span><span>],Scalar(</span><span>0</span><span>,</span><span>0</span><span>,</span><span>255</span><span>),</span><span>2</span><span>);</span><br /><span> line(src_all,point[</span><span>0</span><span>],point[</span><span>2</span><span>],Scalar(</span><span>0</span><span>,</span><span>0</span><span>,</span><span>255</span><span>),</span><span>2</span><span>);</span><br /><span> </span><br 
/><span> imshow( </span><span>"结果"</span><span>, src_all );</span><br /><span> waitKey(</span><span>0</span><span>);</span><br /><span> </span><span>return</span><span>(</span><span>0</span><span>);</span><br /><span>}</span>
二、算法重点
由于hierarchy这块是比较缺乏文档的,在转换为JS的过程中存在一定困难,最终得到了以下的正确结果:
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Hello OpenCV.js</title>
<script async src="opencv.js" onload="onOpenCvReady();" type="text/javascript"></script>
</head>
<body>
<h2>Hello OpenCV.js</h2>
<p id="status">OpenCV.js is loading...</p>
<div>
<div class="inputoutput">
<img id="imageSrc" alt="No Image" />
<div class="caption">imageSrc <input type="file" id="fileInput" name="file" /></div>
</div>
<div class="inputoutput">
<canvas id="canvasOutput" ></canvas>
<div class="caption">canvasOutput</div>
</div>
<div class="inputoutput2">
<canvas id="canvasOutput2" ></canvas>
<div class="caption">canvasOutput2</div>
</div>
</div>
<script type="text/javascript">
// Locate the three QR-code finder patterns by searching the contour
// hierarchy for contours that have both a child and a grandchild.
// (Fixed from the original: typographic quotes replaced with ASCII quotes,
// leftover `debugger` removed, and `contours2` is now deleted to avoid an
// emscripten memory leak.)
const imgElement = document.getElementById('imageSrc');
const inputElement = document.getElementById('fileInput');
inputElement.addEventListener('change', (e) => {
  imgElement.src = URL.createObjectURL(e.target.files[0]);
}, false);
imgElement.onload = function() {
  const src = cv.imread(imgElement);
  const srcClone = cv.imread(imgElement);
  const dsize = new cv.Size(800, 600);
  // Normalize both copies to a standard size.
  cv.resize(src, src, dsize);
  cv.resize(srcClone, srcClone, dsize);
  const dst = cv.Mat.zeros(src.rows, src.cols, cv.CV_8UC3);
  // Preprocess: grayscale -> 3x3 blur (remove noise) -> Otsu binarization.
  cv.cvtColor(src, src, cv.COLOR_RGBA2GRAY, 0);
  cv.blur(src, src, new cv.Size(3, 3));
  cv.threshold(src, src, 100, 255, cv.THRESH_OTSU);
  const contours = new cv.MatVector();
  const contours2 = new cv.MatVector();
  const hierarchy = new cv.Mat();
  // RETR_TREE keeps the full nesting hierarchy; row i of `hierarchy` is a
  // [Next, Previous, First_Child, Parent] quadruple read via intPtr(0, i).
  cv.findContours(src, contours, hierarchy, cv.RETR_TREE, cv.CHAIN_APPROX_NONE);
  // Contour filtering: two consecutive contours that each have a child
  // (First_Child != -1) mean the first one has a grandchild — a candidate
  // QR finder pattern.
  let ic = 0;
  let parentIdx = -1;
  for (let i = 0; i < contours.size(); i++) {
    if (hierarchy.intPtr(0, i)[2] !== -1 && ic === 0) {
      parentIdx = i; // start of a nested run
      ic++;
    } else if (hierarchy.intPtr(0, i)[2] !== -1) {
      ic++;
    } else if (hierarchy.intPtr(0, i)[2] === -1) {
      // Contour with no child: reset the run.
      ic = 0;
      parentIdx = -1;
    }
    // Found a "grandparent" contour: record it as a finder pattern.
    if (ic >= 2) {
      contours2.push_back(contours.get(parentIdx));
      ic = 0;
      parentIdx = -1;
    }
  }
  console.log(contours2.size());
  // Draw every contour on the clone for visual reference.
  for (let i = 0; i < contours.size(); i++) {
    const color = new cv.Scalar(255, 0, 0, 255);
    cv.drawContours(srcClone, contours, i, color, 1);
  }
  cv.imshow('canvasOutput', srcClone);
  // Draw each detected finder pattern in a random color.
  for (let i = 0; i < contours2.size(); i++) {
    const color = new cv.Scalar(Math.round(Math.random() * 255), Math.round(Math.random() * 255),
                                Math.round(Math.random() * 255));
    cv.drawContours(dst, contours2, i, color, 1);
  }
  cv.imshow('canvasOutput2', dst);
  // OpenCV.js objects live in emscripten heap memory and are not garbage
  // collected — every Mat/MatVector must be deleted explicitly.
  src.delete(); srcClone.delete(); dst.delete();
  contours.delete(); contours2.delete(); hierarchy.delete();
};
function onOpenCvReady() {
  document.getElementById('status').innerHTML = 'OpenCV.js is ready.';
}
</script>
</body>
</html>
其中绝大多数部分都和C++相似,不同之处主要在于hierarchy的访问方式(intPtr)和Mat/MatVector的显式内存释放。它能够成功运行,并且得到正确的定位。(这里OpenCV.js的相关运行情况请参考官方教程)
三、研究收获
这次研究的关键节点, 是建立了Debug机制。在JS代码中加入debugger语句,并且开启F12,则在调试的过程中,可以查看各个变量的信息。
此外,非常重要的参考资料,就是OpenCV的官方教程。如果希望进一步进行研究的话,首先需要先收集掌握所有现有资料。
感谢阅读至此,希望有所帮助。
相关推荐
woniulx0 2020-03-26
learningCV 2020-11-10
learningCV 2020-08-25
huang00 2020-08-21
wangdaren 2020-08-15
BeanJoy 2020-07-28
csdmeb 2020-06-25
wangdaren 2020-06-14
pythonxuexi 2020-06-13
woniulx0 2020-06-13
greent00 2020-06-10
liangzuojiayi 2020-06-09
greent00 2020-06-09
csdmeb 2020-06-08
BeanJoy 2020-06-06
lihuifei 2020-06-05
wangdaren 2020-05-31
greent00 2020-05-30