# This script estimates the accuracy of different face detection models.
# The COCO evaluation tool is used to compute accuracy metrics (Average Precision).
# The script works with different face detection datasets.
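#
# A hypothetical invocation (the model and dataset paths below are
# placeholders, not files shipped with this script):
#
#   python face_detector_accuracy.py \
#       --proto opencv_face_detector.prototxt \
#       --model opencv_face_detector.caffemodel \
#       --ann /path/to/FDDB-folds \
#       --pics /path/to/originalPics \
#       --fddb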
import os
import json
from fnmatch import fnmatch
from math import pi
import cv2 as cv
import argparse
import sys

from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
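# pycocotools is not part of OpenCV; if the imports above fail,
# 'pip install pycocotools' is one way to obtain it.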

parser = argparse.ArgumentParser(
    description='Evaluate OpenCV face detection algorithms '
                'using COCO evaluation tool, http://cocodataset.org/#detections-eval')
parser.add_argument('--proto', help='Path to .prototxt of Caffe model or .pbtxt of TensorFlow graph')
parser.add_argument('--model', help='Path to .caffemodel trained in Caffe or .pb from TensorFlow')
parser.add_argument('--cascade', help='Optional path to trained Haar cascade as '
                                      'an additional model for evaluation')
parser.add_argument('--ann', help='Path to text file with ground truth annotations')
parser.add_argument('--pics', help='Path to images root directory')
parser.add_argument('--fddb', help='Evaluate FDDB dataset, http://vis-www.cs.umass.edu/fddb/', action='store_true')
parser.add_argument('--wider', help='Evaluate WIDER FACE dataset, http://mmlab.ie.cuhk.edu.hk/projects/WIDERFace/', action='store_true')
args = parser.parse_args()

dataset = {}
dataset['images'] = []
dataset['categories'] = [{'id': 0, 'name': 'face'}]
dataset['annotations'] = []
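# The three lists above follow the minimal COCO detection format
# (http://cocodataset.org/#format-data): an 'images' list, a single 'face'
# category, and an 'annotations' list of axis-aligned ground-truth boxes.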

def ellipse2Rect(params):
    # Convert an FDDB ellipse (radii, rotation angle in radians, center)
    # to a bounding rectangle by rasterizing the ellipse into a polygon
    # and taking the polygon's bounding box.
    rad_x = params[0]
    rad_y = params[1]
    angle = params[2] * 180.0 / pi
    center_x = params[3]
    center_y = params[4]
    pts = cv.ellipse2Poly((int(center_x), int(center_y)), (int(rad_x), int(rad_y)),
                          int(angle), 0, 360, 10)
    rect = cv.boundingRect(pts)
    left = rect[0]
    top = rect[1]
    right = rect[0] + rect[2]
    bottom = rect[1] + rect[3]
    return left, top, right, bottom

def addImage(imagePath):
    assert('images' in dataset)
    imageId = len(dataset['images'])
    dataset['images'].append({
        'id': int(imageId),
        'file_name': imagePath
    })
    return imageId

def addBBox(imageId, left, top, width, height):
    assert('annotations' in dataset)
    dataset['annotations'].append({
        'id': len(dataset['annotations']),
        'image_id': int(imageId),
        'category_id': 0,  # Face
        'bbox': [int(left), int(top), int(width), int(height)],
        'iscrowd': 0,
        'area': float(width * height)
    })

def addDetection(detections, imageId, left, top, width, height, score):
    detections.append({
        'image_id': int(imageId),
        'category_id': 0,  # Face
        'bbox': [int(left), int(top), int(width), int(height)],
        'score': float(score)
    })
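
# FDDB ground truth is stored in 'FDDB-fold-*-ellipseList.txt' files. Each
# record is an image path (without extension), the number of faces, then one
# ellipse per face as '<major_axis_radius minor_axis_radius angle center_x
# center_y 1>' with the angle given in radians; the parser below assumes
# exactly that layout.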
def fddb_dataset(annotations, images):
    for d in os.listdir(annotations):
        if fnmatch(d, 'FDDB-fold-*-ellipseList.txt'):
            with open(os.path.join(annotations, d), 'rt') as f:
                lines = [line.rstrip('\n') for line in f]
                lineId = 0
                while lineId < len(lines):
                    # Image
                    imgPath = lines[lineId]
                    lineId += 1
                    imageId = addImage(os.path.join(images, imgPath) + '.jpg')
                    # Faces
                    numFaces = int(lines[lineId])
                    lineId += 1
                    for i in range(numFaces):
                        params = [float(v) for v in lines[lineId].split()]
                        lineId += 1
                        left, top, right, bottom = ellipse2Rect(params)
                        addBBox(imageId, left, top, width=right - left + 1,
                                height=bottom - top + 1)
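
# WIDER FACE annotations come as a single text file: an image path, the number
# of faces, then one face per line whose first four columns are 'x y w h' (the
# trailing attribute columns, e.g. blur and occlusion flags, are ignored here).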
def wider_dataset(annotations, images):
    with open(annotations, 'rt') as f:
        lines = [line.rstrip('\n') for line in f]
        lineId = 0
        while lineId < len(lines):
            # Image
            imgPath = lines[lineId]
            lineId += 1
            imageId = addImage(os.path.join(images, imgPath))
            # Faces
            numFaces = int(lines[lineId])
            lineId += 1
            for i in range(numFaces):
                params = [int(v) for v in lines[lineId].split()]
                lineId += 1
                left, top, width, height = params[0], params[1], params[2], params[3]
                addBBox(imageId, left, top, width, height)

def evaluate():
    cocoGt = COCO('annotations.json')
    cocoDt = cocoGt.loadRes('detections.json')
    cocoEval = COCOeval(cocoGt, cocoDt, 'bbox')
    cocoEval.evaluate()
    cocoEval.accumulate()
    # Prints the standard COCO summary: AP averaged over IoU 0.50:0.95,
    # AP@0.50, AP@0.75, and the scale/recall breakdowns.
    cocoEval.summarize()

### Convert to COCO annotations format #########################################
assert args.fddb or args.wider, 'Specify either --fddb or --wider dataset'
if args.fddb:
    fddb_dataset(args.ann, args.pics)
elif args.wider:
    wider_dataset(args.ann, args.pics)

with open('annotations.json', 'wt') as f:
    json.dump(dataset, f)

### Obtain detections ##########################################################
detections = []
if args.proto and args.model:
    net = cv.dnn.readNet(args.proto, args.model)

    def detect(img, imageId):
        imgWidth = img.shape[1]
        imgHeight = img.shape[0]
        # A 300x300 input with BGR mean subtraction (104, 177, 123) matches the
        # preprocessing of OpenCV's SSD-based face detection models.
        net.setInput(cv.dnn.blobFromImage(img, 1.0, (300, 300), (104., 177., 123.), False, False))
        out = net.forward()
        # SSD output has shape [1, 1, N, 7]: image id, class id, confidence,
        # then the box corners in coordinates relative to the image size.
        for i in range(out.shape[2]):
            confidence = out[0, 0, i, 2]
            left = int(out[0, 0, i, 3] * imgWidth)
            top = int(out[0, 0, i, 4] * imgHeight)
            right = int(out[0, 0, i, 5] * imgWidth)
            bottom = int(out[0, 0, i, 6] * imgHeight)
            # Clip the box to the image and convert corners to width/height.
            x = max(0, min(left, imgWidth - 1))
            y = max(0, min(top, imgHeight - 1))
            w = max(0, min(right - x + 1, imgWidth - x))
            h = max(0, min(bottom - y + 1, imgHeight - y))
            addDetection(detections, imageId, x, y, w, h, score=confidence)
elif args.cascade:
    cascade = cv.CascadeClassifier(args.cascade)

    def detect(img, imageId):
        srcImgGray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
        faces = cascade.detectMultiScale(srcImgGray)
        for rect in faces:
            left, top, width, height = rect[0], rect[1], rect[2], rect[3]
            # detectMultiScale() returns no confidences, so every detection
            # gets score=1.0 and the AP reflects a single operating point.
            addDetection(detections, imageId, left, top, width, height, score=1.0)
else:
    sys.exit('Specify a model to evaluate: --proto and --model, or --cascade')

for i in range(len(dataset['images'])):
    sys.stdout.write('\r%d / %d' % (i + 1, len(dataset['images'])))
    sys.stdout.flush()
    img = cv.imread(dataset['images'][i]['file_name'])
    imageId = int(dataset['images'][i]['id'])
    detect(img, imageId)

with open('detections.json', 'wt') as f:
    json.dump(detections, f)

evaluate()
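
# evaluate() ends with the COCO summary table on stdout, e.g. lines of the form
#   Average Precision  (AP) @[ IoU=0.50:0.95 | area=   all | maxDets=100 ] = ...
# where the reported values depend on the model and dataset under test.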

def rm(f):
    if os.path.exists(f):
        os.remove(f)

rm('annotations.json')
rm('detections.json')