Skip to content

Commit b5d7734

Browse files
committed
Add improved alignment code for #50. Accuracy not yet tested.
1 parent 4962a48 commit b5d7734

File tree

1 file changed

+14
-94
lines changed

1 file changed

+14
-94
lines changed

openface/alignment/naive_dlib.py

+14-94
Original file line number | Diff line number | Diff line change
@@ -31,7 +31,7 @@ class NaiveDlib:
3131
def __init__(self, faceMean, facePredictor):
3232
"""Initialize the dlib-based alignment."""
3333
self.detector = dlib.get_frontal_face_detector()
34-
self.normMeanAlignPoints = loadMeanPoints(faceMean)
34+
self.normMeanLandmarks = loadMeanPoints(faceMean)
3535
self.predictor = dlib.shape_predictor(facePredictor)
3636

3737
def getAllFaceBoundingBoxes(self, img):
@@ -46,18 +46,9 @@ def align(self, img, bb):
4646
points = self.predictor(img, bb)
4747
return list(map(lambda p: (p.x, p.y), points.parts()))
4848

49+
EYES_AND_NOSE = np.array([36, 45, 33])
4950
def alignImg(self, method, size, img, bb=None,
50-
outputPrefix=None, outputDebug=False,
51-
expandBox=False, alignPoints=None):
52-
if outputPrefix:
53-
helper.mkdirP(os.path.dirname(outputPrefix))
54-
55-
def getName(tag=None):
56-
if tag is None:
57-
return "{}.png".format(outputPrefix)
58-
else:
59-
return "{}-{}.png".format(outputPrefix, tag)
60-
51+
landmarks=None, landmarkIndices=EYES_AND_NOSE):
6152
if bb is None:
6253
try:
6354
bb = self.getLargestFaceBoundingBox(img)
@@ -69,91 +60,20 @@ def getName(tag=None):
6960
# Most failed detection attempts return here.
7061
return
7162

72-
if alignPoints is None:
73-
alignPoints = self.align(img, bb)
74-
meanAlignPoints = transformPoints(self.normMeanAlignPoints, bb, True)
75-
76-
(xs, ys) = zip(*meanAlignPoints)
77-
tightBb = dlib.rectangle(left=min(xs), right=max(xs),
78-
top=min(ys), bottom=max(ys))
79-
80-
if method != 'tightcrop':
81-
npAlignPoints = np.float32(alignPoints)
82-
npMeanAlignPoints = np.float32(meanAlignPoints)
83-
84-
if method == 'tightcrop':
85-
warpedImg = img
86-
elif method == 'affine':
87-
ss = np.array([39, 42, 57]) # Eyes and bottom lip.
88-
npAlignPointsSS = npAlignPoints[ss]
89-
npMeanAlignPointsSS = npMeanAlignPoints[ss]
90-
H = cv2.getAffineTransform(npAlignPointsSS, npMeanAlignPointsSS)
91-
warpedImg = cv2.warpAffine(img, H, np.shape(img)[0:2])
92-
elif method == 'perspective':
93-
ss = np.array([39, 42, 48, 54]) # Eyes and corners of mouth.
94-
npAlignPointsSS = npAlignPoints[ss]
95-
npMeanAlignPointsSS = npMeanAlignPoints[ss]
96-
H = cv2.getPerspectiveTransform(
97-
npAlignPointsSS, npMeanAlignPointsSS)
98-
warpedImg = cv2.warpPerspective(img, H, np.shape(img)[0:2])
99-
elif method == 'homography':
100-
(H, mask) = cv2.findHomography(npAlignPoints, npMeanAlignPoints,
101-
method=cv2.LMEDS)
102-
warpedImg = cv2.warpPerspective(img, H, np.shape(img)[0:2])
103-
else:
104-
print("Error: method '{}' is unimplemented.".format(method))
105-
sys.exit(-1)
63+
if landmarks is None:
64+
landmarks = self.align(img, bb)
10665

107-
if method == 'tightcrop':
108-
wAlignPoints = alignPoints
109-
else:
110-
wBb = self.getLargestFaceBoundingBox(warpedImg)
111-
if wBb is None:
112-
return
113-
wAlignPoints = self.align(warpedImg, wBb)
114-
wMeanAlignPoints = transformPoints(
115-
self.normMeanAlignPoints, wBb, True)
116-
117-
if outputDebug:
118-
annotatedImg = annotate(img, bb, alignPoints, meanAlignPoints)
119-
io.imsave(getName("orig"), img)
120-
io.imsave(getName("annotated"), annotatedImg)
121-
122-
if method != 'tightcrop':
123-
wAnnotatedImg = annotate(warpedImg, wBb,
124-
wAlignPoints, wMeanAlignPoints)
125-
io.imsave(getName("warped"), warpedImg)
126-
io.imsave(getName("warped-annotated"), wAnnotatedImg)
127-
128-
if len(warpedImg.shape) != 3:
129-
print(" + Warning: Result does not have 3 dimensions.")
130-
return None
131-
132-
(xs, ys) = zip(*wAlignPoints)
133-
xRange = max(xs) - min(xs)
134-
yRange = max(ys) - min(ys)
135-
if expandBox:
136-
(l, r, t, b) = (min(xs) - 0.20 * xRange, max(xs) + 0.20 * xRange,
137-
min(ys) - 0.65 * yRange, max(ys) + 0.20 * yRange)
138-
else:
139-
(l, r, t, b) = (min(xs), max(xs), min(ys), max(ys))
140-
(w, h, _) = warpedImg.shape
141-
if 0 <= l <= w and 0 <= r <= w and 0 <= b <= h and 0 <= t <= h:
142-
cwImg = cv2.resize(warpedImg[t:b, l:r], (size, size))
143-
h, edges = np.histogram(cwImg.ravel(), 16, [0, 256])
144-
s = sum(h)
145-
if any(h > 0.65 * s):
146-
print("Warning: Image is likely a single color.")
147-
return
148-
else:
149-
print("Warning: Unable to align and crop to the "
150-
"face's bounding box.")
151-
return
66+
npLandmarks = np.float32(landmarks)
67+
npNormMeanLandmarks = np.float32(self.normMeanLandmarks)
15268

153-
if outputDebug:
154-
io.imsave(getName(), cwImg)
155-
return cwImg
69+
if method == 'affine':
70+
H = cv2.getAffineTransform(npLandmarks[landmarkIndices],
71+
size*npNormMeanLandmarks[landmarkIndices])
72+
thumbnail = cv2.warpAffine(img, H, (size, size))
73+
else:
74+
raise Exception('Unrecognized method: {}'.format(method))
15675

76+
return thumbnail
15777

15878
def transformPoints(points, bb, toImgCoords):
15979
if toImgCoords:

0 commit comments

Comments (0)