import sys

sys.path.append('.')

import os
import base64
import json
from time import gmtime, strftime

import cv2
import numpy as np
from flask import Flask, request, jsonify

from facewrapper.facewrapper import ttv_version
from facewrapper.facewrapper import ttv_get_hwid
from facewrapper.facewrapper import ttv_init
from facewrapper.facewrapper import ttv_init_offline
from facewrapper.facewrapper import ttv_detect_face
# Flask application and liveness-engine initialization (runs at import time).
app = Flask(__name__)

app.config['SITE'] = "http://0.0.0.0:8000/"
app.config['DEBUG'] = False

# Online activation key and the offline fallback license file.
licenseKey = "XXXXX-XXXXX-XXXXX-XXXXX"
licensePath = "license.txt"
# Model dictionary shipped alongside the facewrapper package.
modelFolder = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'facewrapper', 'dict')

version = ttv_version()
print("version: ", version.decode('utf-8'))

# Try online activation first; fall back to the offline license file on failure.
ret = ttv_init(modelFolder.encode('utf-8'), licenseKey.encode('utf-8'))
if ret != 0:
    print(f"online init failed: {ret}")

    # Print the hardware id so an offline license can be issued for this host.
    hwid = ttv_get_hwid()
    print("hwid: ", hwid.decode('utf-8'))

    ret = ttv_init_offline(modelFolder.encode('utf-8'), licensePath.encode('utf-8'))
    if ret != 0:
        print(f"offline init failed: {ret}")
        # Engine is unusable without a valid license -- abort startup.
        sys.exit(-1)
    else:
        print("offline init ok")
else:
    print("online init ok")
@app.route('/api/liveness', methods=['POST'])
def check_liveness():
    """Run face detection + liveness on an uploaded image file.

    Expects a multipart form with an 'image' file part. Returns a JSON
    payload with a textual result, the face rectangle, the liveness score
    and the head-pose angles.
    """
    file = request.files['image']
    # np.frombuffer replaces the long-deprecated (now removed) np.fromstring.
    image = cv2.imdecode(np.frombuffer(file.read(), np.uint8), cv2.IMREAD_COLOR)

    faceRect = np.zeros([4], dtype=np.int32)        # filled as x1, y1, x2, y2
    livenessScore = np.zeros([1], dtype=np.double)
    angles = np.zeros([3], dtype=np.double)
    ret = ttv_detect_face(image, image.shape[1], image.shape[0], faceRect, livenessScore, angles)
    if ret == -1:
        result = "license error!"
    elif ret == -2:
        result = "init error!"
    elif ret == 0:
        result = "no face detected!"
    elif ret > 1:
        result = "multiple face detected!"
    # Bug fix: compare y2 (faceRect[3]) against the image height; the original
    # tested faceRect[2] (x2) twice. Message typo 'faace is in boundary!' fixed.
    elif faceRect[0] < 0 or faceRect[1] < 0 or faceRect[2] >= image.shape[1] or faceRect[3] >= image.shape[0]:
        result = "face is out of boundary!"
    elif livenessScore[0] > 0:
        result = "genuine"
    else:
        result = "spoof"

    status = "ok"
    # Cast numpy scalars to native Python types: Flask's JSON encoder cannot
    # serialize np.int32 / np.float64 values and would raise a TypeError.
    response = jsonify({"status": status,
                        "data": {"result": result,
                                 "face_rect": {"x": int(faceRect[0]),
                                               "y": int(faceRect[1]),
                                               "w": int(faceRect[2] - faceRect[0] + 1),
                                               "h": int(faceRect[3] - faceRect[1] + 1)},
                                 "liveness_score": float(livenessScore[0]),
                                 "angles": {"yaw": float(angles[0]),
                                            "roll": float(angles[1]),
                                            "pitch": float(angles[2])}}})

    response.status_code = 200
    response.headers["Content-Type"] = "application/json; charset=utf-8"
    return response
@app.route('/api/liveness_base64', methods=['POST'])
def check_liveness_base64():
    """Run face detection + liveness on a base64-encoded image.

    Expects a JSON body with an 'image' field holding the base64-encoded
    image bytes. Returns the same payload shape as /api/liveness.
    """
    content = request.get_json()
    imageBase64 = content['image']
    image = cv2.imdecode(np.frombuffer(base64.b64decode(imageBase64), dtype=np.uint8), cv2.IMREAD_COLOR)

    faceRect = np.zeros([4], dtype=np.int32)        # filled as x1, y1, x2, y2
    livenessScore = np.zeros([1], dtype=np.double)
    angles = np.zeros([3], dtype=np.double)
    ret = ttv_detect_face(image, image.shape[1], image.shape[0], faceRect, livenessScore, angles)
    if ret == -1:
        result = "license error!"
    elif ret == -2:
        result = "init error!"
    elif ret == 0:
        result = "no face detected!"
    elif ret > 1:
        result = "multiple face detected!"
    # Bug fix: compare y2 (faceRect[3]) against the image height; the original
    # tested faceRect[2] (x2) twice. Message typo 'faace is in boundary!' fixed.
    elif faceRect[0] < 0 or faceRect[1] < 0 or faceRect[2] >= image.shape[1] or faceRect[3] >= image.shape[0]:
        result = "face is out of boundary!"
    elif livenessScore[0] > 0:
        result = "genuine"
    else:
        result = "spoof"

    status = "ok"
    # Cast numpy scalars to native Python types: Flask's JSON encoder cannot
    # serialize np.int32 / np.float64 values and would raise a TypeError.
    response = jsonify({"status": status,
                        "data": {"result": result,
                                 "face_rect": {"x": int(faceRect[0]),
                                               "y": int(faceRect[1]),
                                               "w": int(faceRect[2] - faceRect[0] + 1),
                                               "h": int(faceRect[3] - faceRect[1] + 1)},
                                 "liveness_score": float(livenessScore[0]),
                                 "angles": {"yaw": float(angles[0]),
                                            "roll": float(angles[1]),
                                            "pitch": float(angles[2])}}})

    response.status_code = 200
    response.headers["Content-Type"] = "application/json; charset=utf-8"
    return response
if __name__ == '__main__':
    # Listen on the port given by the PORT environment variable (default 8000).
    app.run(host='0.0.0.0', port=int(os.environ.get("PORT", 8000)))