-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathutil.py
More file actions
90 lines (71 loc) · 2.54 KB
/
util.py
File metadata and controls
90 lines (71 loc) · 2.54 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
import numpy as np
import joblib
import json
import base64
import cv2
from wavelet import w2d
# Mapping of class name -> numeric label, populated by load_saved_artifacts().
__class_name_to_number = {}
# Inverse mapping: numeric label -> class name, also set by load_saved_artifacts().
__class_number_to_name = {}
# Trained classifier exposing predict()/predict_proba(); None until
# load_saved_artifacts() joblib-loads it from ./artifacts/saved_model.pkl.
__model = None
# Haar cascade detectors, loaded at import time from OpenCV's bundled data dir.
face_cascade = cv2.CascadeClassifier(
    cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
eye_cascade = cv2.CascadeClassifier(
    cv2.data.haarcascades + 'haarcascade_eye.xml')
def classify_image(b64, path=None):
    """Classify every face found in an image.

    Args:
        b64: base64-encoded image string (data-URI or raw base64);
            only used when ``path`` is falsy.
        path: optional filesystem path to the image; takes precedence
            over ``b64``.

    Returns:
        A list with one dict per detected face:
        ``class`` (predicted label name), ``class_probability``
        (per-class probabilities in percent, rounded to 2 decimals) and
        ``class_dictionary`` (label-name -> class-number mapping).

    Raises:
        RuntimeError: if load_saved_artifacts() has not been called yet.
    """
    if __model is None:
        # Fail with a clear message instead of an opaque AttributeError
        # on __model.predict() below.
        raise RuntimeError(
            "model not loaded - call load_saved_artifacts() first")

    # Feature vector = 32x32x3 raw pixels stacked on 32x32 wavelet coeffs.
    len_image_array = 32 * 32 * 3 + 32 * 32
    result = []
    for img in face_detected(path, b64):
        scaled_raw_img = cv2.resize(img, (32, 32))
        img_har = w2d(img, "db1", 5)
        scaled_img_har = cv2.resize(img_har, (32, 32))
        combined_img = np.vstack((
            scaled_raw_img.reshape(32 * 32 * 3, 1),
            scaled_img_har.reshape(32 * 32, 1),
        ))
        final = combined_img.reshape(1, len_image_array).astype(float)
        result.append({
            'class': class_number_to_name(__model.predict(final)[0]),
            'class_probability': np.around(
                __model.predict_proba(final) * 100, 2).tolist()[0],
            'class_dictionary': __class_name_to_number,
        })
    return result
def class_number_to_name(class_num):
    """Return the human-readable class name for numeric label ``class_num``."""
    name = __class_number_to_name[class_num]
    return name
def load_saved_artifacts():
    """Load the class-label dictionary and trained model from ./artifacts.

    Populates the module globals ``__class_name_to_number``,
    ``__class_number_to_name`` and ``__model``.  Idempotent: repeated
    calls do not re-read artifacts that are already loaded (the original
    guarded only the model but re-read the JSON on every call).

    Raises:
        OSError: if either artifact file cannot be opened.
    """
    global __class_name_to_number
    global __class_number_to_name
    global __model

    print("loading saved artifacts...start")

    if not __class_name_to_number:
        with open("./artifacts/class_dictionary.json", "r") as f:
            __class_name_to_number = json.load(f)
        # Invert name->number into number->name for prediction lookups.
        __class_number_to_name = {
            v: k for k, v in __class_name_to_number.items()}

    if __model is None:
        with open('./artifacts/saved_model.pkl', 'rb') as f:
            __model = joblib.load(f)

    print("loading saved artifacts...done")
def get_cv2_image_from_b64(b64):
    """Decode a base64-encoded image into an OpenCV BGR array.

    Accepts either a full data-URI ("data:image/...;base64,<payload>")
    or a bare base64 payload — the original indexed ``split(",")[1]``
    unconditionally and raised IndexError on bare base64 input.

    Args:
        b64: the base64 string (with or without a data-URI header).

    Returns:
        The decoded image as a numpy array (BGR), or None when OpenCV
        cannot decode the bytes.
    """
    # Strip the data-URI header only when one is present.
    payload = b64.split(",", 1)[1] if "," in b64 else b64
    raw = np.frombuffer(base64.b64decode(payload), np.uint8)
    return cv2.imdecode(raw, cv2.IMREAD_COLOR)
def face_detected(path, b64):
    """Detect faces in an image and return each cropped colour region.

    Args:
        path: filesystem path to the image; used when truthy.
        b64: base64-encoded image; used only when ``path`` is falsy.

    Returns:
        List of BGR sub-images, one per detected face (possibly empty,
        including when the image cannot be read or decoded).
    """
    img = cv2.imread(path) if path else get_cv2_image_from_b64(b64)
    if img is None:
        # Unreadable path / undecodable base64: report "no faces" rather
        # than crashing inside cvtColor on a None image.
        return []

    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray, 1.3, 5)

    cropped_faces = []
    for (x, y, w, h) in faces:
        # NOTE(review): the original ran eye_cascade.detectMultiScale on
        # the gray ROI here but never used the result (dead code) -
        # presumably an intended "face with two eyes" filter was dropped.
        # Behavior kept unchanged: every detected face is returned.
        cropped_faces.append(img[y:y + h, x:x + w])
    return cropped_faces
def b64_image():
    """Return the sample base64 image string stored in ./b64.txt."""
    with open("./b64.txt") as handle:
        contents = handle.read()
    return contents
if __name__ == "__main__":
    # Smoke test: load the saved artifacts, then classify the bundled
    # sample image by path (b64 argument unused).
    load_saved_artifacts()
    predictions = classify_image(None, "./r.jpg")
    print(predictions)