pip install setuptools
cd python_api
pip install .
from face_sdk_3divi import FacerecService

# Path to the unpacked Face SDK distribution — adjust to your installation.
SDK_PATH = "/path/to/face_sdk"
dll_dir = SDK_PATH + "/lib/libfacerec.so"   # facerec shared library
conf_dir = SDK_PATH + "/conf/facerec"       # configuration files directory
license_dir = SDK_PATH + "/license"         # license directory

# Create the FacerecService entry point for the SDK.
service = FacerecService.create_service(dll_dir, conf_dir, license_dir)

# Read the input image as raw encoded bytes and wrap it in a Context.
# (Fixed: smart quotes around the path were invalid Python syntax, and the
# read statement must be indented inside the `with` block.)
with open("path/to/input/image", "rb") as file:
    input_image = file.read()
input_context = service.create_context_from_encoded_image(input_image)
# Build the configuration Context for a processing block.
configDict = {}

# mandatory, specify the name of processing block
configDict["unit_type"] = "<name_of_processing_block>"

# if omitted, the default value will be used
configDict["modification"] = "<modification>"

# if not specified, the first version of the modification will be used
# (must be an integer — replace 1 with the desired <version>)
configDict["version"] = 1

# the default models are located in the Face SDK distribution directory:
# share/processing_block/<modification>/(<version>/ or <version>.enc)
# you can set your own path to the model
configDict["model_path"] = "<path_to_model_file>"

# default location of the onnxruntime library in the Face SDK folder for the
# Linux platform, or the "bin" folder for the Windows platform;
# you can specify your own path to the onnxruntime library.
# if the value is not specified, the OS-specific default search order is used.
# NOTE: the nested dict must be created before assigning its inner key —
# writing configDict["ONNXRuntime"]["library_path"] on a plain dict raises KeyError.
configDict["ONNXRuntime"] = {"library_path": "../lib"}   # use "../bin" for Windows

# optional, True if you want to use GPU acceleration (CUDA) for processing
# blocks that support it
configDict["use_cuda"] = False

processing_block = service.create_processing_block(configDict)
# Creating Processing Blocks
# (Fixed: smart quotes around keys/values were invalid Python syntax.)
detector = service.create_processing_block({"unit_type": "FACE_DETECTOR"})
fitter = service.create_processing_block({"unit_type": "FACE_FITTER"})
liveness = service.create_processing_block({"unit_type": "LIVENESS_ESTIMATOR"})
template_extractor = service.create_processing_block({"unit_type": "FACE_TEMPLATE_EXTRACTOR"})
# Fixed: trailing space inside "VERIFICATION_MODULE " would not match the unit name.
verifier = service.create_processing_block({"unit_type": "VERIFICATION_MODULE"})

# Process created input Context
# 1. Detect face and face keypoints
detector(input_context)
fitter(input_context)

# 2. Check liveness
liveness(input_context)
is_alive = input_context["objects"][0]["liveness"]["value"].get_value()

# 3. Extract template
template_extractor(input_context)

# 4. Verify identity against stored template
# NOTE(review): stored_face_template must be loaded/defined before this point —
# it is not created anywhere in this snippet.
verificationData = service.create_context({})
verificationData["template1"] = input_context["objects"][0]["face_template"]
verificationData["template2"]["template"] = stored_face_template
# Fixed: 'verificationModule' was an undefined name — the block created above
# is bound to 'verifier'.
verifier(verificationData)
result = verificationData["result"]
score = result["score"].get_value()