From 673fe0f2c8d21f8a67b1f5de74bb1f62c7bbc3e2 Mon Sep 17 00:00:00 2001 From: harisreedhar Date: Thu, 31 Oct 2024 23:17:58 +0530 Subject: [PATCH] changes --- facefusion/processors/choices.py | 3 +- facefusion/processors/modules/deep_swapper.py | 244 ++++++++++++++++++ facefusion/processors/typing.py | 8 + .../uis/components/deep_swapper_options.py | 46 ++++ facefusion/uis/components/preview.py | 1 + facefusion/uis/layouts/benchmark.py | 5 +- facefusion/uis/layouts/default.py | 5 +- facefusion/uis/layouts/webcam.py | 5 +- facefusion/uis/typing.py | 1 + facefusion/wording.py | 2 + 10 files changed, 316 insertions(+), 4 deletions(-) create mode 100755 facefusion/processors/modules/deep_swapper.py create mode 100755 facefusion/uis/components/deep_swapper_options.py diff --git a/facefusion/processors/choices.py b/facefusion/processors/choices.py index 3380f38b..89c87138 100755 --- a/facefusion/processors/choices.py +++ b/facefusion/processors/choices.py @@ -1,9 +1,10 @@ from typing import List, Sequence from facefusion.common_helper import create_float_range, create_int_range -from facefusion.processors.typing import AgeModifierModel, ExpressionRestorerModel, FaceDebuggerItem, FaceEditorModel, FaceEnhancerModel, FaceSwapperSet, FrameColorizerModel, FrameEnhancerModel, LipSyncerModel +from facefusion.processors.typing import AgeModifierModel, DeepSwapperModel, ExpressionRestorerModel, FaceDebuggerItem, FaceEditorModel, FaceEnhancerModel, FaceSwapperSet, FrameColorizerModel, FrameEnhancerModel, LipSyncerModel age_modifier_models : List[AgeModifierModel] = [ 'styleganex_age' ] +deep_swapper_models : List[DeepSwapperModel] = [ 'jackie_chan' ] expression_restorer_models : List[ExpressionRestorerModel] = [ 'live_portrait' ] face_debugger_items : List[FaceDebuggerItem] = [ 'bounding-box', 'face-landmark-5', 'face-landmark-5/68', 'face-landmark-68', 'face-landmark-68/5', 'face-mask', 'face-detector-score', 'face-landmarker-score', 'age', 'gender', 'race' ] face_editor_models : 
List[FaceEditorModel] = [ 'live_portrait' ] diff --git a/facefusion/processors/modules/deep_swapper.py b/facefusion/processors/modules/deep_swapper.py new file mode 100755 index 00000000..0d34d2b7 --- /dev/null +++ b/facefusion/processors/modules/deep_swapper.py @@ -0,0 +1,244 @@ +from argparse import ArgumentParser +from typing import List, Tuple + +import cv2 +import numpy + +import facefusion.jobs.job_manager +import facefusion.jobs.job_store +import facefusion.processors.core as processors +from facefusion import config, content_analyser, face_classifier, face_detector, face_landmarker, face_masker, face_recognizer, inference_manager, logger, process_manager, state_manager, wording +from facefusion.download import conditional_download_hashes, conditional_download_sources +from facefusion.face_analyser import get_many_faces, get_one_face +from facefusion.face_helper import paste_back, warp_face_by_face_landmark_5 +from facefusion.face_masker import create_occlusion_mask, create_static_box_mask +from facefusion.face_selector import find_similar_faces, sort_and_filter_faces +from facefusion.face_store import get_reference_faces +from facefusion.filesystem import in_directory, is_image, is_video, resolve_relative_path, same_file_extension +from facefusion.processors import choices as processors_choices +from facefusion.processors.typing import DeepSwapperInputs +from facefusion.program_helper import find_argument_group +from facefusion.thread_helper import thread_semaphore +from facefusion.typing import ApplyStateItem, Args, Face, InferencePool, Mask, ModelOptions, ModelSet, ProcessMode, QueuePayload, UpdateProgress, VisionFrame +from facefusion.vision import read_image, read_static_image, write_image + +MODEL_SET : ModelSet =\ +{ + 'jackie_chan': + { + 'hashes': + { + 'deep_swapper': + { + 'url': 'https://huggingface.co/bluefoxcreation/DFM/resolve/main/Jackie_Chan.hash', + 'path': resolve_relative_path('../.assets/models/Jackie_Chan.hash') + } + }, + 'sources': + 
{ + 'deep_swapper': + { + 'url': 'https://github.com/iperov/DeepFaceLive/releases/download/JACKIE_CHAN/Jackie_Chan.dfm', + 'path': resolve_relative_path('../.assets/models/Jackie_Chan.dfm') + } + }, + 'template': 'arcface_128_v2', + 'size': (224, 224) + } +} + + +def get_inference_pool() -> InferencePool: + model_sources = get_model_options().get('sources') + model_context = __name__ + '.' + state_manager.get_item('deep_swapper_model') + return inference_manager.get_inference_pool(model_context, model_sources) + + +def clear_inference_pool() -> None: + model_context = __name__ + '.' + state_manager.get_item('deep_swapper_model') + inference_manager.clear_inference_pool(model_context) + + +def get_model_options() -> ModelOptions: + deep_swapper_model = state_manager.get_item('deep_swapper_model') + return MODEL_SET.get(deep_swapper_model) + + +def register_args(program : ArgumentParser) -> None: + group_processors = find_argument_group(program, 'processors') + if group_processors: + group_processors.add_argument('--deep-swapper-model', help = wording.get('help.deep_swapper_model'), default = config.get_str_value('processors.deep_swapper_model', 'jackie_chan'), choices = processors_choices.deep_swapper_models) + facefusion.jobs.job_store.register_step_keys([ 'deep_swapper_model' ]) + + +def apply_args(args : Args, apply_state_item : ApplyStateItem) -> None: + apply_state_item('deep_swapper_model', args.get('deep_swapper_model')) + + +def pre_check() -> bool: + download_directory_path = resolve_relative_path('../.assets/models') + model_hashes = get_model_options().get('hashes') + model_sources = get_model_options().get('sources') + + return conditional_download_hashes(download_directory_path, model_hashes) and conditional_download_sources(download_directory_path, model_sources) + + +def pre_process(mode : ProcessMode) -> bool: + if mode in [ 'output', 'preview' ] and not is_image(state_manager.get_item('target_path')) and not 
is_video(state_manager.get_item('target_path')): + logger.error(wording.get('choose_image_or_video_target') + wording.get('exclamation_mark'), __name__) + return False + if mode == 'output' and not in_directory(state_manager.get_item('output_path')): + logger.error(wording.get('specify_image_or_video_output') + wording.get('exclamation_mark'), __name__) + return False + if mode == 'output' and not same_file_extension([ state_manager.get_item('target_path'), state_manager.get_item('output_path') ]): + logger.error(wording.get('match_target_and_output_extension') + wording.get('exclamation_mark'), __name__) + return False + return True + + +def post_process() -> None: + read_static_image.cache_clear() + if state_manager.get_item('video_memory_strategy') in [ 'strict', 'moderate' ]: + clear_inference_pool() + if state_manager.get_item('video_memory_strategy') == 'strict': + content_analyser.clear_inference_pool() + face_classifier.clear_inference_pool() + face_detector.clear_inference_pool() + face_landmarker.clear_inference_pool() + face_masker.clear_inference_pool() + face_recognizer.clear_inference_pool() + + +def swap_face(target_face : Face, temp_vision_frame : VisionFrame) -> VisionFrame: + model_template = get_model_options().get('template') + model_size = get_model_options().get('size') + crop_vision_frame, affine_matrix = warp_face_by_face_landmark_5(temp_vision_frame, target_face.landmark_set.get('5/68'), model_template, model_size) + crop_vision_frame_raw = crop_vision_frame.copy() + box_mask = create_static_box_mask(crop_vision_frame.shape[:2][::-1], state_manager.get_item('face_mask_blur'), state_manager.get_item('face_mask_padding')) + crop_masks =\ + [ + box_mask + ] + + if 'occlusion' in state_manager.get_item('face_mask_types'): + occlusion_mask = create_occlusion_mask(crop_vision_frame) + crop_masks.append(occlusion_mask) + + crop_vision_frame = prepare_crop_frame(crop_vision_frame) + crop_vision_frame, crop_source_mask, crop_target_mask = 
forward(crop_vision_frame) + crop_vision_frame = normalize_crop_frame(crop_vision_frame) + crop_vision_frame = match_frame_color_with_mask(crop_vision_frame_raw, crop_vision_frame, crop_source_mask, crop_target_mask) + crop_masks.append(feather_crop_mask(crop_source_mask)) + crop_masks.append(feather_crop_mask(crop_target_mask)) + crop_mask = numpy.minimum.reduce(crop_masks).clip(0, 1) + paste_vision_frame = paste_back(temp_vision_frame, crop_vision_frame, crop_mask, affine_matrix) + return paste_vision_frame + + +def forward(crop_vision_frame : VisionFrame) -> Tuple[VisionFrame, Mask, Mask]: + deep_swapper = get_inference_pool().get('deep_swapper') + deep_swapper_inputs = {} + + for deep_swapper_input in deep_swapper.get_inputs(): + if deep_swapper_input.name == 'in_face:0': + deep_swapper_inputs[deep_swapper_input.name] = crop_vision_frame + if deep_swapper_input.name == 'morph_value:0': + morph_value = numpy.array([ 1 ]).astype(numpy.float32) + deep_swapper_inputs[deep_swapper_input.name] = morph_value + + with thread_semaphore(): + crop_target_mask, crop_vision_frame, crop_source_mask = deep_swapper.run(None, deep_swapper_inputs) + + return crop_vision_frame[0], crop_source_mask[0], crop_target_mask[0] + + +def prepare_crop_frame(crop_vision_frame : VisionFrame) -> VisionFrame: + crop_vision_frame = cv2.addWeighted(crop_vision_frame, 1.5, cv2.GaussianBlur(crop_vision_frame, (0, 0), 2), -0.5, 0) + crop_vision_frame = crop_vision_frame / 255.0 + crop_vision_frame = numpy.expand_dims(crop_vision_frame, axis = 0).astype(numpy.float32) + return crop_vision_frame + + +def normalize_crop_frame(crop_vision_frame : VisionFrame) -> VisionFrame: + crop_vision_frame = (crop_vision_frame * 255.0).clip(0, 255) + crop_vision_frame = crop_vision_frame.astype(numpy.uint8) + return crop_vision_frame + + +def feather_crop_mask(crop_source_mask : Mask) -> Mask: + model_size = get_model_options().get('size') + crop_mask = crop_source_mask.reshape(model_size).clip(0, 1) + crop_mask 
= cv2.erode(crop_mask, numpy.ones((7, 7), numpy.uint8), iterations = 1) + crop_mask = cv2.GaussianBlur(crop_mask, (15, 15), 0) + return crop_mask + + +def match_frame_color_with_mask(source_vision_frame : VisionFrame, target_vision_frame : VisionFrame, source_mask : Mask, target_mask : Mask) -> VisionFrame: + target_lab_frame = cv2.cvtColor(target_vision_frame, cv2.COLOR_BGR2LAB).astype(numpy.float32) / 255 + source_lab_frame = cv2.cvtColor(source_vision_frame, cv2.COLOR_BGR2LAB).astype(numpy.float32) / 255 + source_mask = (source_mask > 0.5).astype(numpy.float32) + target_mask = (target_mask > 0.5).astype(numpy.float32) + target_lab_filter = target_lab_frame * cv2.cvtColor(source_mask, cv2.COLOR_GRAY2BGR) + source_lab_filter = source_lab_frame * cv2.cvtColor(target_mask, cv2.COLOR_GRAY2BGR) + target_lab_frame -= target_lab_filter.mean(axis = ( 0, 1 )) + target_lab_frame /= target_lab_filter.std(axis = ( 0, 1 )) + 1e-6 + target_lab_frame *= source_lab_filter.std(axis = ( 0, 1 )) + target_lab_frame += source_lab_filter.mean(axis = ( 0, 1 )) + target_lab_frame = numpy.multiply(target_lab_frame.clip(0, 1), 255).astype(numpy.uint8) + target_vision_frame = cv2.cvtColor(target_lab_frame, cv2.COLOR_LAB2BGR) + return target_vision_frame + + +def get_reference_frame(source_face : Face, target_face : Face, temp_vision_frame : VisionFrame) -> VisionFrame: + return swap_face(target_face, temp_vision_frame) + + +def process_frame(inputs : DeepSwapperInputs) -> VisionFrame: + reference_faces = inputs.get('reference_faces') + target_vision_frame = inputs.get('target_vision_frame') + many_faces = sort_and_filter_faces(get_many_faces([ target_vision_frame ])) + + if state_manager.get_item('face_selector_mode') == 'many': + if many_faces: + for target_face in many_faces: + target_vision_frame = swap_face(target_face, target_vision_frame) + if state_manager.get_item('face_selector_mode') == 'one': + target_face = get_one_face(many_faces) + if target_face: + target_vision_frame = 
swap_face(target_face, target_vision_frame) + if state_manager.get_item('face_selector_mode') == 'reference': + similar_faces = find_similar_faces(many_faces, reference_faces, state_manager.get_item('reference_face_distance')) + if similar_faces: + for similar_face in similar_faces: + target_vision_frame = swap_face(similar_face, target_vision_frame) + return target_vision_frame + + +def process_frames(source_path : List[str], queue_payloads : List[QueuePayload], update_progress : UpdateProgress) -> None: + reference_faces = get_reference_faces() if 'reference' in state_manager.get_item('face_selector_mode') else None + + for queue_payload in process_manager.manage(queue_payloads): + target_vision_path = queue_payload['frame_path'] + target_vision_frame = read_image(target_vision_path) + output_vision_frame = process_frame( + { + 'reference_faces': reference_faces, + 'target_vision_frame': target_vision_frame + }) + write_image(target_vision_path, output_vision_frame) + update_progress(1) + + +def process_image(source_path : str, target_path : str, output_path : str) -> None: + reference_faces = get_reference_faces() if 'reference' in state_manager.get_item('face_selector_mode') else None + target_vision_frame = read_static_image(target_path) + output_vision_frame = process_frame( + { + 'reference_faces': reference_faces, + 'target_vision_frame': target_vision_frame + }) + write_image(output_path, output_vision_frame) + + +def process_video(source_paths : List[str], temp_frame_paths : List[str]) -> None: + processors.multi_process_frames(None, temp_frame_paths, process_frames) diff --git a/facefusion/processors/typing.py b/facefusion/processors/typing.py index 13578f0f..b52138b4 100644 --- a/facefusion/processors/typing.py +++ b/facefusion/processors/typing.py @@ -5,6 +5,7 @@ from numpy._typing import NDArray from facefusion.typing import AppContext, AudioFrame, Face, FaceSet, VisionFrame AgeModifierModel = Literal['styleganex_age'] +DeepSwapperModel = 
Literal['jackie_chan'] ExpressionRestorerModel = Literal['live_portrait'] FaceDebuggerItem = Literal['bounding-box', 'face-landmark-5', 'face-landmark-5/68', 'face-landmark-68', 'face-landmark-68/5', 'face-mask', 'face-detector-score', 'face-landmarker-score', 'age', 'gender', 'race'] FaceEditorModel = Literal['live_portrait'] @@ -21,6 +22,11 @@ AgeModifierInputs = TypedDict('AgeModifierInputs', 'reference_faces' : FaceSet, 'target_vision_frame' : VisionFrame }) +DeepSwapperInputs = TypedDict('DeepSwapperInputs', +{ + 'reference_faces' : FaceSet, + 'target_vision_frame' : VisionFrame +}) ExpressionRestorerInputs = TypedDict('ExpressionRestorerInputs', { 'reference_faces' : FaceSet, @@ -67,6 +73,7 @@ ProcessorStateKey = Literal\ [ 'age_modifier_model', 'age_modifier_direction', + 'deep_swapper_model', 'expression_restorer_model', 'expression_restorer_factor', 'face_debugger_items', @@ -100,6 +107,7 @@ ProcessorState = TypedDict('ProcessorState', { 'age_modifier_model' : AgeModifierModel, 'age_modifier_direction' : int, + 'deep_swapper_model' : DeepSwapperModel, 'expression_restorer_model' : ExpressionRestorerModel, 'expression_restorer_factor' : int, 'face_debugger_items' : List[FaceDebuggerItem], diff --git a/facefusion/uis/components/deep_swapper_options.py b/facefusion/uis/components/deep_swapper_options.py new file mode 100755 index 00000000..77b27572 --- /dev/null +++ b/facefusion/uis/components/deep_swapper_options.py @@ -0,0 +1,46 @@ +from typing import List, Optional + +import gradio + +from facefusion import state_manager, wording +from facefusion.processors import choices as processors_choices +from facefusion.processors.core import load_processor_module +from facefusion.processors.typing import DeepSwapperModel +from facefusion.uis.core import get_ui_component, register_ui_component + +DEEP_SWAPPER_MODEL_DROPDOWN : Optional[gradio.Dropdown] = None + + +def render() -> None: + global DEEP_SWAPPER_MODEL_DROPDOWN + + DEEP_SWAPPER_MODEL_DROPDOWN = 
gradio.Dropdown( + label = wording.get('uis.deep_swapper_model_dropdown'), + choices = processors_choices.deep_swapper_models, + value = state_manager.get_item('deep_swapper_model'), + visible = 'deep_swapper' in state_manager.get_item('processors') + ) + register_ui_component('deep_swapper_model_dropdown', DEEP_SWAPPER_MODEL_DROPDOWN) + + +def listen() -> None: + DEEP_SWAPPER_MODEL_DROPDOWN.change(update_deep_swapper_model, inputs = DEEP_SWAPPER_MODEL_DROPDOWN, outputs = DEEP_SWAPPER_MODEL_DROPDOWN) + + processors_checkbox_group = get_ui_component('processors_checkbox_group') + if processors_checkbox_group: + processors_checkbox_group.change(remote_update, inputs = processors_checkbox_group, outputs = DEEP_SWAPPER_MODEL_DROPDOWN) + + +def remote_update(processors : List[str]) -> gradio.Dropdown: + has_deep_swapper = 'deep_swapper' in processors + return gradio.Dropdown(visible = has_deep_swapper) + + +def update_deep_swapper_model(deep_swapper_model : DeepSwapperModel) -> gradio.Dropdown: + deep_swapper_module = load_processor_module('deep_swapper') + deep_swapper_module.clear_inference_pool() + state_manager.set_item('deep_swapper_model', deep_swapper_model) + + if deep_swapper_module.pre_check(): + return gradio.Dropdown(value = state_manager.get_item('deep_swapper_model')) + return gradio.Dropdown() diff --git a/facefusion/uis/components/preview.py b/facefusion/uis/components/preview.py index 53835bbe..0b7a2199 100755 --- a/facefusion/uis/components/preview.py +++ b/facefusion/uis/components/preview.py @@ -142,6 +142,7 @@ def listen() -> None: for ui_component in get_ui_components( [ 'age_modifier_model_dropdown', + 'deep_swapper_model_dropdown', 'expression_restorer_model_dropdown', 'processors_checkbox_group', 'face_editor_model_dropdown', diff --git a/facefusion/uis/layouts/benchmark.py b/facefusion/uis/layouts/benchmark.py index 72f75fb6..766492c1 100644 --- a/facefusion/uis/layouts/benchmark.py +++ b/facefusion/uis/layouts/benchmark.py @@ -2,7 +2,7 
@@ import gradio from facefusion import state_manager from facefusion.download import conditional_download -from facefusion.uis.components import about, age_modifier_options, benchmark, benchmark_options, execution, execution_queue_count, execution_thread_count, expression_restorer_options, face_debugger_options, face_editor_options, face_enhancer_options, face_swapper_options, frame_colorizer_options, frame_enhancer_options, lip_syncer_options, memory, processors +from facefusion.uis.components import about, age_modifier_options, benchmark, benchmark_options, deep_swapper_options, execution, execution_queue_count, execution_thread_count, expression_restorer_options, face_debugger_options, face_editor_options, face_enhancer_options, face_swapper_options, frame_colorizer_options, frame_enhancer_options, lip_syncer_options, memory, processors def pre_check() -> bool: @@ -33,6 +33,8 @@ def render() -> gradio.Blocks: processors.render() with gradio.Blocks(): age_modifier_options.render() + with gradio.Blocks(): + deep_swapper_options.render() with gradio.Blocks(): expression_restorer_options.render() with gradio.Blocks(): @@ -66,6 +68,7 @@ def render() -> gradio.Blocks: def listen() -> None: processors.listen() age_modifier_options.listen() + deep_swapper_options.listen() expression_restorer_options.listen() face_debugger_options.listen() face_editor_options.listen() diff --git a/facefusion/uis/layouts/default.py b/facefusion/uis/layouts/default.py index b57a9b8d..054cc1f0 100755 --- a/facefusion/uis/layouts/default.py +++ b/facefusion/uis/layouts/default.py @@ -1,7 +1,7 @@ import gradio from facefusion import state_manager -from facefusion.uis.components import about, age_modifier_options, common_options, execution, execution_queue_count, execution_thread_count, expression_restorer_options, face_debugger_options, face_detector, face_editor_options, face_enhancer_options, face_landmarker, face_masker, face_selector, face_swapper_options, frame_colorizer_options, 
frame_enhancer_options, instant_runner, job_manager, job_runner, lip_syncer_options, memory, output, output_options, preview, processors, source, target, temp_frame, terminal, trim_frame, ui_workflow +from facefusion.uis.components import about, age_modifier_options, common_options, deep_swapper_options, execution, execution_queue_count, execution_thread_count, expression_restorer_options, face_debugger_options, face_detector, face_editor_options, face_enhancer_options, face_landmarker, face_masker, face_selector, face_swapper_options, frame_colorizer_options, frame_enhancer_options, instant_runner, job_manager, job_runner, lip_syncer_options, memory, output, output_options, preview, processors, source, target, temp_frame, terminal, trim_frame, ui_workflow def pre_check() -> bool: @@ -18,6 +18,8 @@ def render() -> gradio.Blocks: processors.render() with gradio.Blocks(): age_modifier_options.render() + with gradio.Blocks(): + deep_swapper_options.render() with gradio.Blocks(): expression_restorer_options.render() with gradio.Blocks(): @@ -79,6 +81,7 @@ def render() -> gradio.Blocks: def listen() -> None: processors.listen() age_modifier_options.listen() + deep_swapper_options.listen() expression_restorer_options.listen() face_debugger_options.listen() face_editor_options.listen() diff --git a/facefusion/uis/layouts/webcam.py b/facefusion/uis/layouts/webcam.py index 6b059cc7..e57fb4b5 100644 --- a/facefusion/uis/layouts/webcam.py +++ b/facefusion/uis/layouts/webcam.py @@ -1,7 +1,7 @@ import gradio from facefusion import state_manager -from facefusion.uis.components import about, age_modifier_options, execution, execution_thread_count, expression_restorer_options, face_debugger_options, face_editor_options, face_enhancer_options, face_swapper_options, frame_colorizer_options, frame_enhancer_options, lip_syncer_options, processors, source, webcam, webcam_options +from facefusion.uis.components import about, age_modifier_options, deep_swapper_options, execution, 
execution_thread_count, expression_restorer_options, face_debugger_options, face_editor_options, face_enhancer_options, face_swapper_options, frame_colorizer_options, frame_enhancer_options, lip_syncer_options, processors, source, webcam, webcam_options def pre_check() -> bool: @@ -18,6 +18,8 @@ def render() -> gradio.Blocks: processors.render() with gradio.Blocks(): age_modifier_options.render() + with gradio.Blocks(): + deep_swapper_options.render() with gradio.Blocks(): expression_restorer_options.render() with gradio.Blocks(): @@ -50,6 +52,7 @@ def render() -> gradio.Blocks: def listen() -> None: processors.listen() age_modifier_options.listen() + deep_swapper_options.listen() expression_restorer_options.listen() face_debugger_options.listen() face_editor_options.listen() diff --git a/facefusion/uis/typing.py b/facefusion/uis/typing.py index 1c13759d..f45135d8 100644 --- a/facefusion/uis/typing.py +++ b/facefusion/uis/typing.py @@ -9,6 +9,7 @@ ComponentName = Literal\ 'age_modifier_model_dropdown', 'benchmark_cycles_slider', 'benchmark_runs_checkbox_group', + 'deep_swapper_model_dropdown', 'expression_restorer_factor_slider', 'expression_restorer_model_dropdown', 'face_debugger_items_checkbox_group', diff --git a/facefusion/wording.py b/facefusion/wording.py index 23f19f5d..0f94d842 100755 --- a/facefusion/wording.py +++ b/facefusion/wording.py @@ -143,6 +143,7 @@ WORDING : Dict[str, Any] =\ 'processors': 'load a single or multiple processors (choices: {choices}, ...)', 'age_modifier_model': 'choose the model responsible for aging the face', 'age_modifier_direction': 'specify the direction in which the age should be modified', + 'deep_swapper_model': 'choose the model responsible for swapping the face', 'expression_restorer_model': 'choose the model responsible for restoring the expression', 'expression_restorer_factor': 'restore factor of expression from the target face', 'face_debugger_items': 'load a single or multiple processors (choices: {choices})', @@ 
-226,6 +227,7 @@ WORDING : Dict[str, Any] =\ 'benchmark_runs_checkbox_group': 'BENCHMARK RUNS', 'clear_button': 'CLEAR', 'common_options_checkbox_group': 'OPTIONS', + 'deep_swapper_model_dropdown': 'DEEP SWAPPER MODEL', 'execution_providers_checkbox_group': 'EXECUTION PROVIDERS', 'execution_queue_count_slider': 'EXECUTION QUEUE COUNT', 'execution_thread_count_slider': 'EXECUTION THREAD COUNT',