add pre-commit hooks configuration

Branch: main
Tran Xen, 2 years ago
parent 8577d0186d
commit 5d4a29ff1e

@@ -0,0 +1,14 @@
repos:
  - repo: https://github.com/psf/black
    rev: 23.7.0
    hooks:
      - id: black
  - repo: https://github.com/pre-commit/pre-commit-hooks
    rev: v4.4.0
    hooks:
      - id: check-added-large-files
      - id: check-case-conflict
      - id: check-docstring-first
      - id: detect-private-key
      - id: trailing-whitespace
      - id: fix-byte-order-marker
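Note: these hooks run Black plus a set of basic sanity checks on each commit. To enable them locally one would typically run `pre-commit install` after `pip install pre-commit`; a minimal sketch of driving the same steps from Python:

import subprocess

subprocess.run(["pre-commit", "install"], check=True)  # register the git hook
subprocess.run(["pre-commit", "run", "--all-files"])   # lint the whole tree once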

@@ -6,87 +6,153 @@ import base64, io
from io import BytesIO
from typing import List, Tuple, Optional


class InpaintingWhen(Enum):
    NEVER = "Never"
    BEFORE_UPSCALING = "Before Upscaling/all"
    BEFORE_RESTORE_FACE = "After Upscaling/Before Restore Face"
    AFTER_ALL = "After All"


class FaceSwapUnit(BaseModel):
    # The image given in reference
    source_img: str = Field(
        description="base64 reference image",
        examples=["data:image/jpeg;base64,/9j/4AAQSkZJRgABAQECWAJYAAD...."],
        default=None,
    )
    # The checkpoint file
    source_face: str = Field(
        description="face checkpoint (from models/faceswaplab/faces)",
        examples=["my_face.pkl"],
        default=None,
    )
    # base64 batch source images
    batch_images: Tuple[str] = Field(
        description="list of base64 batch source images",
        examples=[
            "data:image/jpeg;base64,/9j/4AAQSkZJRgABAQECWAJYAAD....",
            "data:image/jpeg;base64,/9j/4AAQSkZJRgABAQECWAJYAAD....",
        ],
        default=None,
    )
    # Will blend faces if True
    blend_faces: bool = Field(description="Will blend faces if True", default=True)
    # Use same gender filtering
    same_gender: bool = Field(description="Use same gender filtering", default=True)
    # If True, discard images with low similarity
    check_similarity: bool = Field(
        description="If True, discard images with low similarity", default=False
    )
    # if True will compute similarity and add it to the image info
    compute_similarity: bool = Field(
        description="If True will compute similarity and add it to the image info",
        default=False,
    )
    # Minimum similarity against the used face (reference, batch or checkpoint)
    min_sim: float = Field(
        description="Minimum similarity against the used face (reference, batch or checkpoint)",
        default=0.0,
    )
    # Minimum similarity against the reference (reference or checkpoint if checkpoint is given)
    min_ref_sim: float = Field(
        description="Minimum similarity against the reference (reference or checkpoint if checkpoint is given)",
        default=0.0,
    )
    # The face index to use for swapping
    faces_index: Tuple[int] = Field(
        description="The face index to use for swapping, list of face numbers starting from 0",
        default=(0,),
    )


class PostProcessingOptions(BaseModel):
    face_restorer_name: str = Field(description="face restorer name", default=None)
    restorer_visibility: float = Field(
        description="face restorer visibility", default=1, le=1, ge=0
    )
    codeformer_weight: float = Field(
        description="face restorer codeformer weight", default=1, le=1, ge=0
    )

    upscaler_name: str = Field(description="upscaler name", default=None)
    scale: float = Field(description="upscaling scale", default=1, le=10, ge=0)
    upscale_visibility: float = Field(
        description="upscaler visibility", default=1, le=1, ge=0
    )

    inpainting_denoising_strengh: float = Field(
        description="Inpainting denoising strenght", default=0, lt=1, ge=0
    )
    inpainting_prompt: str = Field(
        description="Inpainting denoising strenght",
        examples=["Portrait of a [gender]"],
        default="Portrait of a [gender]",
    )
    inpainting_negative_prompt: str = Field(
        description="Inpainting denoising strenght",
        examples=[
            "Deformed, blurry, bad anatomy, disfigured, poorly drawn face, mutation"
        ],
        default="",
    )
    inpainting_steps: int = Field(
        description="Inpainting steps",
        examples=["Portrait of a [gender]"],
        ge=1,
        le=150,
        default=20,
    )
    inpainting_sampler: str = Field(
        description="Inpainting sampler", examples=["Euler"], default="Euler"
    )
    inpainting_when: InpaintingWhen = Field(
        description="When inpainting happens",
        examples=[e.value for e in InpaintingWhen.__members__.values()],
        default=InpaintingWhen.NEVER,
    )


class FaceSwapRequest(BaseModel):
    image: str = Field(
        description="base64 reference image",
        examples=["data:image/jpeg;base64,/9j/4AAQSkZJRgABAQECWAJYAAD...."],
        default=None,
    )
    units: List[FaceSwapUnit]
    postprocessing: PostProcessingOptions


class FaceSwapResponse(BaseModel):
    images: List[str] = Field(description="base64 swapped image", default=None)
    infos: List[str]

    @property
    def pil_images(self):
        return [base64_to_pil(img) for img in self.images]


def pil_to_base64(img):
    if isinstance(img, str):
        img = Image.open(img)
    buffer = BytesIO()
    img.save(buffer, format="PNG")
    img_data = buffer.getvalue()
    base64_data = base64.b64encode(img_data)
    return base64_data.decode("utf-8")


def base64_to_pil(base64str: Optional[str]) -> Optional[Image.Image]:
    if base64str is None:
        return None
    if "base64," in base64str:  # check if the base64 string has a data URL scheme
        base64_data = base64str.split("base64,")[-1]
        img_bytes = base64.b64decode(base64_data)
    else:
        # if no data URL scheme, just decode
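Note: `pil_to_base64` returns a bare base64 string (no data URL prefix) while `base64_to_pil` accepts either form, so the pair round-trips. A minimal sketch, assuming Pillow is installed and the truncated `else` branch simply decodes the raw string:

from PIL import Image
from client_utils import pil_to_base64, base64_to_pil

img = Image.new("RGB", (64, 64), "red")  # synthetic test image
encoded = pil_to_base64(img)             # PNG bytes encoded as base64
decoded = base64_to_pil(encoded)
assert decoded.size == img.size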

@@ -1,39 +1,45 @@
import requests
from PIL import Image
from client_utils import (
    FaceSwapRequest,
    FaceSwapUnit,
    PostProcessingOptions,
    FaceSwapResponse,
    pil_to_base64,
)

address = "http://127.0.0.1:7860"

# First face unit :
unit1 = FaceSwapUnit(
    source_img=pil_to_base64("../../references/man.png"),  # The face you want to use
    faces_index=(0,),  # Replace first face
)

# Second face unit :
unit2 = FaceSwapUnit(
    source_img=pil_to_base64("../../references/woman.png"),  # The face you want to use
    same_gender=True,
    faces_index=(0,),  # Replace first woman since same gender is on
)

# Post-processing config :
pp = PostProcessingOptions(
    face_restorer_name="CodeFormer", codeformer_weight=0.5, restorer_visibility=1
)

# Prepare the request
request = FaceSwapRequest(
    image=pil_to_base64("test_image.png"), units=[unit1, unit2], postprocessing=pp
)

result = requests.post(
    url=f"{address}/faceswaplab/swap_face",
    data=request.json(),
    headers={"Content-Type": "application/json; charset=utf-8"},
)
response = FaceSwapResponse.parse_obj(result.json())

for img, info in zip(response.pil_images, response.infos):
    img.show(title=info)
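Note: `img.show(title=info)` opens one viewer window per result; for unattended runs it may be preferable to write files instead. A small, hypothetical variation on the loop above:

for i, (img, info) in enumerate(zip(response.pil_images, response.infos)):
    img.save(f"swapped_{i}.png")  # hypothetical output file names
    print(info)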

@@ -8,16 +8,26 @@ import urllib.request
req_file = os.path.join(os.path.dirname(os.path.realpath(__file__)), "requirements.txt")
models_dir = os.path.abspath("models/faceswaplab")
faces_dir = os.path.abspath(os.path.join("models", "faceswaplab", "faces"))

model_url = "https://huggingface.co/henryruhs/roop/resolve/main/inswapper_128.onnx"
model_name = os.path.basename(model_url)
model_path = os.path.join(models_dir, model_name)


def download(url, path):
    request = urllib.request.urlopen(url)
    total = int(request.headers.get("Content-Length", 0))
    with tqdm(
        total=total, desc="Downloading", unit="B", unit_scale=True, unit_divisor=1024
    ) as progress:
        urllib.request.urlretrieve(
            url,
            path,
            reporthook=lambda count, block_size, total_size: progress.update(
                block_size
            ),
        )


os.makedirs(models_dir, exist_ok=True)
os.makedirs(faces_dir, exist_ok=True)
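Note: the hunk is truncated before the call site, but `download` is presumably guarded so the inswapper model is fetched only once. A sketch of that guard using the names defined above:

if not os.path.exists(model_path):  # skip the download if the model is already present
    download(model_url, model_path)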

@@ -0,0 +1,7 @@
[mypy]
check_untyped_defs = True
disallow_any_generics = True
disallow_untyped_calls = True
disallow_untyped_defs = True
ignore_missing_imports = True
strict_optional = False
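Note: with `disallow_untyped_defs`, mypy rejects any function without annotations, while `strict_optional = False` keeps the `default=None` fields used throughout the API models from being flagged. A tiny illustration of the first flag:

def scale(x):  # mypy: error, function is missing a type annotation
    return x * 2

def scale_typed(x: float) -> float:  # passes
    return x * 2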

@@ -2,17 +2,20 @@ import importlib
from scripts.faceswaplab_api import faceswaplab_api
from scripts.faceswaplab_settings import faceswaplab_settings
from scripts.faceswaplab_ui import faceswaplab_tab, faceswaplab_unit_ui
from scripts.faceswaplab_utils.models_utils import (
    get_current_model,
    get_face_checkpoints,
)
from scripts import faceswaplab_globals
from scripts.faceswaplab_swapping import swapper
from scripts.faceswaplab_utils import faceswaplab_logging, imgutils
from scripts.faceswaplab_utils import models_utils
from scripts.faceswaplab_postprocessing import upscaling
import numpy as np

# Reload all the modules when using "apply and restart"
# This is mainly done for development purposes
importlib.reload(swapper)
importlib.reload(faceswaplab_logging)
importlib.reload(faceswaplab_globals)
@@ -35,20 +38,25 @@ from modules import script_callbacks, scripts
from insightface.app.common import Face
from modules import scripts, shared
from modules.images import save_image, image_grid
from modules.processing import (
    Processed,
    StableDiffusionProcessing,
    StableDiffusionProcessingImg2Img,
)
from modules.shared import opts
from PIL import Image

from scripts.faceswaplab_utils.imgutils import pil_to_cv2, check_against_nsfw
from scripts.faceswaplab_utils.faceswaplab_logging import logger, save_img_debug
from scripts.faceswaplab_globals import VERSION_FLAG
from scripts.faceswaplab_postprocessing.postprocessing_options import (
    PostProcessingOptions,
)
from scripts.faceswaplab_postprocessing.postprocessing import enhance_image
from scripts.faceswaplab_ui.faceswaplab_unit_settings import FaceSwapUnitSettings

EXTENSION_PATH = os.path.join("extensions", "sd-webui-faceswaplab")

# Register the tab, done here to prevent it from being added twice
@@ -56,44 +64,44 @@ script_callbacks.on_ui_tabs(faceswaplab_tab.on_ui_tabs)
try:
    import modules.script_callbacks as script_callbacks

    script_callbacks.on_app_started(faceswaplab_api.faceswaplab_api)
except:
    pass


class FaceSwapScript(scripts.Script):
    def __init__(self) -> None:
        logger.info(f"FaceSwapLab {VERSION_FLAG}")
        super().__init__()

    @property
    def units_count(self):
        return opts.data.get("faceswaplab_units_count", 3)

    @property
    def upscaled_swapper_in_generated(self):
        return opts.data.get("faceswaplab_upscaled_swapper", False)

    @property
    def upscaled_swapper_in_source(self):
        return opts.data.get("faceswaplab_upscaled_swapper_in_source", False)

    @property
    def enabled(self) -> bool:
        """Return True if any unit is enabled and the state is not interrupted"""
        return any([u.enable for u in self.units]) and not shared.state.interrupted

    @property
    def keep_original_images(self):
        return opts.data.get("faceswaplab_keep_original", False)

    @property
    def swap_in_generated_units(self):
        return [u for u in self.units if u.swap_in_generated and u.enable]

    @property
    def swap_in_source_units(self):
        return [u for u in self.units if u.swap_in_source and u.enable]

    def title(self):
@@ -102,7 +110,6 @@ class FaceSwapScript(scripts.Script):
    def show(self, is_img2img):
        return scripts.AlwaysVisible

    def ui(self, is_img2img):
        with gr.Accordion(f"FaceSwapLab {VERSION_FLAG}", open=False):
            components = []
@@ -121,7 +128,7 @@ class FaceSwapScript(scripts.Script):
        # print("Running in ", alwayson.index(self), "position")
        # logger.info("Running scripts : %s", pformat(runner.alwayson_scripts))

    def read_config(self, p: StableDiffusionProcessing, *components):
        # The order of processing for the components is important
        # The method first process faceswap units then postprocessing units
@@ -129,15 +136,15 @@ class FaceSwapScript(scripts.Script):
        self.units: List[FaceSwapUnitSettings] = []

        # Parse and convert units flat components into FaceSwapUnitSettings
        for i in range(0, self.units_count):
            self.units += [FaceSwapUnitSettings.get_unit_configuration(i, components)]

        for i, u in enumerate(self.units):
            logger.debug("%s, %s", pformat(i), pformat(u))

        # Parse the postprocessing options
        # We must first find where to start from (after face swapping units)
        len_conf: int = len(fields(FaceSwapUnitSettings))
        shift: int = self.units_count * len_conf
        self.postprocess_options = PostProcessingOptions(
@@ -145,67 +152,92 @@ class FaceSwapScript(scripts.Script):
        )
        logger.debug("%s", pformat(self.postprocess_options))

        if self.enabled:
            p.do_not_save_samples = not self.keep_original_images

    def process(self, p: StableDiffusionProcessing, *components):
        self.read_config(p, *components)

        # If is instance of img2img, we check if face swapping in source is required.
        if isinstance(p, StableDiffusionProcessingImg2Img):
            if self.enabled and len(self.swap_in_source_units) > 0:
                init_images: List[Tuple[Optional[Image.Image], Optional[str]]] = [
                    (img, None) for img in p.init_images
                ]
                new_inits = swapper.process_images_units(
                    get_current_model(),
                    self.swap_in_source_units,
                    images=init_images,
                    upscaled_swapper=self.upscaled_swapper_in_source,
                    force_blend=True,
                )
                logger.info(f"processed init images: {len(init_images)}")
                if new_inits is not None:
                    p.init_images = [img[0] for img in new_inits]

    def postprocess(self, p: StableDiffusionProcessing, processed: Processed, *args):
        if self.enabled:
            # Get the original images without the grid
            orig_images: List[Image.Image] = processed.images[
                processed.index_of_first_image :
            ]
            orig_infotexts: List[str] = processed.infotexts[
                processed.index_of_first_image :
            ]

            keep_original = self.keep_original_images

            # These are where images and infos of swapped images will be stored
            images = []
            infotexts = []
            if (len(self.swap_in_generated_units)) > 0:
                for i, (img, info) in enumerate(zip(orig_images, orig_infotexts)):
                    batch_index = i % p.batch_size
                    swapped_images = swapper.process_images_units(
                        get_current_model(),
                        self.swap_in_generated_units,
                        images=[(img, info)],
                        upscaled_swapper=self.upscaled_swapper_in_generated,
                    )
                    if swapped_images is None:
                        continue

                    logger.info(f"{len(swapped_images)} images swapped")

                    for swp_img, new_info in swapped_images:
                        img = swp_img  # Will only swap the last image in the batch in next units (FIXME : hard to fix properly but not really critical)

                        if swp_img is not None:
                            save_img_debug(swp_img, "Before apply mask")
                            swp_img = imgutils.apply_mask(swp_img, p, batch_index)
                            save_img_debug(swp_img, "After apply mask")

                            try:
                                if self.postprocess_options is not None:
                                    swp_img = enhance_image(
                                        swp_img, self.postprocess_options
                                    )
                            except Exception as e:
                                logger.error("Failed to upscale : %s", e)

                            logger.info("Add swp image to processed")
                            images.append(swp_img)
                            infotexts.append(new_info)
                            if p.outpath_samples and opts.samples_save:
                                save_image(
                                    swp_img,
                                    p.outpath_samples,
                                    "",
                                    p.all_seeds[batch_index],
                                    p.all_prompts[batch_index],
                                    opts.samples_format,
                                    info=new_info,
                                    p=p,
                                    suffix="-swapped",
                                )
                        else:
                            logger.error("swp image is None")
            else:
                keep_original = True

            # Generate grid :
            if opts.return_grid and len(images) > 1:

@@ -4,14 +4,22 @@ from fastapi import FastAPI, Body
from fastapi.exceptions import HTTPException
from modules.api.models import *
from modules.api import api
from scripts.faceswaplab_api.faceswaplab_api_types import (
    FaceSwapUnit,
    FaceSwapRequest,
    FaceSwapResponse,
)
from scripts.faceswaplab_globals import VERSION_FLAG
import gradio as gr
from typing import List, Optional
from scripts.faceswaplab_swapping import swapper
from scripts.faceswaplab_utils.faceswaplab_logging import save_img_debug
from scripts.faceswaplab_ui.faceswaplab_unit_settings import FaceSwapUnitSettings
from scripts.faceswaplab_utils.imgutils import (
    pil_to_cv2,
    check_against_nsfw,
    base64_to_pil,
)
from scripts.faceswaplab_utils.models_utils import get_current_model
from modules.shared import opts
@@ -26,45 +34,59 @@ def encode_to_base64(image):
    else:
        return ""


def encode_np_to_base64(image):
    pil = Image.fromarray(image)
    return api.encode_pil_to_base64(pil)


def faceswaplab_api(_: gr.Blocks, app: FastAPI):
    @app.get(
        "/faceswaplab/version",
        tags=["faceswaplab"],
        description="Get faceswaplab version",
    )
    async def version():
        return {"version": VERSION_FLAG}

    # use post as we consider the method non idempotent (which is debatable)
    @app.post(
        "/faceswaplab/swap_face",
        tags=["faceswaplab"],
        description="Swap a face in an image using units",
    )
    async def swap_face(request: FaceSwapRequest) -> FaceSwapResponse:
        units: List[FaceSwapUnitSettings] = []
        src_image: Optional[Image.Image] = base64_to_pil(request.image)
        response = FaceSwapResponse(images=[], infos=[])
        if src_image is not None:
            for u in request.units:
                units.append(
                    FaceSwapUnitSettings(
                        source_img=base64_to_pil(u.source_img),
                        source_face=u.source_face,
                        _batch_files=u.get_batch_images(),
                        blend_faces=u.blend_faces,
                        enable=True,
                        same_gender=u.same_gender,
                        check_similarity=u.check_similarity,
                        _compute_similarity=u.compute_similarity,
                        min_ref_sim=u.min_ref_sim,
                        min_sim=u.min_sim,
                        _faces_index=",".join([str(i) for i in (u.faces_index)]),
                        swap_in_generated=True,
                        swap_in_source=False,
                    )
                )

            swapped_images = swapper.process_images_units(
                get_current_model(),
                images=[(src_image, None)],
                units=units,
                upscaled_swapper=opts.data.get("faceswaplab_upscaled_swapper", False),
            )
            for img, info in swapped_images:
                response.images.append(encode_to_base64(img))
                response.infos.append(info)

        return response
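Note: once `faceswaplab_api` is registered via `on_app_started`, the routes can be smoke-tested against a running webui. A minimal check of the version endpoint, assuming the default local address:

import requests

print(requests.get("http://127.0.0.1:7860/faceswaplab/version").json())
# expected: {"version": "v1.1.0"}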

@@ -5,69 +5,137 @@ import dill as pickle
import gradio as gr
from insightface.app.common import Face
from PIL import Image
from scripts.faceswaplab_utils.imgutils import (
    pil_to_cv2,
    check_against_nsfw,
    base64_to_pil,
)
from scripts.faceswaplab_utils.faceswaplab_logging import logger
from pydantic import BaseModel, Field
from scripts.faceswaplab_postprocessing.postprocessing_options import InpaintingWhen


class FaceSwapUnit(BaseModel):
    # The image given in reference
    source_img: str = Field(
        description="base64 reference image",
        examples=["data:image/jpeg;base64,/9j/4AAQSkZJRgABAQECWAJYAAD...."],
        default=None,
    )
    # The checkpoint file
    source_face: str = Field(
        description="face checkpoint (from models/faceswaplab/faces)",
        examples=["my_face.pkl"],
        default=None,
    )
    # base64 batch source images
    batch_images: Tuple[str] = Field(
        description="list of base64 batch source images",
        examples=[
            "data:image/jpeg;base64,/9j/4AAQSkZJRgABAQECWAJYAAD....",
            "data:image/jpeg;base64,/9j/4AAQSkZJRgABAQECWAJYAAD....",
        ],
        default=None,
    )
    # Will blend faces if True
    blend_faces: bool = Field(description="Will blend faces if True", default=True)
    # Use same gender filtering
    same_gender: bool = Field(description="Use same gender filtering", default=True)
    # If True, discard images with low similarity
    check_similarity: bool = Field(
        description="If True, discard images with low similarity", default=False
    )
    # if True will compute similarity and add it to the image info
    compute_similarity: bool = Field(
        description="If True will compute similarity and add it to the image info",
        default=False,
    )
    # Minimum similarity against the used face (reference, batch or checkpoint)
    min_sim: float = Field(
        description="Minimum similarity against the used face (reference, batch or checkpoint)",
        default=0.0,
    )
    # Minimum similarity against the reference (reference or checkpoint if checkpoint is given)
    min_ref_sim: float = Field(
        description="Minimum similarity against the reference (reference or checkpoint if checkpoint is given)",
        default=0.0,
    )
    # The face index to use for swapping
    faces_index: Tuple[int] = Field(
        description="The face index to use for swapping, list of face numbers starting from 0",
        default=(0,),
    )

    def get_batch_images(self) -> List[Image.Image]:
        images = []
        if self.batch_images:
            for img in self.batch_images:
                images.append(base64_to_pil(img))
        return images


class PostProcessingOptions(BaseModel):
    face_restorer_name: str = Field(description="face restorer name", default=None)
    restorer_visibility: float = Field(
        description="face restorer visibility", default=1, le=1, ge=0
    )
    codeformer_weight: float = Field(
        description="face restorer codeformer weight", default=1, le=1, ge=0
    )
    upscaler_name: str = Field(description="upscaler name", default=None)
    scale: float = Field(description="upscaling scale", default=1, le=10, ge=0)
    upscale_visibility: float = Field(
        description="upscaler visibility", default=1, le=1, ge=0
    )
    inpainting_denoising_strengh: float = Field(
        description="Inpainting denoising strenght", default=0, lt=1, ge=0
    )
    inpainting_prompt: str = Field(
        description="Inpainting denoising strenght",
        examples=["Portrait of a [gender]"],
        default="Portrait of a [gender]",
    )
    inpainting_negative_prompt: str = Field(
        description="Inpainting denoising strenght",
        examples=[
            "Deformed, blurry, bad anatomy, disfigured, poorly drawn face, mutation"
        ],
        default="",
    )
    inpainting_steps: int = Field(
        description="Inpainting steps",
        examples=["Portrait of a [gender]"],
        ge=1,
        le=150,
        default=20,
    )
    inpainting_sampler: str = Field(
        description="Inpainting sampler", examples=["Euler"], default="Euler"
    )
    inpainting_when: InpaintingWhen = Field(
        description="When inpainting happens",
        examples=[e.value for e in InpaintingWhen.__members__.values()],
        default=InpaintingWhen.NEVER,
    )


class FaceSwapRequest(BaseModel):
    image: str = Field(
        description="base64 reference image",
        examples=["data:image/jpeg;base64,/9j/4AAQSkZJRgABAQECWAJYAAD...."],
        default=None,
    )
    units: List[FaceSwapUnit]
    postprocessing: PostProcessingOptions


class FaceSwapResponse(BaseModel):
    images: List[str] = Field(description="base64 swapped image", default=None)
    infos: List[str]

@@ -1,11 +1,10 @@
from scripts.faceswaplab_utils.faceswaplab_logging import logger
import os

MODELS_DIR = os.path.abspath(os.path.join("models", "faceswaplab"))
ANALYZER_DIR = os.path.abspath(os.path.join(MODELS_DIR, "analysers"))
FACE_PARSER_DIR = os.path.abspath(os.path.join(MODELS_DIR, "parser"))

VERSION_FLAG = "v1.1.0"
EXTENSION_PATH = os.path.join("extensions", "sd-webui-faceswaplab")

NSFW_SCORE = 0.7

@@ -6,26 +6,29 @@ import numpy as np
from modules import shared
from scripts.faceswaplab_utils import imgutils
from modules import shared, processing, codeformer_model
from modules.processing import StableDiffusionProcessingImg2Img
from scripts.faceswaplab_postprocessing.postprocessing_options import (
    PostProcessingOptions,
    InpaintingWhen,
)
from modules import sd_models
from scripts.faceswaplab_swapping import swapper


def img2img_diffusion(img: Image.Image, pp: PostProcessingOptions) -> Image.Image:
    if pp.inpainting_denoising_strengh == 0:
        return img

    try:
        logger.info(
            f"""Inpainting face
Sampler : {pp.inpainting_sampler}
inpainting_denoising_strength : {pp.inpainting_denoising_strengh}
inpainting_steps : {pp.inpainting_steps}
"""
        )
        if not isinstance(pp.inpainting_sampler, str):
            pass

        logger.info("send faces to image to image")
@@ -33,44 +36,51 @@ inpainting_steps : {pp.inpainting_steps}
        faces = swapper.get_faces(imgutils.pil_to_cv2(img))
        if faces:
            for face in faces:
                bbox = face.bbox.astype(int)
                mask = imgutils.create_mask(img, bbox)
                prompt = pp.inpainting_prompt.replace(
                    "[gender]", "man" if face["gender"] == 1 else "woman"
                )
                negative_prompt = pp.inpainting_negative_prompt.replace(
                    "[gender]", "man" if face["gender"] == 1 else "woman"
                )
                logger.info("Denoising prompt : %s", prompt)
                logger.info("Denoising strenght : %s", pp.inpainting_denoising_strengh)

                i2i_kwargs = {
                    "sampler_name": pp.inpainting_sampler,
                    "do_not_save_samples": True,
                    "steps": pp.inpainting_steps,
                    "width": img.width,
                    "inpainting_fill": 1,
                    "inpaint_full_res": True,
                    "height": img.height,
                    "mask": mask,
                    "prompt": prompt,
                    "negative_prompt": negative_prompt,
                    "denoising_strength": pp.inpainting_denoising_strengh,
                }
                current_model_checkpoint = shared.opts.sd_model_checkpoint
                if pp.inpainting_model and pp.inpainting_model != "Current":
                    # Change checkpoint
                    shared.opts.sd_model_checkpoint = pp.inpainting_model
                    sd_models.select_checkpoint
                    sd_models.load_model()
                i2i_p = StableDiffusionProcessingImg2Img([img], **i2i_kwargs)
                i2i_processed = processing.process_images(i2i_p)
                if pp.inpainting_model and pp.inpainting_model != "Current":
                    # Restore checkpoint
                    shared.opts.sd_model_checkpoint = current_model_checkpoint
                    sd_models.select_checkpoint
                    sd_models.load_model()

                images = i2i_processed.images
                if len(images) > 0:
                    img = images[0]
        return img
    except Exception as e:
        logger.error("Failed to apply img2img to face : %s", e)
        import traceback

        traceback.print_exc()
        raise e
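Note: the checkpoint switch around `process_images` is written twice inline (swap, then restore). The same idea could be expressed once as a context manager; a hypothetical sketch, not part of this commit:

from contextlib import contextmanager
from modules import sd_models, shared

@contextmanager
def temporary_checkpoint(name):
    # Swap the active SD checkpoint, then restore the previous one on exit.
    previous = shared.opts.sd_model_checkpoint
    shared.opts.sd_model_checkpoint = name
    sd_models.load_model()
    try:
        yield
    finally:
        shared.opts.sd_model_checkpoint = previous
        sd_models.load_model()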

@@ -1,25 +1,28 @@
from modules.face_restoration import FaceRestoration
from scripts.faceswaplab_utils.faceswaplab_logging import logger
from PIL import Image
from scripts.faceswaplab_postprocessing.postprocessing_options import (
    PostProcessingOptions,
    InpaintingWhen,
)
from scripts.faceswaplab_postprocessing.i2i_pp import img2img_diffusion
from scripts.faceswaplab_postprocessing.upscaling import upscale_img, restore_face


def enhance_image(image: Image.Image, pp_options: PostProcessingOptions) -> Image.Image:
    result_image = image
    try:
        if pp_options.inpainting_when == InpaintingWhen.BEFORE_UPSCALING.value:
            result_image = img2img_diffusion(image, pp_options)
        result_image = upscale_img(result_image, pp_options)

        if pp_options.inpainting_when == InpaintingWhen.BEFORE_RESTORE_FACE.value:
            result_image = img2img_diffusion(image, pp_options)
        result_image = restore_face(result_image, pp_options)

        if pp_options.inpainting_when == InpaintingWhen.AFTER_ALL.value:
            result_image = img2img_diffusion(image, pp_options)

    except Exception as e:
        logger.error("Failed to upscale %s", e)

@@ -4,12 +4,14 @@ from dataclasses import dataclass
from modules import shared
from enum import Enum


class InpaintingWhen(Enum):
    NEVER = "Never"
    BEFORE_UPSCALING = "Before Upscaling/all"
    BEFORE_RESTORE_FACE = "After Upscaling/Before Restore Face"
    AFTER_ALL = "After All"


@dataclass
class PostProcessingOptions:
    face_restorer_name: str = ""
@@ -20,13 +22,13 @@ class PostProcessingOptions:
    scale: int = 1
    upscale_visibility: float = 0.5

    inpainting_denoising_strengh: float = 0
    inpainting_prompt: str = ""
    inpainting_negative_prompt: str = ""
    inpainting_steps: int = 20
    inpainting_sampler: str = "Euler"
    inpainting_when: InpaintingWhen = InpaintingWhen.BEFORE_UPSCALING
    inpainting_model: str = "Current"

    @property
    def upscaler(self) -> UpscalerData:

@@ -1,11 +1,14 @@
from scripts.faceswaplab_postprocessing.postprocessing_options import (
    PostProcessingOptions,
    InpaintingWhen,
)
from scripts.faceswaplab_utils.faceswaplab_logging import logger
from PIL import Image
import numpy as np
from modules import shared, processing, codeformer_model


def upscale_img(image: Image.Image, pp_options: PostProcessingOptions) -> Image.Image:
    if pp_options.upscaler is not None and pp_options.upscaler.name != "None":
        original_image = image.copy()
        logger.info(
@@ -23,15 +26,17 @@ def upscale_img(image : Image.Image, pp_options :PostProcessingOptions) -> Image
        return result_image
    return image


def restore_face(image: Image.Image, pp_options: PostProcessingOptions) -> Image.Image:
    if pp_options.face_restorer is not None:
        original_image = image.copy()
        logger.info("Restore face with %s", pp_options.face_restorer.name())
        numpy_image = np.array(image)
        if pp_options.face_restorer_name == "CodeFormer":
            numpy_image = codeformer_model.codeformer.restore(
                numpy_image, w=pp_options.codeformer_weight
            )
        else:
            numpy_image = pp_options.face_restorer.restore(numpy_image)

        restored_image = Image.fromarray(numpy_image)

@ -2,52 +2,215 @@ from scripts.faceswaplab_utils.models_utils import get_models
from modules import script_callbacks, shared from modules import script_callbacks, shared
import gradio as gr import gradio as gr
def on_ui_settings(): def on_ui_settings():
section = ('faceswaplab', "FaceSwapLab") section = ("faceswaplab", "FaceSwapLab")
models = get_models() models = get_models()
shared.opts.add_option("faceswaplab_model", shared.OptionInfo( shared.opts.add_option(
models[0] if len(models) > 0 else "None", "FaceSwapLab FaceSwap Model", gr.Dropdown, {"interactive": True, "choices" : models}, section=section)) "faceswaplab_model",
shared.opts.add_option("faceswaplab_keep_original", shared.OptionInfo( shared.OptionInfo(
False, "keep original image before swapping", gr.Checkbox, {"interactive": True}, section=section)) models[0] if len(models) > 0 else "None",
shared.opts.add_option("faceswaplab_units_count", shared.OptionInfo( "FaceSwapLab FaceSwap Model",
3, "Max faces units (requires restart)", gr.Slider, {"minimum": 1, "maximum": 10, "step": 1}, section=section)) gr.Dropdown,
{"interactive": True, "choices": models},
shared.opts.add_option("faceswaplab_detection_threshold", shared.OptionInfo( section=section,
0.5, "Detection threshold ", gr.Slider, {"minimum": 0.1, "maximum": 0.99, "step": 0.001}, section=section)) ),
)
shared.opts.add_option(
"faceswaplab_keep_original",
shared.OptionInfo(
False,
"keep original image before swapping",
gr.Checkbox,
{"interactive": True},
section=section,
),
)
shared.opts.add_option(
"faceswaplab_units_count",
shared.OptionInfo(
3,
"Max faces units (requires restart)",
gr.Slider,
{"minimum": 1, "maximum": 10, "step": 1},
section=section,
),
)
shared.opts.add_option(
"faceswaplab_detection_threshold",
shared.OptionInfo(
0.5,
"Detection threshold ",
gr.Slider,
{"minimum": 0.1, "maximum": 0.99, "step": 0.001},
section=section,
),
)
shared.opts.add_option("faceswaplab_pp_default_face_restorer", shared.OptionInfo( shared.opts.add_option(
None, "UI Default post processing face restorer (requires restart)", gr.Dropdown, {"interactive": True, "choices" : ["None"] + [x.name() for x in shared.face_restorers]}, section=section)) "faceswaplab_pp_default_face_restorer",
shared.opts.add_option("faceswaplab_pp_default_face_restorer_visibility", shared.OptionInfo( shared.OptionInfo(
1, "UI Default post processing face restorer visibility (requires restart)", gr.Slider, {"minimum": 0, "maximum": 1, "step": 0.001}, section=section)) None,
shared.opts.add_option("faceswaplab_pp_default_face_restorer_weight", shared.OptionInfo( "UI Default post processing face restorer (requires restart)",
1, "UI Default post processing face restorer weight (requires restart)", gr.Slider, {"minimum": 0, "maximum": 1, "step": 0.001}, section=section)) gr.Dropdown,
shared.opts.add_option("faceswaplab_pp_default_upscaler", shared.OptionInfo( {
None, "UI Default post processing upscaler (requires restart)", gr.Dropdown, {"interactive": True, "choices" : [upscaler.name for upscaler in shared.sd_upscalers]}, section=section)) "interactive": True,
shared.opts.add_option("faceswaplab_pp_default_upscaler_visibility", shared.OptionInfo( "choices": ["None"] + [x.name() for x in shared.face_restorers],
1, "UI Default post processing upscaler visibility(requires restart)", gr.Slider, {"minimum": 0, "maximum": 1, "step": 0.001}, section=section)) },
section=section,
),
)
shared.opts.add_option(
"faceswaplab_pp_default_face_restorer_visibility",
shared.OptionInfo(
1,
"UI Default post processing face restorer visibility (requires restart)",
gr.Slider,
{"minimum": 0, "maximum": 1, "step": 0.001},
section=section,
),
)
shared.opts.add_option(
"faceswaplab_pp_default_face_restorer_weight",
shared.OptionInfo(
1,
"UI Default post processing face restorer weight (requires restart)",
gr.Slider,
{"minimum": 0, "maximum": 1, "step": 0.001},
section=section,
),
)
shared.opts.add_option(
"faceswaplab_pp_default_upscaler",
shared.OptionInfo(
None,
"UI Default post processing upscaler (requires restart)",
gr.Dropdown,
{
"interactive": True,
"choices": [upscaler.name for upscaler in shared.sd_upscalers],
},
section=section,
),
)
shared.opts.add_option(
"faceswaplab_pp_default_upscaler_visibility",
shared.OptionInfo(
1,
"UI Default post processing upscaler visibility(requires restart)",
gr.Slider,
{"minimum": 0, "maximum": 1, "step": 0.001},
section=section,
),
)
shared.opts.add_option(
"faceswaplab_upscaled_swapper",
shared.OptionInfo(
False,
"Upscaled swapper. Applied only to the swapped faces. Apply transformations before merging with the original image.",
gr.Checkbox,
{"interactive": True},
section=section,
),
)
shared.opts.add_option(
"faceswaplab_upscaled_swapper_upscaler",
shared.OptionInfo(
None,
"Upscaled swapper upscaler (Recommanded : LDSR but slow)",
gr.Dropdown,
{
"interactive": True,
"choices": [upscaler.name for upscaler in shared.sd_upscalers],
},
section=section,
),
)
shared.opts.add_option(
"faceswaplab_upscaled_swapper_sharpen",
shared.OptionInfo(
False,
"Upscaled swapper sharpen",
gr.Checkbox,
{"interactive": True},
section=section,
),
)
shared.opts.add_option(
"faceswaplab_upscaled_swapper_fixcolor",
shared.OptionInfo(
False,
"Upscaled swapper color correction",
gr.Checkbox,
{"interactive": True},
section=section,
),
)
shared.opts.add_option(
"faceswaplab_upscaled_improved_mask",
shared.OptionInfo(
True,
"Use improved segmented mask (use pastenet to mask only the face)",
gr.Checkbox,
{"interactive": True},
section=section,
),
)
shared.opts.add_option(
"faceswaplab_upscaled_swapper_face_restorer",
shared.OptionInfo(
None,
"Upscaled swapper face restorer",
gr.Dropdown,
{
"interactive": True,
"choices": ["None"] + [x.name() for x in shared.face_restorers],
},
section=section,
),
)
shared.opts.add_option(
"faceswaplab_upscaled_swapper_face_restorer_visibility",
shared.OptionInfo(
1,
"Upscaled swapper face restorer visibility",
gr.Slider,
{"minimum": 0, "maximum": 1, "step": 0.001},
section=section,
),
)
shared.opts.add_option(
"faceswaplab_upscaled_swapper_face_restorer_weight",
shared.OptionInfo(
1,
"Upscaled swapper face restorer weight (codeformer)",
gr.Slider,
{"minimum": 0, "maximum": 1, "step": 0.001},
section=section,
),
)
shared.opts.add_option(
"faceswaplab_upscaled_swapper_fthresh",
shared.OptionInfo(
10,
"Upscaled swapper fthresh (diff sensitivity) 10 = default behaviour. Low impact.",
gr.Slider,
{"minimum": 5, "maximum": 250, "step": 1},
section=section,
),
)
shared.opts.add_option(
"faceswaplab_upscaled_swapper_erosion",
shared.OptionInfo(
1,
"Upscaled swapper mask erosion factor, 1 = default behaviour. The larger it is, the more blur is applied around the face. Too large and the facial change is no longer visible.",
gr.Slider,
{"minimum": 0, "maximum": 10, "step": 0.001},
section=section,
),
)
shared.opts.add_option("faceswaplab_upscaled_swapper", shared.OptionInfo(
False, "Upscaled swapper. Applied only to the swapped faces. Apply transformations before merging with the original image.", gr.Checkbox, {"interactive": True}, section=section))
shared.opts.add_option("faceswaplab_upscaled_swapper_upscaler", shared.OptionInfo(
None, "Upscaled swapper upscaler (Recommanded : LDSR but slow)", gr.Dropdown, {"interactive": True, "choices" : [upscaler.name for upscaler in shared.sd_upscalers]}, section=section))
shared.opts.add_option("faceswaplab_upscaled_swapper_sharpen", shared.OptionInfo(
False, "Upscaled swapper sharpen", gr.Checkbox, {"interactive": True}, section=section))
shared.opts.add_option("faceswaplab_upscaled_swapper_fixcolor", shared.OptionInfo(
False, "Upscaled swapper color correction", gr.Checkbox, {"interactive": True}, section=section))
shared.opts.add_option("faceswaplab_upscaled_improved_mask", shared.OptionInfo(
True, "Use improved segmented mask (use pastenet to mask only the face)", gr.Checkbox, {"interactive": True}, section=section))
shared.opts.add_option("faceswaplab_upscaled_swapper_face_restorer", shared.OptionInfo(
None, "Upscaled swapper face restorer", gr.Dropdown, {"interactive": True, "choices" : ["None"] + [x.name() for x in shared.face_restorers]}, section=section))
shared.opts.add_option("faceswaplab_upscaled_swapper_face_restorer_visibility", shared.OptionInfo(
1, "Upscaled swapper face restorer visibility", gr.Slider, {"minimum": 0, "maximum": 1, "step": 0.001}, section=section))
shared.opts.add_option("faceswaplab_upscaled_swapper_face_restorer_weight", shared.OptionInfo(
1, "Upscaled swapper face restorer weight (codeformer)", gr.Slider, {"minimum": 0, "maximum": 1, "step": 0.001}, section=section))
shared.opts.add_option("faceswaplab_upscaled_swapper_fthresh", shared.OptionInfo(
10, "Upscaled swapper fthresh (diff sensitivity) 10 = default behaviour. Low impact.", gr.Slider, {"minimum": 5, "maximum": 250, "step": 1}, section=section))
shared.opts.add_option("faceswaplab_upscaled_swapper_erosion", shared.OptionInfo(
1, "Upscaled swapper mask erosion factor, 1 = default behaviour. The larger it is, the more blur is applied around the face. Too large and the facial change is no longer visible.", gr.Slider, {"minimum": 0, "maximum": 10, "step": 0.001}, section=section))


script_callbacks.on_ui_settings(on_ui_settings)
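For reference, every option registered above is read back at runtime through opts.data.get(key, default), falling back to the default when the user has not changed the setting. A minimal sketch, assuming the WebUI's modules.shared is importable and on_ui_settings has run:

# Sketch: reading the registered settings back at runtime. Keys and defaults
# mirror the add_option calls above; this is not part of the diff itself.
from modules.shared import opts

fthresh = opts.data.get("faceswaplab_upscaled_swapper_fthresh", 10)
erosion_factor = opts.data.get("faceswaplab_upscaled_swapper_erosion", 1)
improved_mask = opts.data.get("faceswaplab_upscaled_improved_mask", True)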
@@ -7,6 +7,7 @@ from functools import lru_cache
from typing import Union, List
from torch import device as torch_device


@lru_cache
def get_parsing_model(device: torch_device) -> torch.nn.Module:
    """
@@ -21,7 +22,12 @@ def get_parsing_model(device: torch_device) -> torch.nn.Module:
    """
    return init_parsing_model(device=device)


def convert_image_to_tensor(
    images: Union[np.ndarray, List[np.ndarray]],
    convert_bgr_to_rgb: bool = True,
    use_float32: bool = True,
) -> Union[torch.Tensor, List[torch.Tensor]]:
    """
    Converts an image or a list of images to PyTorch tensor.
@@ -33,10 +39,13 @@ def convert_image_to_tensor(images: Union[np.ndarray, List[np.ndarray]], convert
    Returns:
        PyTorch tensor or a list of PyTorch tensors.
    """

    def _convert_single_image_to_tensor(
        image: np.ndarray, convert_bgr_to_rgb: bool, use_float32: bool
    ) -> torch.Tensor:
        if image.shape[2] == 3 and convert_bgr_to_rgb:
            if image.dtype == "float64":
                image = image.astype("float32")
            image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        image_tensor = torch.from_numpy(image.transpose(2, 0, 1))
        if use_float32:
@@ -44,10 +53,14 @@ def convert_image_to_tensor(images: Union[np.ndarray, List[np.ndarray]], convert
        return image_tensor

    if isinstance(images, list):
        return [
            _convert_single_image_to_tensor(image, convert_bgr_to_rgb, use_float32)
            for image in images
        ]
    else:
        return _convert_single_image_to_tensor(images, convert_bgr_to_rgb, use_float32)


def generate_face_mask(face_image: np.ndarray, device: torch.device) -> np.ndarray:
    """
    Generates a face mask given a face image.
@@ -60,12 +73,18 @@ def generate_face_mask(face_image: np.ndarray, device: torch.device) -> np.ndarr
        The face mask as a numpy.ndarray.
    """
    # Resize the face image for the model
    resized_face_image = cv2.resize(
        face_image, (512, 512), interpolation=cv2.INTER_LINEAR
    )
    # Preprocess the image
    face_input = convert_image_to_tensor(
        (resized_face_image.astype("float32") / 255.0),
        convert_bgr_to_rgb=True,
        use_float32=True,
    )
    normalize(face_input, (0.5, 0.5, 0.5), (0.5, 0.5, 0.5), inplace=True)
    assert isinstance(face_input, torch.Tensor)
    face_input = torch.unsqueeze(face_input, 0).to(device)
    # Pass the image through the model
@@ -75,7 +94,27 @@ def generate_face_mask(face_image: np.ndarray, device: torch.device) -> np.ndarr
    # Generate the mask from the model output
    parse_mask = np.zeros(model_output.shape)
    MASK_COLOR_MAP = [
        0,
        255,
        255,
        255,
        255,
        255,
        255,
        255,
        255,
        255,
        255,
        255,
        255,
        255,
        0,
        255,
        0,
        0,
        0,
    ]
    for idx, color in enumerate(MASK_COLOR_MAP):
        parse_mask[model_output == idx] = color
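A minimal way to exercise generate_face_mask on a single crop might look like the sketch below; the input path and the device fallback are assumptions, not part of this diff.

# Hedged usage sketch for generate_face_mask: load a BGR face crop with
# OpenCV and write the parsing-based mask next to it. "face.png" is a
# hypothetical input path; the mask dtype cast is defensive.
import cv2
import torch

face_bgr = cv2.imread("face.png")
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
mask = generate_face_mask(face_bgr, device=device)
cv2.imwrite("face_mask.png", mask.astype("uint8"))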
@@ -50,12 +50,12 @@ from scripts.faceswaplab_globals import FACE_PARSER_DIR
ROOT_DIR = FACE_PARSER_DIR


def load_file_from_url(url, model_dir=None, progress=True, file_name=None):
    """Ref:https://github.com/1adrianb/face-alignment/blob/master/face_alignment/utils.py"""
    if model_dir is None:
        hub_dir = get_dir()
        model_dir = os.path.join(hub_dir, "checkpoints")

    os.makedirs(os.path.join(ROOT_DIR, model_dir), exist_ok=True)
@@ -70,10 +70,12 @@ def load_file_from_url(url, model_dir=None, progress=True, file_name=None):
    return cached_file


def init_parsing_model(device="cuda"):
    model = ParseNet(in_size=512, out_size=512, parsing_ch=19)
    model_url = "https://github.com/sczhou/CodeFormer/releases/download/v0.1.0/parsing_parsenet.pth"
    model_path = load_file_from_url(
        url=model_url, model_dir="weights/facelib", progress=True, file_name=None
    )
    load_net = torch.load(model_path, map_location=lambda storage, loc: storage)
    model.load_state_dict(load_net, strict=True)
    model.eval()
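Since get_parsing_model wraps this loader with lru_cache, the download and torch.load only happen once per device. A small sketch of calling it directly (the device fallback is an assumption):

# Sketch: initialize ParseNet weights once; the checkpoint is fetched from
# the CodeFormer release URL above and cached under ROOT_DIR/weights/facelib.
import torch

device = "cuda" if torch.cuda.is_available() else "cpu"
parsing_model = init_parsing_model(device=device)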
@@ -499,27 +499,27 @@ class NormLayer(nn.Module):
        input_size: input shape without batch size, for layer norm.
    """

    def __init__(self, channels, normalize_shape=None, norm_type="bn"):
        super(NormLayer, self).__init__()
        norm_type = norm_type.lower()
        self.norm_type = norm_type
        if norm_type == "bn":
            self.norm = nn.BatchNorm2d(channels, affine=True)
        elif norm_type == "in":
            self.norm = nn.InstanceNorm2d(channels, affine=False)
        elif norm_type == "gn":
            self.norm = nn.GroupNorm(32, channels, affine=True)
        elif norm_type == "pixel":
            self.norm = lambda x: F.normalize(x, p=2, dim=1)
        elif norm_type == "layer":
            self.norm = nn.LayerNorm(normalize_shape)
        elif norm_type == "none":
            self.norm = lambda x: x * 1.0
        else:
            assert 1 == 0, f"Norm type {norm_type} not supported."

    def forward(self, x, ref=None):
        if self.norm_type == "spade":
            return self.norm(x, ref)
        else:
            return self.norm(x)
@@ -537,51 +537,56 @@ class ReluLayer(nn.Module):
        - none: direct pass
    """

    def __init__(self, channels, relu_type="relu"):
        super(ReluLayer, self).__init__()
        relu_type = relu_type.lower()
        if relu_type == "relu":
            self.func = nn.ReLU(True)
        elif relu_type == "leakyrelu":
            self.func = nn.LeakyReLU(0.2, inplace=True)
        elif relu_type == "prelu":
            self.func = nn.PReLU(channels)
        elif relu_type == "selu":
            self.func = nn.SELU(True)
        elif relu_type == "none":
            self.func = lambda x: x * 1.0
        else:
            assert 1 == 0, f"Relu type {relu_type} not supported."

    def forward(self, x):
        return self.func(x)


class ConvLayer(nn.Module):
    def __init__(
        self,
        in_channels,
        out_channels,
        kernel_size=3,
        scale="none",
        norm_type="none",
        relu_type="none",
        use_pad=True,
        bias=True,
    ):
        super(ConvLayer, self).__init__()
        self.use_pad = use_pad
        self.norm_type = norm_type
        if norm_type in ["bn"]:
            bias = False

        stride = 2 if scale == "down" else 1
        self.scale_func = lambda x: x
        if scale == "up":
            self.scale_func = lambda x: nn.functional.interpolate(
                x, scale_factor=2, mode="nearest"
            )

        self.reflection_pad = nn.ReflectionPad2d(int(np.ceil((kernel_size - 1.0) / 2)))
        self.conv2d = nn.Conv2d(
            in_channels, out_channels, kernel_size, stride, bias=bias
        )
        self.relu = ReluLayer(out_channels, relu_type)
        self.norm = NormLayer(out_channels, norm_type=norm_type)
@@ -601,19 +606,27 @@ class ResidualBlock(nn.Module):
    Residual block recommended in: http://torch.ch/blog/2016/02/04/resnets.html
    """

    def __init__(self, c_in, c_out, relu_type="prelu", norm_type="bn", scale="none"):
        super(ResidualBlock, self).__init__()

        if scale == "none" and c_in == c_out:
            self.shortcut_func = lambda x: x
        else:
            self.shortcut_func = ConvLayer(c_in, c_out, 3, scale)

        scale_config_dict = {
            "down": ["none", "down"],
            "up": ["up", "none"],
            "none": ["none", "none"],
        }
        scale_conf = scale_config_dict[scale]

        self.conv1 = ConvLayer(
            c_in, c_out, 3, scale_conf[0], norm_type=norm_type, relu_type=relu_type
        )
        self.conv2 = ConvLayer(
            c_out, c_out, 3, scale_conf[1], norm_type=norm_type, relu_type="none"
        )

    def forward(self, x):
        identity = self.shortcut_func(x)
@@ -624,20 +637,21 @@ class ResidualBlock(nn.Module):
class ParseNet(nn.Module):
    def __init__(
        self,
        in_size=128,
        out_size=128,
        min_feat_size=32,
        base_ch=64,
        parsing_ch=19,
        res_depth=10,
        relu_type="LeakyReLU",
        norm_type="bn",
        ch_range=[32, 256],
    ):
        super().__init__()
        self.res_depth = res_depth
        act_args = {"norm_type": norm_type, "relu_type": relu_type}
        min_ch, max_ch = ch_range

        ch_clip = lambda x: max(min_ch, min(x, max_ch))  # noqa: E731
@@ -652,17 +666,19 @@ class ParseNet(nn.Module):
        head_ch = base_ch
        for i in range(down_steps):
            cin, cout = ch_clip(head_ch), ch_clip(head_ch * 2)
            self.encoder.append(ResidualBlock(cin, cout, scale="down", **act_args))
            head_ch = head_ch * 2

        self.body = []
        for i in range(res_depth):
            self.body.append(
                ResidualBlock(ch_clip(head_ch), ch_clip(head_ch), **act_args)
            )

        self.decoder = []
        for i in range(up_steps):
            cin, cout = ch_clip(head_ch), ch_clip(head_ch // 2)
            self.decoder.append(ResidualBlock(cin, cout, scale="up", **act_args))
            head_ch = head_ch // 2

        self.encoder = nn.Sequential(*self.encoder)
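The encoder, body, and decoder stacks above form a symmetric hourglass. A quick way to sanity-check the construction without a forward pass is to instantiate the network and count parameters; the forward-pass semantics are not shown in this hunk, so the sketch deliberately avoids calling it:

# Sketch: instantiate ParseNet with the same arguments init_parsing_model
# uses above and count its parameters.
net = ParseNet(in_size=512, out_size=512, parsing_ch=19)
n_params = sum(p.numel() for p in net.parameters())
print(f"ParseNet parameters: {n_params:,}")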
@@ -12,7 +12,11 @@ from PIL import Image
from sklearn.metrics.pairwise import cosine_similarity
from scripts.faceswaplab_swapping import upscaled_inswapper
from scripts.faceswaplab_utils.imgutils import (
    cv2_to_pil,
    pil_to_cv2,
    check_against_nsfw,
)
from scripts.faceswaplab_utils.faceswaplab_logging import logger, save_img_debug
from scripts import faceswaplab_globals
from modules.shared import opts
@@ -48,6 +52,7 @@ def cosine_similarity_face(face1, face2) -> float:
    # Return the maximum of 0 and the calculated similarity as the final similarity score
    return max(0, similarity[0, 0])


def compare_faces(img1: Image.Image, img2: Image.Image) -> float:
    """
    Compares the similarity between two faces extracted from images using cosine similarity.
@@ -76,6 +81,7 @@ def compare_faces(img1: Image.Image, img2: Image.Image) -> float:
class FaceModelException(Exception):
    """Exception raised when an error is encountered in the face model."""

    def __init__(self, message: str) -> None:
        """
        Args:
@@ -84,6 +90,7 @@ class FaceModelException(Exception):
        self.message = message
        super().__init__(self.message)


@lru_cache(maxsize=1)
def getAnalysisModel():
    """
@@ -92,7 +99,7 @@ def getAnalysisModel():
    Returns:
        insightface.app.FaceAnalysis: The analysis model for face analysis.
    """
    try:
        if not os.path.exists(faceswaplab_globals.ANALYZER_DIR):
            os.makedirs(faceswaplab_globals.ANALYZER_DIR)
@@ -101,10 +108,13 @@ def getAnalysisModel():
        return insightface.app.FaceAnalysis(
            name="buffalo_l", providers=providers, root=faceswaplab_globals.ANALYZER_DIR
        )
    except Exception as e:
        logger.error(
            "Loading of swapping model failed, please check the requirements (On Windows, download and install Visual Studio. During the install, make sure to include the Python and C++ packages.)"
        )
        raise FaceModelException("Loading of swapping model failed")


@lru_cache(maxsize=1)
def getFaceSwapModel(model_path: str):
    """
@@ -116,14 +126,23 @@ def getFaceSwapModel(model_path: str):
    Returns:
        insightface.model_zoo.FaceModel: The face swap model.
    """
    try:
        # Initializes the face swap model using the specified model path.
        return upscaled_inswapper.UpscaledINSwapper(
            insightface.model_zoo.get_model(model_path, providers=providers)
        )
    except Exception as e:
        logger.error(
            "Loading of swapping model failed, please check the requirements (On Windows, download and install Visual Studio. During the install, make sure to include the Python and C++ packages.)"
        )


def get_faces(
    img_data: np.ndarray,
    det_size=(640, 640),
    det_thresh: Optional[int] = None,
    sort_by_face_size=False,
) -> List[Face]:
    """
    Detects and retrieves faces from an image using an analysis model.
@@ -136,7 +155,7 @@ def get_faces(img_data: np.ndarray, det_size=(640, 640), det_thresh : Optional[i
        list: A list of detected faces, sorted by their x-coordinate of the bounding box.
    """
    if det_thresh is None:
        det_thresh = opts.data.get("faceswaplab_detection_threshold", 0.5)

    # Create a deep copy of the analysis model (otherwise det_size is attached to the analysis model and can't be changed)
@@ -155,8 +174,12 @@ def get_faces(img_data: np.ndarray, det_size=(640, 640), det_thresh : Optional[i
        return get_faces(img_data, det_size=det_size_half, det_thresh=det_thresh)

    try:
        if sort_by_face_size:
            return sorted(
                face,
                reverse=True,
                key=lambda x: (x.bbox[2] - x.bbox[0]) * (x.bbox[3] - x.bbox[1]),
            )

        # Sort the detected faces based on their x-coordinate of the bounding box
        return sorted(face, key=lambda x: x.bbox[0])
@@ -164,7 +187,6 @@ def get_faces(img_data: np.ndarray, det_size=(640, 640), det_thresh : Optional[i
        return []


@dataclass
class ImageResult:
    """
@@ -222,12 +244,15 @@ def get_faces_from_img_files(files):
    if len(files) > 0:
        for file in files:
            img = Image.open(file.name)  # Open the image file
            face = get_or_default(
                get_faces(pil_to_cv2(img)), 0, None
            )  # Extract faces from the image
            if face is not None:
                faces.append(face)  # Add the detected face to the list of faces
    return faces


def blend_faces(faces: List[Face]) -> Face:
    """
    Blends the embeddings of multiple faces into a single face.
@@ -258,9 +283,15 @@ def blend_faces(faces: List[Face]) -> Face:
        # Create a new Face object using the properties of the first face in the list
        # Assign the blended embedding to the blended Face object
        blended = Face(
            embedding=blended_embedding, gender=faces[0].gender, age=faces[0].age
        )
        assert (
            not np.array_equal(blended.embedding, faces[0].embedding)
            if len(faces) > 1
            else True
        ), "If len(faces) > 1, the blended embedding should not be the same as the first face's"

        return blended
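The assertion above only guarantees the blend differs from the first input when several faces are supplied. As an illustration of the underlying idea (an element-wise aggregate of embeddings; the mean shown here is an assumption, since the exact aggregation happens outside this hunk):

# Illustration only: blending as an element-wise mean of embeddings. The
# mean is an assumed aggregation; gender/age come from the first face,
# matching the Face(...) construction above. Embedding size is hypothetical.
import numpy as np

emb_a = np.random.rand(512).astype(np.float32)
emb_b = np.random.rand(512).astype(np.float32)
blended_embedding = np.mean([emb_a, emb_b], axis=0)
assert not np.array_equal(blended_embedding, emb_a)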
@@ -275,9 +306,9 @@ def swap_face(
    model: str,
    faces_index: Set[int] = {0},
    same_gender=True,
    upscaled_swapper=False,
    compute_similarity=True,
    sort_by_face_size=False,
) -> ImageResult:
    """
    Swaps faces in the target image with the source face.
@@ -295,7 +326,7 @@ def swap_face(
    """
    return_result = ImageResult(target_img, {}, {})
    try:
        target_img = cv2.cvtColor(np.array(target_img), cv2.COLOR_RGB2BGR)
        gender = source_face["gender"]
        logger.info("Source Gender %s", gender)
@@ -313,19 +344,23 @@ def swap_face(
            for i, swapped_face in enumerate(target_faces):
                logger.info(f"swap face {i}")
                if i in faces_index:
                    result = face_swapper.get(
                        result, swapped_face, source_face, upscale=upscaled_swapper
                    )

            result_image = Image.fromarray(cv2.cvtColor(result, cv2.COLOR_BGR2RGB))
            return_result.image = result_image

            if compute_similarity:
                try:
                    result_faces = get_faces(
                        cv2.cvtColor(np.array(result_image), cv2.COLOR_RGB2BGR),
                        sort_by_face_size=sort_by_face_size,
                    )
                    if same_gender:
                        result_faces = [
                            x for x in result_faces if x["gender"] == gender
                        ]

                    for i, swapped_face in enumerate(result_faces):
                        logger.info(f"compare face {i}")
@@ -343,13 +378,20 @@ def swap_face(
                except Exception as e:
                    logger.error("Similarity processing failed %s", e)
                    raise e
    except Exception as e:
        logger.error("Conversion failed %s", e)
        raise e
    return return_result
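A hypothetical driver for swap_face, mirroring the positional call used in build_face_checkpoint_and_save further down (the source face doubles as the reference face; the leading positional parameters are partially elided in this hunk, so their names here are assumptions):

# Hypothetical driver: swap the first detected face of target.png with the
# first face of source.png. get_faces/pil_to_cv2/get_models are the helpers
# used elsewhere in this diff; file paths are placeholders.
from PIL import Image

src = get_faces(pil_to_cv2(Image.open("source.png").convert("RGB")))[0]
target = Image.open("target.png").convert("RGB")
result = swap_face(
    src, src, target, get_models()[0],
    faces_index={0},
    same_gender=False,
    compute_similarity=False,
)
result.image.save("swapped.png")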
def process_image_unit(
    model,
    unit: FaceSwapUnitSettings,
    image: Image.Image,
    info=None,
    upscaled_swapper=False,
    force_blend=False,
) -> List:
    """Process one image and return a List of (image, info) (one if blended, many if not).

    Args:
@@ -362,23 +404,28 @@ def process_image_unit(model, unit : FaceSwapUnitSettings, image: Image.Image, i
    """
    results = []
    if unit.enable:
        if check_against_nsfw(image):
            return [(image, info)]
        if not unit.blend_faces and not force_blend:
            src_faces = unit.faces
            logger.info(f"will generate {len(src_faces)} images")
        else:
            logger.info("blend all faces together")
            src_faces = [unit.blended_faces]
            assert (
                not np.array_equal(
                    unit.reference_face.embedding, src_faces[0].embedding
                )
                if len(unit.faces) > 1
                else True
            ), "Reference face cannot be the same as blended"

        for i, src_face in enumerate(src_faces):
            logger.info(f"Process face {i}")
            if unit.reference_face is not None:
                reference_face = unit.reference_face
            else:
                logger.info("Use source face as reference face")
                reference_face = src_face
@@ -392,14 +439,30 @@ def process_image_unit(model, unit : FaceSwapUnitSettings, image: Image.Image, i
                same_gender=unit.same_gender,
                upscaled_swapper=upscaled_swapper,
                compute_similarity=unit.compute_similarity,
                sort_by_face_size=unit.sort_by_size,
            )
            save_img_debug(result.image, "After swap")

            if result.image is None:
                logger.error("Result image is None")
            if (
                (not unit.check_similarity)
                or result.similarity
                and all(
                    [result.similarity.values() != 0]
                    + [x >= unit.min_sim for x in result.similarity.values()]
                )
                and all(
                    [result.ref_similarity.values() != 0]
                    + [x >= unit.min_ref_sim for x in result.ref_similarity.values()]
                )
            ):
                results.append(
                    (
                        result.image,
                        f"{info}, similarity = {result.similarity}, ref_similarity = {result.ref_similarity}",
                    )
                )
            else:
                logger.warning(
                    f"skip, similarity too low, sim = {result.similarity} (target {unit.min_sim}) ref sim = {result.ref_similarity} (target = {unit.min_ref_sim})"
@@ -407,22 +470,33 @@ def process_image_unit(model, unit : FaceSwapUnitSettings, image: Image.Image, i
    logger.debug("process_image_unit : Unit produced %s results", len(results))
    return results
def process_images_units(
    model,
    units: List[FaceSwapUnitSettings],
    images: List[Tuple[Optional[Image.Image], Optional[str]]],
    upscaled_swapper=False,
    force_blend=False,
) -> Union[List, None]:
    if len(units) == 0:
        logger.info("Finished processing image, return %s images", len(images))
        return None

    logger.debug("%s more units", len(units))

    processed_images = []
    for i, (image, info) in enumerate(images):
        logger.debug("Processing image %s", i)
        swapped = process_image_unit(
            model, units[0], image, info, upscaled_swapper, force_blend
        )
        logger.debug("Image %s -> %s images", i, len(swapped))
        nexts = process_images_units(
            model, units[1:], swapped, upscaled_swapper, force_blend
        )
        if nexts:
            processed_images.extend(nexts)
        else:
            processed_images.extend(swapped)
    return processed_images
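Because each call applies units[0] to every incoming image and then recurses on the remaining units, the number of outputs can grow multiplicatively when units emit several faces. A pure-Python illustration of that fan-out (not the real API):

# Fan-out illustration (not the real API): each "unit" multiplies the image
# count by the number of faces it emits, so three 2-face units turn one
# input into up to 8 results.
def fanout(per_unit_outputs, images):
    if not per_unit_outputs:
        return images
    step = [img for img in images for _ in range(per_unit_outputs[0])]
    return fanout(per_unit_outputs[1:], step)

print(len(fanout([2, 2, 2], ["img"])))  # 8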
@@ -1,4 +1,3 @@
import cv2
import numpy as np
import onnx
@@ -14,18 +13,22 @@ from PIL import Image
from scripts.faceswaplab_utils.faceswaplab_logging import logger
from scripts.faceswaplab_postprocessing import upscaling
from scripts.faceswaplab_postprocessing.postprocessing_options import (
    PostProcessingOptions,
)
from scripts.faceswaplab_swapping.facemask import generate_face_mask
from scripts.faceswaplab_utils.imgutils import cv2_to_pil, pil_to_cv2


def get_upscaler() -> UpscalerData:
    for upscaler in shared.sd_upscalers:
        if upscaler.name == opts.data.get(
            "faceswaplab_upscaled_swapper_upscaler", "LDSR"
        ):
            return upscaler
    return None


def merge_images_with_mask(image1, image2, mask):
    if image1.shape != image2.shape or image1.shape[:2] != mask.shape:
        raise ValueError("Img should have the same shape")
@@ -36,153 +39,202 @@ def merge_images_with_mask(image1, image2, mask):
    merged_image = cv2.add(empty_region, masked_region)
    return merged_image


def erode_mask(mask, kernel_size=3, iterations=1):
    kernel = np.ones((kernel_size, kernel_size), np.uint8)
    eroded_mask = cv2.erode(mask, kernel, iterations=iterations)
    return eroded_mask


def apply_gaussian_blur(mask, kernel_size=(5, 5), sigma_x=0):
    blurred_mask = cv2.GaussianBlur(mask, kernel_size, sigma_x)
    return blurred_mask


def dilate_mask(mask, kernel_size=5, iterations=1):
    kernel = np.ones((kernel_size, kernel_size), np.uint8)
    dilated_mask = cv2.dilate(mask, kernel, iterations=iterations)
    return dilated_mask


def get_face_mask(aimg, bgr_fake):
    mask1 = generate_face_mask(aimg, device=shared.device)
    mask2 = generate_face_mask(bgr_fake, device=shared.device)
    mask = dilate_mask(cv2.bitwise_or(mask1, mask2))
    return mask
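The helpers above are thin OpenCV morphology wrappers. A quick demonstration on a synthetic binary mask (sizes and values are arbitrary):

# Demonstration on a synthetic mask: erode to trim edge pixels, dilate to
# grow the region back, then soften the boundary with a Gaussian blur.
import cv2
import numpy as np

toy_mask = np.zeros((64, 64), np.uint8)
toy_mask[16:48, 16:48] = 255
softened = apply_gaussian_blur(dilate_mask(erode_mask(toy_mask)))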
class UpscaledINSwapper:
    def __init__(self, inswapper: INSwapper):
        self.__dict__.update(inswapper.__dict__)

    def forward(self, img, latent):
        img = (img - self.input_mean) / self.input_std
        pred = self.session.run(
            self.output_names, {self.input_names[0]: img, self.input_names[1]: latent}
        )[0]
        return pred

    def super_resolution(self, img, k=2):
        pil_img = cv2_to_pil(img)
        options = PostProcessingOptions(
            upscaler_name=opts.data.get(
                "faceswaplab_upscaled_swapper_upscaler", "LDSR"
            ),
            upscale_visibility=1,
            scale=k,
            face_restorer_name=opts.data.get(
                "faceswaplab_upscaled_swapper_face_restorer", ""
            ),
            codeformer_weight=opts.data.get(
                "faceswaplab_upscaled_swapper_face_restorer_weight", 1
            ),
            restorer_visibility=opts.data.get(
                "faceswaplab_upscaled_swapper_face_restorer_visibility", 1
            ),
        )
        upscaled = upscaling.upscale_img(pil_img, options)
        upscaled = upscaling.restore_face(upscaled, options)
        return pil_to_cv2(upscaled)
    def get(self, img, target_face, source_face, paste_back=True, upscale=True):
        aimg, M = face_align.norm_crop2(img, target_face.kps, self.input_size[0])
        blob = cv2.dnn.blobFromImage(
            aimg,
            1.0 / self.input_std,
            self.input_size,
            (self.input_mean, self.input_mean, self.input_mean),
            swapRB=True,
        )
        latent = source_face.normed_embedding.reshape((1, -1))
        latent = np.dot(latent, self.emap)
        latent /= np.linalg.norm(latent)
        pred = self.session.run(
            self.output_names, {self.input_names[0]: blob, self.input_names[1]: latent}
        )[0]
        # print(latent.shape, latent.dtype, pred.shape)
        img_fake = pred.transpose((0, 2, 3, 1))[0]
        bgr_fake = np.clip(255 * img_fake, 0, 255).astype(np.uint8)[:, :, ::-1]

        try:
            if not paste_back:
                return bgr_fake, M
            else:
                target_img = img

                def compute_diff(bgr_fake, aimg):
                    fake_diff = bgr_fake.astype(np.float32) - aimg.astype(np.float32)
                    fake_diff = np.abs(fake_diff).mean(axis=2)
                    fake_diff[:2, :] = 0
                    fake_diff[-2:, :] = 0
                    fake_diff[:, :2] = 0
                    fake_diff[:, -2:] = 0
                    return fake_diff

                if upscale:
                    print("*" * 80)
                    print(
                        f"Upscaled inswapper using {opts.data.get('faceswaplab_upscaled_swapper_upscaler', 'LDSR')}"
                    )
                    print("*" * 80)

                    k = 4
                    aimg, M = face_align.norm_crop2(
                        img, target_face.kps, self.input_size[0] * k
                    )

                    # upscale and restore face :
                    bgr_fake = self.super_resolution(bgr_fake, k)

                    if opts.data.get("faceswaplab_upscaled_improved_mask", True):
                        mask = get_face_mask(aimg, bgr_fake)
                        bgr_fake = merge_images_with_mask(aimg, bgr_fake, mask)

                    # compute fake_diff before sharpen and color correction (better result)
                    fake_diff = compute_diff(bgr_fake, aimg)

                    if opts.data.get("faceswaplab_upscaled_swapper_sharpen", True):
                        print("sharpen")
                        # Add sharpness
                        blurred = cv2.GaussianBlur(bgr_fake, (0, 0), 3)
                        bgr_fake = cv2.addWeighted(bgr_fake, 1.5, blurred, -0.5, 0)

                    # Apply color corrections
                    if opts.data.get("faceswaplab_upscaled_swapper_fixcolor", True):
                        print("color correction")
                        correction = processing.setup_color_correction(cv2_to_pil(aimg))
                        bgr_fake_pil = processing.apply_color_correction(
                            correction, cv2_to_pil(bgr_fake)
                        )
                        bgr_fake = pil_to_cv2(bgr_fake_pil)

                else:
                    fake_diff = compute_diff(bgr_fake, aimg)

                IM = cv2.invertAffineTransform(M)

                img_white = np.full(
                    (aimg.shape[0], aimg.shape[1]), 255, dtype=np.float32
                )
                bgr_fake = cv2.warpAffine(
                    bgr_fake,
                    IM,
                    (target_img.shape[1], target_img.shape[0]),
                    borderValue=0.0,
                )
                img_white = cv2.warpAffine(
                    img_white,
                    IM,
                    (target_img.shape[1], target_img.shape[0]),
                    borderValue=0.0,
                )
                fake_diff = cv2.warpAffine(
                    fake_diff,
                    IM,
                    (target_img.shape[1], target_img.shape[0]),
                    borderValue=0.0,
                )
                img_white[img_white > 20] = 255
                fthresh = opts.data.get("faceswaplab_upscaled_swapper_fthresh", 10)
                print("fthresh", fthresh)
                fake_diff[fake_diff < fthresh] = 0
                fake_diff[fake_diff >= fthresh] = 255
                img_mask = img_white
                mask_h_inds, mask_w_inds = np.where(img_mask == 255)
                mask_h = np.max(mask_h_inds) - np.min(mask_h_inds)
                mask_w = np.max(mask_w_inds) - np.min(mask_w_inds)
                mask_size = int(np.sqrt(mask_h * mask_w))
                erosion_factor = opts.data.get(
                    "faceswaplab_upscaled_swapper_erosion", 1
                )
                k = max(int(mask_size // 10 * erosion_factor), int(10 * erosion_factor))

                kernel = np.ones((k, k), np.uint8)
                img_mask = cv2.erode(img_mask, kernel, iterations=1)
                kernel = np.ones((2, 2), np.uint8)
                fake_diff = cv2.dilate(fake_diff, kernel, iterations=1)

                k = max(int(mask_size // 20 * erosion_factor), int(5 * erosion_factor))
                kernel_size = (k, k)
                blur_size = tuple(2 * i + 1 for i in kernel_size)
                img_mask = cv2.GaussianBlur(img_mask, blur_size, 0)
                k = int(5 * erosion_factor)
                kernel_size = (k, k)
                blur_size = tuple(2 * i + 1 for i in kernel_size)
                fake_diff = cv2.GaussianBlur(fake_diff, blur_size, 0)
                img_mask /= 255
                fake_diff /= 255

                img_mask = np.reshape(
                    img_mask, [img_mask.shape[0], img_mask.shape[1], 1]
                )
                fake_merged = img_mask * bgr_fake + (1 - img_mask) * target_img.astype(
                    np.float32
                )
                fake_merged = fake_merged.astype(np.uint8)
                return fake_merged
        except Exception as e:
            import traceback

            traceback.print_exc()
            raise e
@@ -20,13 +20,16 @@ from scripts.faceswaplab_utils.imgutils import pil_to_cv2
from scripts.faceswaplab_utils.models_utils import get_models
from scripts.faceswaplab_utils.faceswaplab_logging import logger
import scripts.faceswaplab_swapping.swapper as swapper
from scripts.faceswaplab_postprocessing.postprocessing_options import (
    PostProcessingOptions,
)
from scripts.faceswaplab_postprocessing.postprocessing import enhance_image
from dataclasses import fields
from typing import List
from scripts.faceswaplab_ui.faceswaplab_unit_settings import FaceSwapUnitSettings
from scripts.faceswaplab_utils.models_utils import get_current_model


def compare(img1, img2):
    if img1 is not None and img2 is not None:
        return swapper.compare_faces(img1, img2)
@@ -34,13 +37,27 @@ def compare(img1, img2):
    return "You need 2 images to compare"
def extract_faces(
    files,
    extract_path,
    face_restorer_name,
    face_restorer_visibility,
    codeformer_weight,
    upscaler_name,
    upscaler_scale,
    upscaler_visibility,
    inpainting_denoising_strengh,
    inpainting_prompt,
    inpainting_negative_prompt,
    inpainting_steps,
    inpainting_sampler,
    inpainting_when,
):
    if not extract_path:
        # keep the generated directory; the original discarded mkdtemp()'s result
        extract_path = tempfile.mkdtemp()
    if files is not None:
        images = []
        for file in files:
            img = Image.open(file.name).convert("RGB")
            faces = swapper.get_faces(pil_to_cv2(img))
            if faces:
@@ -50,40 +67,49 @@ def extract_faces(files, extract_path, face_restorer_name, face_restorer_visibi
                    x_min, y_min, x_max, y_max = bbox
                    face_image = img.crop((x_min, y_min, x_max, y_max))
                    if face_restorer_name or face_restorer_visibility:
                        scale = 1 if face_image.width > 512 else 512 // face_image.width
                        face_image = enhance_image(
                            face_image,
                            PostProcessingOptions(
                                face_restorer_name=face_restorer_name,
                                restorer_visibility=face_restorer_visibility,
                                codeformer_weight=codeformer_weight,
                                upscaler_name=upscaler_name,
                                upscale_visibility=upscaler_visibility,
                                scale=scale,
                                inpainting_denoising_strengh=inpainting_denoising_strengh,
                                inpainting_prompt=inpainting_prompt,
                                inpainting_steps=inpainting_steps,
                                inpainting_negative_prompt=inpainting_negative_prompt,
                                inpainting_when=inpainting_when,
                                inpainting_sampler=inpainting_sampler,
                            ),
                        )
                    path = tempfile.NamedTemporaryFile(
                        delete=False, suffix=".png", dir=extract_path
                    ).name
                    face_image.save(path)
                    face_images.append(path)
        images += face_images
        return images
    return None
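A hypothetical way to drive extract_faces outside Gradio: it only reads file.name on each entry, so any object with a name attribute pointing to an image works. All argument values below are illustrative; None/0 effectively skip restoration and inpainting.

# Hypothetical driver for extract_faces: SimpleNamespace stands in for the
# Gradio file objects (only `.name` is used). Paths are placeholders.
from types import SimpleNamespace

files = [SimpleNamespace(name="photo1.png"), SimpleNamespace(name="photo2.png")]
paths = extract_faces(
    files, "./extracted",
    None, 0, 1,                 # face_restorer_name, visibility, codeformer_weight
    None, 1, 1,                 # upscaler_name, scale, visibility
    0, "", "", 20, None, None,  # inpainting settings
)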
def analyse_faces(image, det_threshold=0.5):
    try:
        faces = swapper.get_faces(imgutils.pil_to_cv2(image), det_thresh=det_threshold)
        result = ""
        for i, face in enumerate(faces):
            result += f"\nFace {i} \n" + "=" * 40 + "\n"
            result += pformat(face) + "\n"
            result += "=" * 40
        return result
    except Exception as e:
        logger.error("Analysis Failed : %s", e)
        return "Analysis Failed"
def build_face_checkpoint_and_save(batch_files, name):
    """
    Builds a face checkpoint, swaps faces, and saves the result to a file.
@@ -102,7 +128,7 @@ def build_face_checkpoint_and_save(batch_files, name):
    preview_path = os.path.join(
        scripts.basedir(), "extensions", "sd-webui-faceswaplab", "references"
    )
    faces_path = os.path.join(scripts.basedir(), "models", "faceswaplab", "faces")
    if not os.path.exists(faces_path):
        os.makedirs(faces_path)
@@ -116,22 +142,36 @@ def build_face_checkpoint_and_save(batch_files, name):
        if name == "":
            name = "default_name"
        pprint(blended_face)
        result = swapper.swap_face(
            blended_face, blended_face, target_img, get_models()[0]
        )
        result_image = enhance_image(
            result.image,
            PostProcessingOptions(
                face_restorer_name="CodeFormer", restorer_visibility=1
            ),
        )
        file_path = os.path.join(faces_path, f"{name}.pkl")
        file_number = 1
        while os.path.exists(file_path):
            file_path = os.path.join(faces_path, f"{name}_{file_number}.pkl")
            file_number += 1
        result_image.save(file_path + ".png")
        with open(file_path, "wb") as file:
            pickle.dump(
                {
                    "embedding": blended_face.embedding,
                    "gender": blended_face.gender,
                    "age": blended_face.age,
                },
                file,
            )
        try:
            with open(file_path, "rb") as file:
                data = Face(pickle.load(file))
                print(data)
        except Exception as e:
            print(e)
        return result_image
@@ -139,48 +179,52 @@ def build_face_checkpoint_and_save(batch_files, name):
    return target_img
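The checkpoint written above is a plain pickle of three fields, so loading it back follows the same shape (a sketch; the path is a placeholder):

# Sketch: load a saved face checkpoint. The dict layout mirrors the
# pickle.dump above (embedding, gender, age).
import pickle

with open("models/faceswaplab/faces/Face.pkl", "rb") as f:
    face = Face(pickle.load(f))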
def explore_onnx_faceswap_model(model_path):
    data = {
        "Node Name": [],
        "Op Type": [],
        "Inputs": [],
        "Outputs": [],
        "Attributes": [],
    }
    if model_path:
        model = onnx.load(model_path)
        for node in model.graph.node:
            data["Node Name"].append(pformat(node.name))
            data["Op Type"].append(pformat(node.op_type))
            data["Inputs"].append(pformat(node.input))
            data["Outputs"].append(pformat(node.output))
            attributes = []
            for attr in node.attribute:
                attr_name = attr.name
                attr_value = attr.t
                attributes.append(
                    "{} = {}".format(pformat(attr_name), pformat(attr_value))
                )
            data["Attributes"].append(attributes)

    df = pd.DataFrame(data)
    return df
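The returned DataFrame can be inspected directly; a usage sketch (the model path is a hypothetical location under the extension's models directory):

# Usage sketch: summarize an inswapper ONNX graph, one row per node.
df = explore_onnx_faceswap_model("models/faceswaplab/inswapper_128.onnx")
print(df[["Node Name", "Op Type"]].head())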
def batch_process(files, save_path, *components):
try : def batch_process(files, save_path, *components):
try:
if save_path is not None: if save_path is not None:
os.makedirs(save_path, exist_ok=True) os.makedirs(save_path, exist_ok=True)
units_count = opts.data.get("faceswaplab_units_count", 3) units_count = opts.data.get("faceswaplab_units_count", 3)
units: List[FaceSwapUnitSettings] = [] units: List[FaceSwapUnitSettings] = []
#Parse and convert units flat components into FaceSwapUnitSettings # Parse and convert units flat components into FaceSwapUnitSettings
for i in range(0, units_count): for i in range(0, units_count):
units += [FaceSwapUnitSettings.get_unit_configuration(i, components)] units += [FaceSwapUnitSettings.get_unit_configuration(i, components)]
for i, u in enumerate(units): for i, u in enumerate(units):
logger.debug("%s, %s", pformat(i), pformat(u)) logger.debug("%s, %s", pformat(i), pformat(u))
#Parse the postprocessing options # Parse the postprocessing options
#We must first find where to start from (after face swapping units) # We must first find where to start from (after face swapping units)
len_conf: int = len(fields(FaceSwapUnitSettings)) len_conf: int = len(fields(FaceSwapUnitSettings))
shift: int = units_count * len_conf shift: int = units_count * len_conf
postprocess_options = PostProcessingOptions( postprocess_options = PostProcessingOptions(
@@ -191,26 +235,36 @@ def batch_process(files, save_path, *components):
        units = [u for u in units if u.enable]
        if files is not None:
            images = []
            for file in files:
                current_images = []
                src_image = Image.open(file.name).convert("RGB")
                swapped_images = swapper.process_images_units(
                    get_current_model(),
                    images=[(src_image, None)],
                    units=units,
                    upscaled_swapper=opts.data.get(
                        "faceswaplab_upscaled_swapper", False
                    ),
                )
                if len(swapped_images) > 0:
                    current_images += [img for img, info in swapped_images]
                logger.info("%s images generated", len(current_images))
                for i, img in enumerate(current_images):
                    current_images[i] = enhance_image(img, postprocess_options)
                for img in current_images:
                    path = tempfile.NamedTemporaryFile(
                        delete=False, suffix=".png", dir=save_path
                    ).name
                    img.save(path)
                images += current_images
            return images
    except Exception as e:
        logger.error("Batch Process error : %s", e)
        import traceback

        traceback.print_exc()
    return None
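The flat *components convention used above can be hard to follow. A toy illustration of the slicing, with made-up field counts (the real value of len_conf is len(fields(FaceSwapUnitSettings))):

# Layout assumption: [unit1 fields..., unit2 fields..., unit3 fields..., postprocessing fields...]
len_conf = 4                  # stand-in for len(fields(FaceSwapUnitSettings))
units_count = 3               # mirrors opts.data["faceswaplab_units_count"]
components = list(range(15))  # dummy flat values
unit_args = [components[i * len_conf : (i + 1) * len_conf] for i in range(units_count)]
pp_args = components[units_count * len_conf :]  # -> [12, 13, 14]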
@@ -220,107 +274,164 @@ def tools_ui():
    with gr.Tab("Tools"):
        with gr.Tab("Build"):
            gr.Markdown(
                """Build a face based on a batch list of images. Will blend the resulting face and store the checkpoint in the faceswaplab/faces directory."""
            )
            with gr.Row():
                batch_files = gr.components.File(
                    type="file",
                    file_count="multiple",
                    label="Batch Sources Images",
                    optional=True,
                    elem_id="faceswaplab_build_batch_files",
                )
                preview = gr.components.Image(
                    type="pil",
                    label="Preview",
                    interactive=False,
                    elem_id="faceswaplab_build_preview_face",
                )
            name = gr.Textbox(
                value="Face",
                placeholder="Name of the character",
                label="Name of the character",
                elem_id="faceswaplab_build_character_name",
            )
            generate_checkpoint_btn = gr.Button(
                "Save", elem_id="faceswaplab_build_save_btn"
            )
        with gr.Tab("Compare"):
            gr.Markdown(
                """Give a similarity score between two images (only first face is compared)."""
            )
            with gr.Row():
                img1 = gr.components.Image(
                    type="pil", label="Face 1", elem_id="faceswaplab_compare_face1"
                )
                img2 = gr.components.Image(
                    type="pil", label="Face 2", elem_id="faceswaplab_compare_face2"
                )
            compare_btn = gr.Button("Compare", elem_id="faceswaplab_compare_btn")
            compare_result_text = gr.Textbox(
                interactive=False,
                label="Similarity",
                value="0",
                elem_id="faceswaplab_compare_result",
            )
        with gr.Tab("Extract"):
            gr.Markdown(
                """Extract all faces from a batch of images. Will apply enhancement in the tools enhancement tab."""
            )
            with gr.Row():
                extracted_source_files = gr.components.File(
                    type="file",
                    file_count="multiple",
                    label="Batch Sources Images",
                    optional=True,
                    elem_id="faceswaplab_extract_batch_images",
                )
                extracted_faces = gr.Gallery(
                    label="Extracted faces",
                    show_label=False,
                    elem_id="faceswaplab_extract_results",
                ).style(columns=[2], rows=[2])
            extract_save_path = gr.Textbox(
                label="Destination Directory",
                value="",
                elem_id="faceswaplab_extract_destination",
            )
            extract_btn = gr.Button("Extract", elem_id="faceswaplab_extract_btn")
        with gr.Tab("Explore Model"):
            model = gr.Dropdown(
                choices=models,
                label="Model not found, please download one and reload automatic 1111",
                elem_id="faceswaplab_explore_model",
            )
            explore_btn = gr.Button("Explore", elem_id="faceswaplab_explore_btn")
            explore_result_text = gr.Dataframe(
                interactive=False,
                label="Explored",
                elem_id="faceswaplab_explore_result",
            )
        with gr.Tab("Analyse Face"):
            img_to_analyse = gr.components.Image(
                type="pil", label="Face", elem_id="faceswaplab_analyse_face"
            )
            analyse_det_threshold = gr.Slider(
                0.1,
                1,
                0.5,
                step=0.01,
                label="Detection threshold",
                elem_id="faceswaplab_analyse_det_threshold",
            )
            analyse_btn = gr.Button("Analyse", elem_id="faceswaplab_analyse_btn")
            analyse_results = gr.Textbox(
                label="Results",
                interactive=False,
                value="",
                elem_id="faceswaplab_analyse_results",
            )
        with gr.Tab("Batch Process"):
            with gr.Tab("Source Images"):
                gr.Markdown(
                    """Batch process images. Will apply enhancement in the tools enhancement tab."""
                )
                with gr.Row():
                    batch_source_files = gr.components.File(
                        type="file",
                        file_count="multiple",
                        label="Batch Sources Images",
                        optional=True,
                        elem_id="faceswaplab_batch_images",
                    )
                    batch_results = gr.Gallery(
                        label="Batch result",
                        show_label=False,
                        elem_id="faceswaplab_batch_results",
                    ).style(columns=[2], rows=[2])
                batch_save_path = gr.Textbox(
                    label="Destination Directory",
                    value="outputs/faceswap/",
                    elem_id="faceswaplab_batch_destination",
                )
                batch_save_btn = gr.Button(
                    "Process & Save", elem_id="faceswaplab_extract_btn"
                )
            unit_components = []
            for i in range(1, opts.data.get("faceswaplab_units_count", 3) + 1):
                unit_components += faceswap_unit_ui(False, i, id_prefix="faceswaplab_tab")

            upscale_options = upscaler_ui()

    explore_btn.click(
        explore_onnx_faceswap_model, inputs=[model], outputs=[explore_result_text]
    )
    compare_btn.click(compare, inputs=[img1, img2], outputs=[compare_result_text])
    generate_checkpoint_btn.click(
        build_face_checkpoint_and_save, inputs=[batch_files, name], outputs=[preview]
    )
    extract_btn.click(
        extract_faces,
        inputs=[extracted_source_files, extract_save_path] + upscale_options,
        outputs=[extracted_faces],
    )
    analyse_btn.click(
        analyse_faces,
        inputs=[img_to_analyse, analyse_det_threshold],
        outputs=[analyse_results],
    )
    batch_save_btn.click(
        batch_process,
        inputs=[batch_source_files, batch_save_path]
        + unit_components
        + upscale_options,
        outputs=[batch_results],
    )


def on_ui_tabs():
    with gr.Blocks(analytics_enabled=False) as ui_faceswap:
        tools_ui()
    return [(ui_faceswap, "FaceSwapLab", "faceswaplab_tab")]
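on_ui_tabs follows the AUTOMATIC1111 extension contract: it returns a list of (block, tab title, elem_id) tuples. The registration call is not part of this diff; it would typically look like this sketch:

from modules import script_callbacks

script_callbacks.on_ui_tabs(on_ui_tabs)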
@@ -8,20 +8,20 @@ import dill as pickle
import gradio as gr
from insightface.app.common import Face
from PIL import Image
from scripts.faceswaplab_utils.imgutils import pil_to_cv2, check_against_nsfw
from scripts.faceswaplab_utils.faceswaplab_logging import logger


@dataclass
class FaceSwapUnitSettings:
    # ORDER of parameters is IMPORTANT. It should match the result of faceswap_unit_ui
    # The image given in reference
    source_img: Union[Image.Image, str]
    # The checkpoint file
    source_face: str
    # The batch source images
    _batch_files: Union[gr.components.File, List[Image.Image]]
    # Will blend faces if True
    blend_faces: bool
    # Enable this unit
@@ -29,11 +29,11 @@ class FaceSwapUnitSettings:
    # Use same gender filtering
    same_gender: bool
    # Sort faces by their size (from larger to smaller)
    sort_by_size: bool
    # If True, discard images with low similarity
    check_similarity: bool
    # if True will compute similarity and add it to the image info
    _compute_similarity: bool
    # Minimum similarity against the used face (reference, batch or checkpoint)
    min_sim: float
@@ -42,7 +42,7 @@ class FaceSwapUnitSettings:
    # The face index to use for swapping
    _faces_index: str
    # The face index to get image from source
    reference_face_index: int
    # Swap in the source image in img2img (before processing)
    swap_in_source: bool
@@ -72,7 +72,7 @@ class FaceSwapUnitSettings:
        return faces_index

    @property
    def compute_similarity(self):
        return self._compute_similarity or self.check_similarity

    @property
@@ -83,57 +83,65 @@ class FaceSwapUnitSettings:
        return self._batch_files or []

    @property
    def reference_face(self):
        """
        Extract the reference face (only once; stored for the rest of processing).
        The reference face is the checkpoint, the source image, or the first image
        in the batch, in that order.
        """
        if not hasattr(self, "_reference_face"):
            if self.source_face and self.source_face != "None":
                with open(self.source_face, "rb") as file:
                    try:
                        logger.info(f"loading pickle {file.name}")
                        face = Face(pickle.load(file))
                        self._reference_face = face
                    except Exception as e:
                        logger.error("Failed to load checkpoint : %s", e)
            elif self.source_img is not None:
                if isinstance(self.source_img, str):  # source_img is a base64 string
                    if (
                        "base64," in self.source_img
                    ):  # check if the base64 string has a data URL scheme
                        base64_data = self.source_img.split("base64,")[-1]
                        img_bytes = base64.b64decode(base64_data)
                    else:
                        # if no data URL scheme, just decode
                        img_bytes = base64.b64decode(self.source_img)
                    self.source_img = Image.open(io.BytesIO(img_bytes))
                source_img = pil_to_cv2(self.source_img)
                self._reference_face = swapper.get_or_default(
                    swapper.get_faces(source_img), self.reference_face_index, None
                )
                if self._reference_face is None:
                    logger.error("Face not found in reference image")
            else:
                self._reference_face = None

        if self._reference_face is None:
            logger.error("You need at least one reference face")

        return self._reference_face

    @property
    def faces(self):
        """
        Extract all faces (including the reference face) to provide an array of faces.
        Only processed once.
        """
        if self.batch_files is not None and not hasattr(self, "_faces"):
            self._faces = (
                [self.reference_face] if self.reference_face is not None else []
            )
            for file in self.batch_files:
                if isinstance(file, Image.Image):
                    img = file
                else:
                    img = Image.open(file.name)
                face = swapper.get_or_default(
                    swapper.get_faces(pil_to_cv2(img)), 0, None
                )
                if face is not None:
                    self._faces.append(face)
        return self._faces
@@ -142,11 +150,26 @@ class FaceSwapUnitSettings:
        """
        Blend the faces using the mean of all embeddings
        """
        if not hasattr(self, "_blended_faces"):
            self._blended_faces = swapper.blend_faces(self.faces)
            assert (
                all(
                    [
                        not np.array_equal(
                            self._blended_faces.embedding, face.embedding
                        )
                        for face in self.faces
                    ]
                )
                if len(self.faces) > 1
                else True
            ), "Blended face cannot be identical to one of the source faces if len(faces) > 1"
            assert (
                not np.array_equal(
                    self._blended_faces.embedding, self.reference_face.embedding
                )
                if len(self.faces) > 1
                else True
            ), "Blended face cannot be identical to the reference face if len(faces) > 1"

        return self._blended_faces
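swapper.blend_faces itself is not shown in this diff; per the docstring above it blends by averaging embeddings. A rough illustrative sketch of that idea (not the project's actual implementation):

import numpy as np

def blend_embeddings_sketch(faces):
    # Stack each face's identity embedding and take the element-wise mean.
    embeddings = np.stack([face.embedding for face in faces])
    return embeddings.mean(axis=0)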
@@ -1,94 +1,143 @@
from scripts.faceswaplab_utils.models_utils import get_face_checkpoints
import gradio as gr


def faceswap_unit_ui(is_img2img, unit_num=1, id_prefix="faceswaplab"):
    with gr.Tab(f"Face {unit_num}"):
        with gr.Column():
            gr.Markdown(
                """Reference is an image. First face will be extracted.
            First face of batches sources will be extracted and used as input (or blended if blend is activated)."""
            )
            with gr.Row():
                img = gr.components.Image(
                    type="pil",
                    label="Reference",
                    elem_id=f"{id_prefix}_face{unit_num}_reference_image",
                )
                batch_files = gr.components.File(
                    type="file",
                    file_count="multiple",
                    label="Batch Sources Images",
                    optional=True,
                    elem_id=f"{id_prefix}_face{unit_num}_batch_source_face_files",
                )
            gr.Markdown(
                """Face checkpoint built with the checkpoint builder in tools. Will overwrite reference image."""
            )
            with gr.Row():
                face = gr.Dropdown(
                    choices=get_face_checkpoints(),
                    label="Face Checkpoint (precedence over reference face)",
                    elem_id=f"{id_prefix}_face{unit_num}_face_checkpoint",
                )
                refresh = gr.Button(
                    value="",
                    variant="tool",
                    elem_id=f"{id_prefix}_face{unit_num}_refresh_checkpoints",
                )

            def refresh_fn(selected):
                return gr.Dropdown.update(
                    value=selected, choices=get_face_checkpoints()
                )

            refresh.click(fn=refresh_fn, inputs=face, outputs=face)
            with gr.Row():
                enable = gr.Checkbox(
                    False,
                    placeholder="enable",
                    label="Enable",
                    elem_id=f"{id_prefix}_face{unit_num}_enable",
                )
                blend_faces = gr.Checkbox(
                    True,
                    placeholder="Blend Faces",
                    label="Blend Faces ((Source|Checkpoint)+References = 1)",
                    elem_id=f"{id_prefix}_face{unit_num}_blend_faces",
                    interactive=True,
                )
            gr.Markdown("""Discard images with low similarity or no faces :""")
            with gr.Row():
                check_similarity = gr.Checkbox(
                    False,
                    placeholder="discard",
                    label="Check similarity",
                    elem_id=f"{id_prefix}_face{unit_num}_check_similarity",
                )
                compute_similarity = gr.Checkbox(
                    False,
                    label="Compute similarity",
                    elem_id=f"{id_prefix}_face{unit_num}_compute_similarity",
                )
                min_sim = gr.Slider(
                    0,
                    1,
                    0,
                    step=0.01,
                    label="Min similarity",
                    elem_id=f"{id_prefix}_face{unit_num}_min_similarity",
                )
                min_ref_sim = gr.Slider(
                    0,
                    1,
                    0,
                    step=0.01,
                    label="Min reference similarity",
                    elem_id=f"{id_prefix}_face{unit_num}_min_ref_similarity",
                )
            gr.Markdown(
                """Select the face to be swapped, you can sort by size or use the same gender as the desired face:"""
            )
            with gr.Row():
                same_gender = gr.Checkbox(
                    False,
                    placeholder="Same Gender",
                    label="Same Gender",
                    elem_id=f"{id_prefix}_face{unit_num}_same_gender",
                )
                sort_by_size = gr.Checkbox(
                    False,
                    placeholder="Sort by size",
                    label="Sort by size (larger>smaller)",
                    elem_id=f"{id_prefix}_face{unit_num}_sort_by_size",
                )
                target_faces_index = gr.Textbox(
                    value="0",
                    placeholder="Which face to swap (comma separated), start from 0 (by gender if same_gender is enabled)",
                    label="Target face : Comma separated face number(s)",
                    elem_id=f"{id_prefix}_face{unit_num}_target_faces_index",
                )
            gr.Markdown(
                """The following will only affect reference face image (and is not affected by sort by size) :"""
            )
            reference_faces_index = gr.Number(
                value=0,
                precision=0,
                minimum=0,
                placeholder="Which face to get from reference image start from 0",
                label="Reference source face : start from 0",
                elem_id=f"{id_prefix}_face{unit_num}_reference_face_index",
            )
            gr.Markdown(
                """Configure swapping. Swapping can occur before img2img, after or both :""",
                visible=is_img2img,
            )
            swap_in_source = gr.Checkbox(
                False,
                placeholder="Swap face in source image",
                label="Swap in source image (blended face)",
                visible=is_img2img,
                elem_id=f"{id_prefix}_face{unit_num}_swap_in_source",
            )
            swap_in_generated = gr.Checkbox(
                True,
                placeholder="Swap face in generated image",
                label="Swap in generated image",
                visible=is_img2img,
                elem_id=f"{id_prefix}_face{unit_num}_swap_in_generated",
            )
    # If changed, you need to change FaceSwapUnitSettings accordingly
    # ORDER of parameters is IMPORTANT. It should match the result of FaceSwapUnitSettings
@@ -6,63 +6,122 @@ from modules.shared import cmd_opts, opts, state
import scripts.faceswaplab_postprocessing.upscaling as upscaling
from scripts.faceswaplab_utils.faceswaplab_logging import logger


def upscaler_ui():
    with gr.Tab(f"Post-Processing"):
        gr.Markdown(
            """Upscaling is performed on the whole image. Upscaling happens before face restoration."""
        )
        with gr.Row():
            face_restorer_name = gr.Radio(
                label="Restore Face",
                choices=["None"] + [x.name() for x in shared.face_restorers],
                value=lambda: opts.data.get(
                    "faceswaplab_pp_default_face_restorer",
                    shared.face_restorers[0].name(),
                ),
                type="value",
                elem_id="faceswaplab_pp_face_restorer",
            )
            with gr.Column():
                face_restorer_visibility = gr.Slider(
                    0,
                    1,
                    value=lambda: opts.data.get(
                        "faceswaplab_pp_default_face_restorer_visibility", 1
                    ),
                    step=0.001,
                    label="Restore visibility",
                    elem_id="faceswaplab_pp_face_restorer_visibility",
                )
                codeformer_weight = gr.Slider(
                    0,
                    1,
                    value=lambda: opts.data.get(
                        "faceswaplab_pp_default_face_restorer_weight", 1
                    ),
                    step=0.001,
                    label="codeformer weight",
                    elem_id="faceswaplab_pp_face_restorer_weight",
                )
        upscaler_name = gr.Dropdown(
            choices=[upscaler.name for upscaler in shared.sd_upscalers],
            value=lambda: opts.data.get("faceswaplab_pp_default_upscaler", "None"),
            label="Upscaler",
            elem_id="faceswaplab_pp_upscaler",
        )
        upscaler_scale = gr.Slider(
            1,
            8,
            1,
            step=0.1,
            label="Upscaler scale",
            elem_id="faceswaplab_pp_upscaler_scale",
        )
        upscaler_visibility = gr.Slider(
            0,
            1,
            value=lambda: opts.data.get(
                "faceswaplab_pp_default_upscaler_visibility", 1
            ),
            step=0.1,
            label="Upscaler visibility (if scale = 1)",
            elem_id="faceswaplab_pp_upscaler_visibility",
        )
        with gr.Accordion(f"Post Inpainting", open=True):
            gr.Markdown(
                """Inpainting sends the image to inpainting with a mask on the face (once for each face)."""
            )
            inpainting_when = gr.Dropdown(
                elem_id="faceswaplab_pp_inpainting_when",
                choices=[
                    e.value for e in upscaling.InpaintingWhen.__members__.values()
                ],
                value=[upscaling.InpaintingWhen.BEFORE_RESTORE_FACE.value],
                label="Enable/When",
            )
            inpainting_denoising_strength = gr.Slider(
                0,
                1,
                0,
                step=0.01,
                elem_id="faceswaplab_pp_inpainting_denoising_strength",
                label="Denoising strength (will send face to img2img after processing)",
            )
            inpainting_denoising_prompt = gr.Textbox(
                "Portrait of a [gender]",
                elem_id="faceswaplab_pp_inpainting_denoising_prompt",
                label="Inpainting prompt, use [gender] instead of man or woman",
            )
            inpainting_denoising_negative_prompt = gr.Textbox(
                "",
                elem_id="faceswaplab_pp_inpainting_denoising_neg_prompt",
                label="Inpainting negative prompt, use [gender] instead of man or woman",
            )
            with gr.Row():
                samplers_names = [s.name for s in modules.sd_samplers.all_samplers]
                inpainting_sampler = gr.Dropdown(
                    choices=samplers_names,
                    value=[samplers_names[0]],
                    label="Inpainting Sampler",
                    elem_id="faceswaplab_pp_inpainting_sampler",
                )
                inpainting_denoising_steps = gr.Slider(
                    1,
                    150,
                    20,
                    step=1,
                    label="Inpainting steps",
                    elem_id="faceswaplab_pp_inpainting_steps",
                )
            inpaiting_model = gr.Dropdown(
                choices=["Current"] + sd_models.checkpoint_tiles(),
                default="Current",
                label="sd model (experimental)",
                elem_id="faceswaplab_pp_inpainting_sd_model",
            )
    return [
        face_restorer_name,
        face_restorer_visibility,
@@ -76,5 +135,5 @@ def upscaler_ui():
        inpainting_denoising_steps,
        inpainting_sampler,
        inpainting_when,
        inpaiting_model,
    ]
@@ -4,6 +4,7 @@ import sys
from modules import shared
from PIL import Image


class ColoredFormatter(logging.Formatter):
    COLORS = {
        "DEBUG": "\033[0;36m",  # CYAN
@@ -40,12 +41,16 @@ loglevel = getattr(logging, loglevel_string.upper(), "INFO")
logger.setLevel(loglevel)

import tempfile

if logger.getEffectiveLevel() <= logging.DEBUG:
    DEBUG_DIR = tempfile.mkdtemp()


def save_img_debug(img: Image.Image, message: str, *opts):
    if logger.getEffectiveLevel() <= logging.DEBUG:
        with tempfile.NamedTemporaryFile(
            dir=DEBUG_DIR, delete=False, suffix=".png"
        ) as temp_file:
            img_path = temp_file.name
            img.save(img_path)
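save_img_debug is intentionally a no-op unless the effective log level is DEBUG, so calls can stay in hot paths. A quick usage sketch:

from PIL import Image

save_img_debug(Image.new("RGB", (64, 64)), "after face swap")  # writes to DEBUG_DIR only at DEBUG level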
@@ -1,6 +1,6 @@
import io
from typing import Optional
from PIL import Image, ImageChops, ImageOps, ImageFilter
import cv2
import numpy as np
from math import isqrt, ceil
@@ -10,6 +10,7 @@ from scripts.faceswaplab_globals import NSFW_SCORE
from modules import processing
import base64


def check_against_nsfw(img):
    shapes = []
    chunks = detect(img)
@@ -17,6 +18,7 @@ def check_against_nsfw(img):
        shapes.append(chunk["score"] > NSFW_SCORE)
    return any(shapes)


def pil_to_cv2(pil_img):
    return cv2.cvtColor(np.array(pil_img), cv2.COLOR_RGB2BGR)
@@ -24,6 +26,7 @@ def pil_to_cv2(pil_img):
def cv2_to_pil(cv2_img):
    return Image.fromarray(cv2.cvtColor(cv2_img, cv2.COLOR_BGR2RGB))
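pil_to_cv2 and cv2_to_pil are exact inverses (they only swap the RGB/BGR channel order), which the following round-trip check illustrates:

from PIL import Image

img = Image.new("RGB", (32, 32), "red")
assert cv2_to_pil(pil_to_cv2(img)).tobytes() == img.tobytes()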

def torch_to_pil(images):
    """
    Convert a numpy image or a batch of images to a PIL image.
@@ -49,7 +52,10 @@ def pil_to_torch(pil_images):
    torch_image = torch.from_numpy(numpy_image).permute(2, 0, 1)
    return torch_image


from collections import Counter


def create_square_image(image_list):
    """
    Creates a square image by combining multiple images in a grid pattern.
@@ -101,6 +107,7 @@ def create_square_image(image_list):
    # Return None if there are no images or only one image in the image_list
    return None


def create_mask(image, box_coords):
    width, height = image.size
    mask = Image.new("L", (width, height), 255)
@@ -113,7 +120,10 @@ def create_mask(image, box_coords):
                mask.putpixel((x, y), 0)
    return mask


def apply_mask(
    img: Image.Image, p: processing.StableDiffusionProcessing, batch_index: int
) -> Image.Image:
    """
    Apply mask overlay and color correction to an image if enabled
@@ -125,24 +135,25 @@ def apply_mask(img : Image.Image,p : processing.StableDiffusionProcessing, batch
    Returns:
        PIL Image object
    """
    if isinstance(p, processing.StableDiffusionProcessingImg2Img):
        if p.inpaint_full_res:
            overlays = p.overlay_images
            if overlays is None or batch_index >= len(overlays):
                return img
            overlay: Image.Image = overlays[batch_index]
            overlay = overlay.resize((img.size), resample=Image.Resampling.LANCZOS)
            img = img.copy()
            img.paste(overlay, (0, 0), overlay)
            return img
        img = processing.apply_overlay(img, p.paste_to, batch_index, p.overlay_images)
        if p.color_corrections is not None and batch_index < len(p.color_corrections):
            img = processing.apply_color_correction(
                p.color_corrections[batch_index], img
            )
    return img


def prepare_mask(
    mask: Image.Image, p: processing.StableDiffusionProcessing
) -> Image.Image:
@@ -167,19 +178,19 @@ def prepare_mask(
        mask (Image.Image): The prepared mask as a PIL Image object.
    """
    mask = mask.convert("L")
    # FIXME : Properly fix blur
    # if getattr(p, "mask_blur", 0) > 0:
    #     mask = mask.filter(ImageFilter.GaussianBlur(p.mask_blur))
    return mask


def base64_to_pil(base64str: Optional[str]) -> Optional[Image.Image]:
    if base64str is None:
        return None
    if "base64," in base64str:  # check if the base64 string has a data URL scheme
        base64_data = base64str.split("base64,")[-1]
        img_bytes = base64.b64decode(base64_data)
    else:
        # if no data URL scheme, just decode
        img_bytes = base64.b64decode(base64str)
    return Image.open(io.BytesIO(img_bytes))
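A usage sketch for base64_to_pil; face.png is a placeholder file name:

import base64

with open("face.png", "rb") as f:
    data_url = "data:image/png;base64," + base64.b64encode(f.read()).decode()
img = base64_to_pil(data_url)  # a bare base64 payload (no data URL prefix) also works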
@@ -1,4 +1,3 @@
import glob
import os
import modules.scripts as scripts
@@ -7,6 +6,7 @@ from scripts.faceswaplab_globals import EXTENSION_PATH
from modules.shared import opts
from scripts.faceswaplab_utils.faceswaplab_logging import logger


def get_models():
    """
    Retrieve a list of swap model files.
@@ -29,17 +29,21 @@ def get_models():
    return models


def get_current_model() -> str:
    model = opts.data.get("faceswaplab_model", None)
    if model is None:
        models = get_models()
        model = models[0] if len(models) else None
    logger.info("Try to use model : %s", model)
    if not os.path.isfile(model):
        logger.error("The model %s cannot be found or loaded", model)
        raise FileNotFoundError(
            "No faceswap model found. Please add it to the faceswaplab directory."
        )
    return model

def get_face_checkpoints():
    """
    Retrieve a list of face checkpoint paths.
@@ -50,6 +54,8 @@ def get_face_checkpoints():
    Returns:
        list: A list of face paths, including the string "None" as the first element.
    """
    faces_path = os.path.join(
        scripts.basedir(), "models", "faceswaplab", "faces", "*.pkl"
    )
    faces = glob.glob(faces_path)
    return ["None"] + faces