add pre-commit hooks configuration

main
Tran Xen 2 years ago
parent 8577d0186d
commit 5d4a29ff1e

@ -0,0 +1,14 @@
repos:
- repo: https://github.com/psf/black
rev: 23.7.0
hooks:
- id: black
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v4.4.0
hooks:
- id: check-added-large-files
- id: check-case-conflict
- id: check-docstring-first
- id: detect-private-key
- id: trailing-whitespace
- id: fix-byte-order-marker

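With this configuration committed, contributors can enable the hooks locally (assuming the pre-commit package is installed, e.g. via pip):

pre-commit install          # register the hooks in .git/hooks
pre-commit run --all-files  # optional one-off pass over the whole repository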
@ -6,87 +6,153 @@ import base64, io
from io import BytesIO
from typing import List, Tuple, Optional
class InpaintingWhen(Enum):
NEVER = "Never"
BEFORE_UPSCALING = "Before Upscaling/all"
BEFORE_RESTORE_FACE = "After Upscaling/Before Restore Face"
AFTER_ALL = "After All"
class FaceSwapUnit(BaseModel):
# The image given in reference
source_img: str = Field(
description="base64 reference image",
examples=["data:image/jpeg;base64,/9j/4AAQSkZJRgABAQECWAJYAAD...."],
default=None,
)
# The checkpoint file
source_face: str = Field(
description="face checkpoint (from models/faceswaplab/faces)",
examples=["my_face.pkl"],
default=None,
)
# base64 batch source images
batch_images: Tuple[str] = Field(
description="list of base64 batch source images",
examples=[
"data:image/jpeg;base64,/9j/4AAQSkZJRgABAQECWAJYAAD....",
"data:image/jpeg;base64,/9j/4AAQSkZJRgABAQECWAJYAAD....",
],
default=None,
)
# Will blend faces if True
blend_faces: bool = Field(description="Will blend faces if True", default=True)
# Use same gender filtering
same_gender: bool = Field(description="Use same gender filtering", default=True)
# If True, discard images with low similarity
check_similarity: bool = Field(
description="If True, discard images with low similarity", default=False
)
# if True will compute similarity and add it to the image info
compute_similarity: bool = Field(
description="If True will compute similarity and add it to the image info",
default=False,
)
# Minimum similarity against the used face (reference, batch or checkpoint)
min_sim: float = Field(
description="Minimum similarity against the used face (reference, batch or checkpoint)",
default=0.0,
)
# Minimum similarity against the reference (reference or checkpoint if checkpoint is given)
min_ref_sim: float = Field(
description="Minimum similarity against the reference (reference or checkpoint if checkpoint is given)",
default=0.0,
)
# The face index to use for swapping
faces_index: Tuple[int] = Field(
description="The face index to use for swapping, list of face numbers starting from 0",
default=(0,),
)
class PostProcessingOptions(BaseModel):
face_restorer_name: str = Field(description="face restorer name", default=None)
restorer_visibility: float = Field(
description="face restorer visibility", default=1, le=1, ge=0
)
codeformer_weight: float = Field(
description="face restorer codeformer weight", default=1, le=1, ge=0
)
upscaler_name: str = Field(description="upscaler name", default=None)
scale: float = Field(description="upscaling scale", default=1, le=10, ge=0)
upscale_visibility: float = Field(
description="upscaler visibility", default=1, le=1, ge=0
)
inpainting_denoising_strengh: float = Field(
description="Inpainting denoising strength", default=0, lt=1, ge=0
)
inpainting_prompt: str = Field(
description="Inpainting prompt",
examples=["Portrait of a [gender]"],
default="Portrait of a [gender]",
)
inpainting_negative_prompt: str = Field(
description="Inpainting negative prompt",
examples=[
"Deformed, blurry, bad anatomy, disfigured, poorly drawn face, mutation"
],
default="",
)
inpainting_steps: int = Field(
description="Inpainting steps",
examples=[20],
ge=1,
le=150,
default=20,
)
inpainting_sampler: str = Field(
description="Inpainting sampler", examples=["Euler"], default="Euler"
)
inpainting_when: InpaintingWhen = Field(
description="When inpainting happens",
examples=[e.value for e in InpaintingWhen.__members__.values()],
default=InpaintingWhen.NEVER,
)
class FaceSwapRequest(BaseModel):
image: str = Field(
description="base64 reference image",
examples=["data:image/jpeg;base64,/9j/4AAQSkZJRgABAQECWAJYAAD...."],
default=None,
)
units: List[FaceSwapUnit]
postprocessing: PostProcessingOptions
class FaceSwapResponse(BaseModel):
images: List[str] = Field(description="base64 swapped image", default=None)
infos: List[str]
@property
def pil_images(self):
return [base64_to_pil(img) for img in self.images]
def pil_to_base64(img):
if isinstance(img, str):
img = Image.open(img)
buffer = BytesIO()
img.save(buffer, format="PNG")
img_data = buffer.getvalue()
base64_data = base64.b64encode(img_data)
return base64_data.decode("utf-8")
def base64_to_pil(base64str: Optional[str]) -> Optional[Image.Image]:
if base64str is None:
return None
if "base64," in base64str: # check if the base64 string has a data URL scheme
base64_data = base64str.split("base64,")[-1]
img_bytes = base64.b64decode(base64_data)
else:
# if no data URL scheme, just decode

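The two helpers above are designed to round-trip; a minimal sketch (the input file name is hypothetical, and pil_to_base64 also accepts a path in place of an Image):

from PIL import Image

img = Image.open("portrait.png")  # hypothetical input file
b64 = pil_to_base64(img)  # plain base64 payload, no "data:" prefix
restored = base64_to_pil("data:image/png;base64," + b64)  # prefix is stripped before decoding
assert restored is not None and restored.size == img.size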
@ -1,39 +1,45 @@
import requests
from PIL import Image
from client_utils import (
FaceSwapRequest,
FaceSwapUnit,
PostProcessingOptions,
FaceSwapResponse,
pil_to_base64,
)
address = "http://127.0.0.1:7860"
# First face unit :
unit1 = FaceSwapUnit(
source_img=pil_to_base64("../../references/man.png"), # The face you want to use
faces_index=(0,), # Replace first face
)
# Second face unit :
unit2 = FaceSwapUnit(
source_img=pil_to_base64("../../references/woman.png"), # The face you want to use
same_gender=True,
faces_index=(0,), # Replace first woman since same gender is on
)
# Post-processing config :
pp = PostProcessingOptions(
face_restorer_name="CodeFormer", codeformer_weight=0.5, restorer_visibility=1
)
# Prepare the request
request = FaceSwapRequest(
image=pil_to_base64("test_image.png"), units=[unit1, unit2], postprocessing=pp
)
result = requests.post(
url=f"{address}/faceswaplab/swap_face",
data=request.json(),
headers={"Content-Type": "application/json; charset=utf-8"},
)
response = FaceSwapResponse.parse_obj(result.json())
for img, info in zip(response.pil_images, response.infos):
img.show(title=info)

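If the results should be persisted rather than displayed, the same loop can write them to disk (output file names are arbitrary):

for i, (img, info) in enumerate(zip(response.pil_images, response.infos)):
    img.save(f"swapped_{i}.png")
    print(info)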
@ -13,11 +13,21 @@ model_url = "https://huggingface.co/henryruhs/roop/resolve/main/inswapper_128.on
model_name = os.path.basename(model_url)
model_path = os.path.join(models_dir, model_name)
def download(url, path):
request = urllib.request.urlopen(url)
total = int(request.headers.get("Content-Length", 0))
with tqdm(
total=total, desc="Downloading", unit="B", unit_scale=True, unit_divisor=1024
) as progress:
urllib.request.urlretrieve(
url,
path,
reporthook=lambda count, block_size, total_size: progress.update(
block_size
),
)
os.makedirs(models_dir, exist_ok=True)
os.makedirs(faces_dir, exist_ok=True)

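For illustration, the helper is typically guarded so the model is only fetched once; a sketch using the names defined above:

if not os.path.exists(model_path):
    download(model_url, model_path)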
@ -0,0 +1,7 @@
[mypy]
check_untyped_defs = True
disallow_any_generics = True
disallow_untyped_calls = True
disallow_untyped_defs = True
ignore_missing_imports = True
strict_optional = False

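As a quick illustration of what disallow_untyped_defs enforces, a fully annotated function type-checks while an unannotated one is rejected (hypothetical example):

def half(value: float) -> float:  # passes: parameters and return type annotated
    return value / 2

def half_untyped(value):  # error: Function is missing a type annotation
    return value / 2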
@ -2,9 +2,12 @@ import importlib
from scripts.faceswaplab_api import faceswaplab_api
from scripts.faceswaplab_settings import faceswaplab_settings
from scripts.faceswaplab_ui import faceswaplab_tab, faceswaplab_unit_ui
from scripts.faceswaplab_utils.models_utils import (
get_current_model,
get_face_checkpoints,
)
from scripts import faceswaplab_globals
from scripts.faceswaplab_swapping import swapper
from scripts.faceswaplab_utils import faceswaplab_logging, imgutils
from scripts.faceswaplab_utils import models_utils
@ -35,15 +38,20 @@ from modules import script_callbacks, scripts
from insightface.app.common import Face
from modules import scripts, shared
from modules.images import save_image, image_grid
from modules.processing import (
Processed,
StableDiffusionProcessing,
StableDiffusionProcessingImg2Img,
)
from modules.shared import opts
from PIL import Image
from scripts.faceswaplab_utils.imgutils import pil_to_cv2, check_against_nsfw
from scripts.faceswaplab_utils.faceswaplab_logging import logger, save_img_debug
from scripts.faceswaplab_globals import VERSION_FLAG
from scripts.faceswaplab_postprocessing.postprocessing_options import (
PostProcessingOptions,
)
from scripts.faceswaplab_postprocessing.postprocessing import enhance_image
from scripts.faceswaplab_ui.faceswaplab_unit_settings import FaceSwapUnitSettings
@ -56,13 +64,13 @@ script_callbacks.on_ui_tabs(faceswaplab_tab.on_ui_tabs)
try:
import modules.script_callbacks as script_callbacks
script_callbacks.on_app_started(faceswaplab_api.faceswaplab_api)
except:
pass
class FaceSwapScript(scripts.Script):
def __init__(self) -> None:
logger.info(f"FaceSwapLab {VERSION_FLAG}")
super().__init__()
@ -102,7 +110,6 @@ class FaceSwapScript(scripts.Script):
def show(self, is_img2img):
return scripts.AlwaysVisible
def ui(self, is_img2img):
with gr.Accordion(f"FaceSwapLab {VERSION_FLAG}", open=False):
components = []
@ -148,25 +155,35 @@ class FaceSwapScript(scripts.Script):
if self.enabled:
p.do_not_save_samples = not self.keep_original_images
def process(self, p: StableDiffusionProcessing, *components):
self.read_config(p, *components)
# If this is an img2img instance, check whether face swapping in the source images is required.
if isinstance(p, StableDiffusionProcessingImg2Img):
if self.enabled and len(self.swap_in_source_units) > 0:
init_images: List[Tuple[Optional[Image.Image], Optional[str]]] = [
(img, None) for img in p.init_images
]
new_inits = swapper.process_images_units(
get_current_model(),
self.swap_in_source_units,
images=init_images,
upscaled_swapper=self.upscaled_swapper_in_source,
force_blend=True,
)
logger.info(f"processed init images: {len(init_images)}")
if new_inits is not None:
p.init_images = [img[0] for img in new_inits]
def postprocess(self, p: StableDiffusionProcessing, processed: Processed, *args):
if self.enabled:
# Get the original images without the grid
orig_images: List[Image.Image] = processed.images[
processed.index_of_first_image :
]
orig_infotexts: List[str] = processed.infotexts[
processed.index_of_first_image :
]
keep_original = self.keep_original_images
@ -176,7 +193,12 @@ class FaceSwapScript(scripts.Script):
if (len(self.swap_in_generated_units)) > 0:
for i, (img, info) in enumerate(zip(orig_images, orig_infotexts)):
batch_index = i % p.batch_size
swapped_images = swapper.process_images_units(
get_current_model(),
self.swap_in_generated_units,
images=[(img, info)],
upscaled_swapper=self.upscaled_swapper_in_generated,
)
if swapped_images is None:
continue
@ -185,14 +207,15 @@ class FaceSwapScript(scripts.Script):
img = swp_img # Will only swap the last image in the batch in next units (FIXME : hard to fix properly but not really critical)
if swp_img is not None:
save_img_debug(swp_img, "Before apply mask")
swp_img = imgutils.apply_mask(swp_img, p, batch_index)
save_img_debug(swp_img, "After apply mask")
try:
if self.postprocess_options is not None:
swp_img = enhance_image(
swp_img, self.postprocess_options
)
except Exception as e:
logger.error("Failed to upscale : %s", e)
@ -200,13 +223,22 @@ class FaceSwapScript(scripts.Script):
images.append(swp_img)
infotexts.append(new_info)
if p.outpath_samples and opts.samples_save:
save_image(
swp_img,
p.outpath_samples,
"",
p.all_seeds[batch_index],
p.all_prompts[batch_index],
opts.samples_format,
info=new_info,
p=p,
suffix="-swapped",
)
else:
logger.error("swp image is None")
else:
keep_original = True
# Generate grid :
if opts.return_grid and len(images) > 1:
# FIXME: Use sd method; note that if blended is not active, the result will be a bit messy.

@ -4,14 +4,22 @@ from fastapi import FastAPI, Body
from fastapi.exceptions import HTTPException
from modules.api.models import *
from modules.api import api
from scripts.faceswaplab_api.faceswaplab_api_types import (
FaceSwapUnit,
FaceSwapRequest,
FaceSwapResponse,
)
from scripts.faceswaplab_globals import VERSION_FLAG
import gradio as gr
from typing import List, Optional
from scripts.faceswaplab_swapping import swapper
from scripts.faceswaplab_utils.faceswaplab_logging import save_img_debug
from scripts.faceswaplab_ui.faceswaplab_unit_settings import FaceSwapUnitSettings
from scripts.faceswaplab_utils.imgutils import (
pil_to_cv2,
check_against_nsfw,
base64_to_pil,
)
from scripts.faceswaplab_utils.models_utils import get_current_model
from modules.shared import opts
@ -26,18 +34,27 @@ def encode_to_base64(image):
else:
return ""
def encode_np_to_base64(image):
pil = Image.fromarray(image)
return api.encode_pil_to_base64(pil)
def faceswaplab_api(_: gr.Blocks, app: FastAPI):
@app.get(
"/faceswaplab/version",
tags=["faceswaplab"],
description="Get faceswaplab version",
)
async def version():
return {"version": VERSION_FLAG}
# use POST as we consider the method non-idempotent (which is debatable)
@app.post(
"/faceswaplab/swap_face",
tags=["faceswaplab"],
description="Swap a face in an image using units",
)
async def swap_face(request: FaceSwapRequest) -> FaceSwapResponse:
units: List[FaceSwapUnitSettings] = []
src_image: Optional[Image.Image] = base64_to_pil(request.image)
@ -45,7 +62,8 @@ def faceswaplab_api(_: gr.Blocks, app: FastAPI):
if src_image is not None:
for u in request.units:
units.append(
FaceSwapUnitSettings(
source_img=base64_to_pil(u.source_img),
source_face=u.source_face,
_batch_files=u.get_batch_images(),
blend_faces=u.blend_faces,
@ -57,14 +75,18 @@ def faceswaplab_api(_: gr.Blocks, app: FastAPI):
min_sim=u.min_sim,
_faces_index=",".join([str(i) for i in (u.faces_index)]),
swap_in_generated=True,
swap_in_source=False,
)
)
swapped_images = swapper.process_images_units(
get_current_model(),
images=[(src_image, None)],
units=units,
upscaled_swapper=opts.data.get("faceswaplab_upscaled_swapper", False),
)
for img, info in swapped_images:
response.images.append(encode_to_base64(img))
response.infos.append(info)
return response

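For completeness, the version endpoint registered above can be exercised with a plain GET (assuming the default local address used in the client example):

import requests

r = requests.get("http://127.0.0.1:7860/faceswaplab/version")
print(r.json())  # e.g. {"version": "v1.1.0"}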
@ -5,39 +5,71 @@ import dill as pickle
import gradio as gr
from insightface.app.common import Face
from PIL import Image
from scripts.faceswaplab_utils.imgutils import (
pil_to_cv2,
check_against_nsfw,
base64_to_pil,
)
from scripts.faceswaplab_utils.faceswaplab_logging import logger
from pydantic import BaseModel, Field
from scripts.faceswaplab_postprocessing.postprocessing_options import InpaintingWhen
class FaceSwapUnit(BaseModel):
# The image given in reference
source_img: str = Field(
description="base64 reference image",
examples=["data:image/jpeg;base64,/9j/4AAQSkZJRgABAQECWAJYAAD...."],
default=None,
)
# The checkpoint file
source_face: str = Field(
description="face checkpoint (from models/faceswaplab/faces)",
examples=["my_face.pkl"],
default=None,
)
# base64 batch source images
batch_images: Tuple[str] = Field(
description="list of base64 batch source images",
examples=[
"data:image/jpeg;base64,/9j/4AAQSkZJRgABAQECWAJYAAD....",
"data:image/jpeg;base64,/9j/4AAQSkZJRgABAQECWAJYAAD....",
],
default=None,
)
# Will blend faces if True
blend_faces: bool = Field(description="Will blend faces if True", default=True)
# Use same gender filtering
same_gender: bool = Field(description="Use same gender filtering", default=True)
# If True, discard images with low similarity
check_similarity: bool = Field(
description="If True, discard images with low similarity", default=False
)
# if True will compute similarity and add it to the image info
compute_similarity: bool = Field(
description="If True will compute similarity and add it to the image info",
default=False,
)
# Minimum similarity against the used face (reference, batch or checkpoint)
min_sim: float = Field(
description="Minimum similarity against the used face (reference, batch or checkpoint)",
default=0.0,
)
# Minimum similarity against the reference (reference or checkpoint if checkpoint is given)
min_ref_sim: float = Field(
description="Minimum similarity against the reference (reference or checkpoint if checkpoint is given)",
default=0.0,
)
# The face index to use for swapping
faces_index: Tuple[int] = Field(
description="The face index to use for swapping, list of face numbers starting from 0",
default=(0,),
)
def get_batch_images(self) -> List[Image.Image]:
images = []
@ -46,28 +78,64 @@ class FaceSwapUnit(BaseModel) :
images.append(base64_to_pil(img))
return images
class PostProcessingOptions(BaseModel):
face_restorer_name: str = Field(description="face restorer name", default=None)
restorer_visibility: float = Field(
description="face restorer visibility", default=1, le=1, ge=0
)
codeformer_weight: float = Field(
description="face restorer codeformer weight", default=1, le=1, ge=0
)
upscaler_name: str = Field(description="upscaler name", default=None)
scale: float = Field(description="upscaling scale", default=1, le=10, ge=0)
upscale_visibility: float = Field(
description="upscaler visibility", default=1, le=1, ge=0
)
inpainting_denoising_strengh: float = Field(
description="Inpainting denoising strength", default=0, lt=1, ge=0
)
inpainting_prompt: str = Field(
description="Inpainting prompt",
examples=["Portrait of a [gender]"],
default="Portrait of a [gender]",
)
inpainting_negative_prompt: str = Field(
description="Inpainting negative prompt",
examples=[
"Deformed, blurry, bad anatomy, disfigured, poorly drawn face, mutation"
],
default="",
)
inpainting_steps: int = Field(
description="Inpainting steps",
examples=[20],
ge=1,
le=150,
default=20,
)
inpainting_sampler: str = Field(
description="Inpainting sampler", examples=["Euler"], default="Euler"
)
inpainting_when: InpaintingWhen = Field(
description="When inpainting happens",
examples=[e.value for e in InpaintingWhen.__members__.values()],
default=InpaintingWhen.NEVER,
)
class FaceSwapRequest(BaseModel):
image: str = Field(
description="base64 reference image",
examples=["data:image/jpeg;base64,/9j/4AAQSkZJRgABAQECWAJYAAD...."],
default=None,
)
units: List[FaceSwapUnit]
postprocessing: PostProcessingOptions
class FaceSwapResponse(BaseModel):
images: List[str] = Field(description="base64 swapped image", default=None)
infos: List[str]

@ -8,4 +8,3 @@ FACE_PARSER_DIR = os.path.abspath(os.path.join(MODELS_DIR, "parser"))
VERSION_FLAG = "v1.1.0"
EXTENSION_PATH = os.path.join("extensions", "sd-webui-faceswaplab")
NSFW_SCORE = 0.7

@ -6,8 +6,11 @@ import numpy as np
from modules import shared
from scripts.faceswaplab_utils import imgutils
from modules import shared, processing, codeformer_model
from modules.processing import StableDiffusionProcessingImg2Img
from scripts.faceswaplab_postprocessing.postprocessing_options import (
PostProcessingOptions,
InpaintingWhen,
)
from modules import sd_models
from scripts.faceswaplab_swapping import swapper
@ -35,12 +38,17 @@ inpainting_steps : {pp.inpainting_steps}
for face in faces:
bbox = face.bbox.astype(int)
mask = imgutils.create_mask(img, bbox)
prompt = pp.inpainting_prompt.replace(
"[gender]", "man" if face["gender"] == 1 else "woman"
)
negative_prompt = pp.inpainting_negative_prompt.replace(
"[gender]", "man" if face["gender"] == 1 else "woman"
)
logger.info("Denoising prompt : %s", prompt)
logger.info("Denoising strength : %s", pp.inpainting_denoising_strengh)
i2i_kwargs = {
"sampler_name": pp.inpainting_sampler,
"do_not_save_samples": True,
"steps": pp.inpainting_steps,
"width": img.width,
@ -50,7 +58,8 @@ inpainting_steps : {pp.inpainting_steps}
"mask": mask,
"prompt": prompt,
"negative_prompt": negative_prompt,
"denoising_strength" :pp.inpainting_denoising_strengh}
"denoising_strength": pp.inpainting_denoising_strengh,
}
current_model_checkpoint = shared.opts.sd_model_checkpoint
if pp.inpainting_model and pp.inpainting_model != "Current":
# Change checkpoint
@ -72,5 +81,6 @@ inpainting_steps : {pp.inpainting_steps}
except Exception as e:
logger.error("Failed to apply img2img to face : %s", e)
import traceback
traceback.print_exc()
raise e

@ -1,7 +1,10 @@
from modules.face_restoration import FaceRestoration
from scripts.faceswaplab_utils.faceswaplab_logging import logger
from PIL import Image
from scripts.faceswaplab_postprocessing.postprocessing_options import (
PostProcessingOptions,
InpaintingWhen,
)
from scripts.faceswaplab_postprocessing.i2i_pp import img2img_diffusion
from scripts.faceswaplab_postprocessing.upscaling import upscale_img, restore_face

@ -4,12 +4,14 @@ from dataclasses import dataclass
from modules import shared
from enum import Enum
class InpaintingWhen(Enum):
NEVER = "Never"
BEFORE_UPSCALING = "Before Upscaling/all"
BEFORE_RESTORE_FACE = "After Upscaling/Before Restore Face"
AFTER_ALL = "After All"
@dataclass
class PostProcessingOptions:
face_restorer_name: str = ""

@ -1,10 +1,13 @@
from scripts.faceswaplab_postprocessing.postprocessing_options import (
PostProcessingOptions,
InpaintingWhen,
)
from scripts.faceswaplab_utils.faceswaplab_logging import logger
from PIL import Image
import numpy as np
from modules import shared, processing, codeformer_model
def upscale_img(image: Image.Image, pp_options: PostProcessingOptions) -> Image.Image:
if pp_options.upscaler is not None and pp_options.upscaler.name != "None":
original_image = image.copy()
@ -23,14 +26,16 @@ def upscale_img(image : Image.Image, pp_options :PostProcessingOptions) -> Image
return result_image
return image
def restore_face(image: Image.Image, pp_options: PostProcessingOptions) -> Image.Image:
if pp_options.face_restorer is not None:
original_image = image.copy()
logger.info("Restore face with %s", pp_options.face_restorer.name())
numpy_image = np.array(image)
if pp_options.face_restorer_name == "CodeFormer":
numpy_image = codeformer_model.codeformer.restore(
numpy_image, w=pp_options.codeformer_weight
)
else:
numpy_image = pp_options.face_restorer.restore(numpy_image)

@ -2,52 +2,215 @@ from scripts.faceswaplab_utils.models_utils import get_models
from modules import script_callbacks, shared
import gradio as gr
def on_ui_settings():
section = ("faceswaplab", "FaceSwapLab")
models = get_models()
shared.opts.add_option(
"faceswaplab_model",
shared.OptionInfo(
models[0] if len(models) > 0 else "None",
"FaceSwapLab FaceSwap Model",
gr.Dropdown,
{"interactive": True, "choices": models},
section=section,
),
)
shared.opts.add_option(
"faceswaplab_keep_original",
shared.OptionInfo(
False,
"keep original image before swapping",
gr.Checkbox,
{"interactive": True},
section=section,
),
)
shared.opts.add_option(
"faceswaplab_units_count",
shared.OptionInfo(
3,
"Max faces units (requires restart)",
gr.Slider,
{"minimum": 1, "maximum": 10, "step": 1},
section=section,
),
)
shared.opts.add_option(
"faceswaplab_detection_threshold",
shared.OptionInfo(
0.5,
"Detection threshold ",
gr.Slider,
{"minimum": 0.1, "maximum": 0.99, "step": 0.001},
section=section,
),
)
shared.opts.add_option(
"faceswaplab_pp_default_face_restorer",
shared.OptionInfo(
None,
"UI Default post processing face restorer (requires restart)",
gr.Dropdown,
{
"interactive": True,
"choices": ["None"] + [x.name() for x in shared.face_restorers],
},
section=section,
),
)
shared.opts.add_option(
"faceswaplab_pp_default_face_restorer_visibility",
shared.OptionInfo(
1,
"UI Default post processing face restorer visibility (requires restart)",
gr.Slider,
{"minimum": 0, "maximum": 1, "step": 0.001},
section=section,
),
)
shared.opts.add_option(
"faceswaplab_pp_default_face_restorer_weight",
shared.OptionInfo(
1,
"UI Default post processing face restorer weight (requires restart)",
gr.Slider,
{"minimum": 0, "maximum": 1, "step": 0.001},
section=section,
),
)
shared.opts.add_option(
"faceswaplab_pp_default_upscaler",
shared.OptionInfo(
None,
"UI Default post processing upscaler (requires restart)",
gr.Dropdown,
{
"interactive": True,
"choices": [upscaler.name for upscaler in shared.sd_upscalers],
},
section=section,
),
)
shared.opts.add_option(
"faceswaplab_pp_default_upscaler_visibility",
shared.OptionInfo(
1,
"UI Default post processing upscaler visibility(requires restart)",
gr.Slider,
{"minimum": 0, "maximum": 1, "step": 0.001},
section=section,
),
)
shared.opts.add_option(
"faceswaplab_upscaled_swapper",
shared.OptionInfo(
False,
"Upscaled swapper. Applied only to the swapped faces. Apply transformations before merging with the original image.",
gr.Checkbox,
{"interactive": True},
section=section,
),
)
shared.opts.add_option(
"faceswaplab_upscaled_swapper_upscaler",
shared.OptionInfo(
None,
"Upscaled swapper upscaler (Recommanded : LDSR but slow)",
gr.Dropdown,
{
"interactive": True,
"choices": [upscaler.name for upscaler in shared.sd_upscalers],
},
section=section,
),
)
shared.opts.add_option(
"faceswaplab_upscaled_swapper_sharpen",
shared.OptionInfo(
False,
"Upscaled swapper sharpen",
gr.Checkbox,
{"interactive": True},
section=section,
),
)
shared.opts.add_option(
"faceswaplab_upscaled_swapper_fixcolor",
shared.OptionInfo(
False,
"Upscaled swapper color correction",
gr.Checkbox,
{"interactive": True},
section=section,
),
)
shared.opts.add_option(
"faceswaplab_upscaled_improved_mask",
shared.OptionInfo(
True,
"Use improved segmented mask (use pastenet to mask only the face)",
gr.Checkbox,
{"interactive": True},
section=section,
),
)
shared.opts.add_option(
"faceswaplab_upscaled_swapper_face_restorer",
shared.OptionInfo(
None,
"Upscaled swapper face restorer",
gr.Dropdown,
{
"interactive": True,
"choices": ["None"] + [x.name() for x in shared.face_restorers],
},
section=section,
),
)
shared.opts.add_option(
"faceswaplab_upscaled_swapper_face_restorer_visibility",
shared.OptionInfo(
1,
"Upscaled swapper face restorer visibility",
gr.Slider,
{"minimum": 0, "maximum": 1, "step": 0.001},
section=section,
),
)
shared.opts.add_option(
"faceswaplab_upscaled_swapper_face_restorer_weight",
shared.OptionInfo(
1,
"Upscaled swapper face restorer weight (codeformer)",
gr.Slider,
{"minimum": 0, "maximum": 1, "step": 0.001},
section=section,
),
)
shared.opts.add_option(
"faceswaplab_upscaled_swapper_fthresh",
shared.OptionInfo(
10,
"Upscaled swapper fthresh (diff sensitivity) 10 = default behaviour. Low impact.",
gr.Slider,
{"minimum": 5, "maximum": 250, "step": 1},
section=section,
),
)
shared.opts.add_option(
"faceswaplab_upscaled_swapper_erosion",
shared.OptionInfo(
1,
"Upscaled swapper mask erosion factor, 1 = default behaviour. The larger it is, the more blur is applied around the face. Too large and the facial change is no longer visible.",
gr.Slider,
{"minimum": 0, "maximum": 10, "step": 0.001},
section=section,
),
)
script_callbacks.on_ui_settings(on_ui_settings)

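At runtime these options are read back through shared.opts, as the swapping code elsewhere in this commit does; a minimal sketch:

from modules.shared import opts

use_upscaled = opts.data.get("faceswaplab_upscaled_swapper", False)
erosion_factor = opts.data.get("faceswaplab_upscaled_swapper_erosion", 1)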
@ -7,6 +7,7 @@ from functools import lru_cache
from typing import Union, List
from torch import device as torch_device
@lru_cache
def get_parsing_model(device: torch_device) -> torch.nn.Module:
"""
@ -21,7 +22,12 @@ def get_parsing_model(device: torch_device) -> torch.nn.Module:
"""
return init_parsing_model(device=device)
def convert_image_to_tensor(
images: Union[np.ndarray, List[np.ndarray]],
convert_bgr_to_rgb: bool = True,
use_float32: bool = True,
) -> Union[torch.Tensor, List[torch.Tensor]]:
"""
Converts an image or a list of images to PyTorch tensor.
@ -33,10 +39,13 @@ def convert_image_to_tensor(images: Union[np.ndarray, List[np.ndarray]], convert
Returns:
PyTorch tensor or a list of PyTorch tensors.
"""
def _convert_single_image_to_tensor(
image: np.ndarray, convert_bgr_to_rgb: bool, use_float32: bool
) -> torch.Tensor:
if image.shape[2] == 3 and convert_bgr_to_rgb:
if image.dtype == "float64":
image = image.astype("float32")
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
image_tensor = torch.from_numpy(image.transpose(2, 0, 1))
if use_float32:
@ -44,10 +53,14 @@ def convert_image_to_tensor(images: Union[np.ndarray, List[np.ndarray]], convert
return image_tensor
if isinstance(images, list):
return [
_convert_single_image_to_tensor(image, convert_bgr_to_rgb, use_float32)
for image in images
]
else:
return _convert_single_image_to_tensor(images, convert_bgr_to_rgb, use_float32)
def generate_face_mask(face_image: np.ndarray, device: torch.device) -> np.ndarray:
"""
Generates a face mask given a face image.
@ -60,10 +73,16 @@ def generate_face_mask(face_image: np.ndarray, device: torch.device) -> np.ndarr
The face mask as a numpy.ndarray.
"""
# Resize the face image for the model
resized_face_image = cv2.resize(
face_image, (512, 512), interpolation=cv2.INTER_LINEAR
)
# Preprocess the image
face_input = convert_image_to_tensor(
(resized_face_image.astype("float32") / 255.0),
convert_bgr_to_rgb=True,
use_float32=True,
)
normalize(face_input, (0.5, 0.5, 0.5), (0.5, 0.5, 0.5), inplace=True)
assert isinstance(face_input, torch.Tensor)
face_input = torch.unsqueeze(face_input, 0).to(device)
@ -75,7 +94,27 @@ def generate_face_mask(face_image: np.ndarray, device: torch.device) -> np.ndarr
# Generate the mask from the model output
parse_mask = np.zeros(model_output.shape)
MASK_COLOR_MAP = [
0,
255,
255,
255,
255,
255,
255,
255,
255,
255,
255,
255,
255,
255,
0,
255,
0,
0,
0,
]
for idx, color in enumerate(MASK_COLOR_MAP):
parse_mask[model_output == idx] = color

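A minimal sketch of invoking generate_face_mask on a BGR face crop (the input path and CPU device are illustrative):

import cv2
import torch

face_bgr = cv2.imread("face_crop.png")  # OpenCV loads BGR, which the converter expects
mask = generate_face_mask(face_bgr, device=torch.device("cpu"))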
@ -50,12 +50,12 @@ from scripts.faceswaplab_globals import FACE_PARSER_DIR
ROOT_DIR = FACE_PARSER_DIR
def load_file_from_url(url, model_dir=None, progress=True, file_name=None):
"""Ref:https://github.com/1adrianb/face-alignment/blob/master/face_alignment/utils.py
"""
"""Ref:https://github.com/1adrianb/face-alignment/blob/master/face_alignment/utils.py"""
if model_dir is None:
hub_dir = get_dir()
model_dir = os.path.join(hub_dir, "checkpoints")
os.makedirs(os.path.join(ROOT_DIR, model_dir), exist_ok=True)
@ -70,10 +70,12 @@ def load_file_from_url(url, model_dir=None, progress=True, file_name=None):
return cached_file
def init_parsing_model(device="cuda"):
model = ParseNet(in_size=512, out_size=512, parsing_ch=19)
model_url = "https://github.com/sczhou/CodeFormer/releases/download/v0.1.0/parsing_parsenet.pth"
model_path = load_file_from_url(
url=model_url, model_dir="weights/facelib", progress=True, file_name=None
)
load_net = torch.load(model_path, map_location=lambda storage, loc: storage)
model.load_state_dict(load_net, strict=True)
model.eval()

@ -499,27 +499,27 @@ class NormLayer(nn.Module):
input_size: input shape without batch size, for layer norm.
"""
def __init__(self, channels, normalize_shape=None, norm_type="bn"):
super(NormLayer, self).__init__()
norm_type = norm_type.lower()
self.norm_type = norm_type
if norm_type == "bn":
self.norm = nn.BatchNorm2d(channels, affine=True)
elif norm_type == "in":
self.norm = nn.InstanceNorm2d(channels, affine=False)
elif norm_type == "gn":
self.norm = nn.GroupNorm(32, channels, affine=True)
elif norm_type == "pixel":
self.norm = lambda x: F.normalize(x, p=2, dim=1)
elif norm_type == "layer":
self.norm = nn.LayerNorm(normalize_shape)
elif norm_type == "none":
self.norm = lambda x: x * 1.0
else:
assert 1 == 0, f"Norm type {norm_type} not supported."
def forward(self, x, ref=None):
if self.norm_type == "spade":
return self.norm(x, ref)
else:
return self.norm(x)
@ -537,51 +537,56 @@ class ReluLayer(nn.Module):
- none: direct pass
"""
def __init__(self, channels, relu_type="relu"):
super(ReluLayer, self).__init__()
relu_type = relu_type.lower()
if relu_type == "relu":
self.func = nn.ReLU(True)
elif relu_type == "leakyrelu":
self.func = nn.LeakyReLU(0.2, inplace=True)
elif relu_type == "prelu":
self.func = nn.PReLU(channels)
elif relu_type == "selu":
self.func = nn.SELU(True)
elif relu_type == "none":
self.func = lambda x: x * 1.0
else:
assert 1 == 0, f"Relu type {relu_type} not supported."
def forward(self, x):
return self.func(x)
class ConvLayer(nn.Module):
def __init__(
self,
in_channels,
out_channels,
kernel_size=3,
scale="none",
norm_type="none",
relu_type="none",
use_pad=True,
bias=True,
):
super(ConvLayer, self).__init__()
self.use_pad = use_pad
self.norm_type = norm_type
if norm_type in ["bn"]:
bias = False
stride = 2 if scale == "down" else 1
self.scale_func = lambda x: x
if scale == "up":
self.scale_func = lambda x: nn.functional.interpolate(
x, scale_factor=2, mode="nearest"
)
self.reflection_pad = nn.ReflectionPad2d(int(np.ceil((kernel_size - 1.0) / 2)))
self.conv2d = nn.Conv2d(
in_channels, out_channels, kernel_size, stride, bias=bias
)
self.relu = ReluLayer(out_channels, relu_type)
self.norm = NormLayer(out_channels, norm_type=norm_type)
@ -601,19 +606,27 @@ class ResidualBlock(nn.Module):
Residual block recommended in: http://torch.ch/blog/2016/02/04/resnets.html
"""
def __init__(self, c_in, c_out, relu_type="prelu", norm_type="bn", scale="none"):
super(ResidualBlock, self).__init__()
if scale == "none" and c_in == c_out:
self.shortcut_func = lambda x: x
else:
self.shortcut_func = ConvLayer(c_in, c_out, 3, scale)
scale_config_dict = {
"down": ["none", "down"],
"up": ["up", "none"],
"none": ["none", "none"],
}
scale_conf = scale_config_dict[scale]
self.conv1 = ConvLayer(
c_in, c_out, 3, scale_conf[0], norm_type=norm_type, relu_type=relu_type
)
self.conv2 = ConvLayer(
c_out, c_out, 3, scale_conf[1], norm_type=norm_type, relu_type="none"
)
def forward(self, x):
identity = self.shortcut_func(x)
@ -624,20 +637,21 @@ class ResidualBlock(nn.Module):
class ParseNet(nn.Module):
def __init__(
self,
in_size=128,
out_size=128,
min_feat_size=32,
base_ch=64,
parsing_ch=19,
res_depth=10,
relu_type="LeakyReLU",
norm_type="bn",
ch_range=[32, 256],
):
super().__init__()
self.res_depth = res_depth
act_args = {"norm_type": norm_type, "relu_type": relu_type}
min_ch, max_ch = ch_range
ch_clip = lambda x: max(min_ch, min(x, max_ch)) # noqa: E731
@ -652,17 +666,19 @@ class ParseNet(nn.Module):
head_ch = base_ch
for i in range(down_steps):
cin, cout = ch_clip(head_ch), ch_clip(head_ch * 2)
self.encoder.append(ResidualBlock(cin, cout, scale="down", **act_args))
head_ch = head_ch * 2
self.body = []
for i in range(res_depth):
self.body.append(
ResidualBlock(ch_clip(head_ch), ch_clip(head_ch), **act_args)
)
self.decoder = []
for i in range(up_steps):
cin, cout = ch_clip(head_ch), ch_clip(head_ch // 2)
self.decoder.append(ResidualBlock(cin, cout, scale="up", **act_args))
head_ch = head_ch // 2
self.encoder = nn.Sequential(*self.encoder)

@ -12,7 +12,11 @@ from PIL import Image
from sklearn.metrics.pairwise import cosine_similarity
from scripts.faceswaplab_swapping import upscaled_inswapper
from scripts.faceswaplab_utils.imgutils import (
cv2_to_pil,
pil_to_cv2,
check_against_nsfw,
)
from scripts.faceswaplab_utils.faceswaplab_logging import logger, save_img_debug
from scripts import faceswaplab_globals
from modules.shared import opts
@ -48,6 +52,7 @@ def cosine_similarity_face(face1, face2) -> float:
# Return the maximum of 0 and the calculated similarity as the final similarity score
return max(0, similarity[0, 0])
def compare_faces(img1: Image.Image, img2: Image.Image) -> float:
"""
Compares the similarity between two faces extracted from images using cosine similarity.
@ -76,6 +81,7 @@ def compare_faces(img1: Image.Image, img2: Image.Image) -> float:
class FaceModelException(Exception):
"""Exception raised when an error is encountered in the face model."""
def __init__(self, message: str) -> None:
"""
Args:
@ -84,6 +90,7 @@ class FaceModelException(Exception):
self.message = message
super().__init__(self.message)
@lru_cache(maxsize=1)
def getAnalysisModel():
"""
@ -102,9 +109,12 @@ def getAnalysisModel():
name="buffalo_l", providers=providers, root=faceswaplab_globals.ANALYZER_DIR
)
except Exception as e:
logger.error(
"Loading of swapping model failed, please check the requirements (On Windows, download and install Visual Studio. During the install, make sure to include the Python and C++ packages.)"
)
raise FaceModelException("Loading of swapping model failed")
@lru_cache(maxsize=1)
def getFaceSwapModel(model_path: str):
"""
@ -118,12 +128,21 @@ def getFaceSwapModel(model_path: str):
"""
try:
# Initializes the face swap model using the specified model path.
return upscaled_inswapper.UpscaledINSwapper(
insightface.model_zoo.get_model(model_path, providers=providers)
)
except Exception as e:
logger.error(
"Loading of swapping model failed, please check the requirements (On Windows, download and install Visual Studio. During the install, make sure to include the Python and C++ packages.)"
)
def get_faces(
img_data: np.ndarray,
det_size=(640, 640),
det_thresh: Optional[int] = None,
sort_by_face_size=False,
) -> List[Face]:
"""
Detects and retrieves faces from an image using an analysis model.
@ -156,7 +175,11 @@ def get_faces(img_data: np.ndarray, det_size=(640, 640), det_thresh : Optional[i
try:
if sort_by_face_size:
return sorted(
face,
reverse=True,
key=lambda x: (x.bbox[2] - x.bbox[0]) * (x.bbox[3] - x.bbox[1]),
)
# Sort the detected faces based on their x-coordinate of the bounding box
return sorted(face, key=lambda x: x.bbox[0])
@ -164,7 +187,6 @@ def get_faces(img_data: np.ndarray, det_size=(640, 640), det_thresh : Optional[i
return []
@dataclass
class ImageResult:
"""
@ -222,12 +244,15 @@ def get_faces_from_img_files(files):
if len(files) > 0:
for file in files:
img = Image.open(file.name) # Open the image file
face = get_or_default(
get_faces(pil_to_cv2(img)), 0, None
) # Extract faces from the image
if face is not None:
faces.append(face) # Add the detected face to the list of faces
return faces
def blend_faces(faces: List[Face]) -> Face:
"""
Blends the embeddings of multiple faces into a single face.
@ -258,9 +283,15 @@ def blend_faces(faces: List[Face]) -> Face:
# Create a new Face object using the properties of the first face in the list
# Assign the blended embedding to the blended Face object
blended = Face(
embedding=blended_embedding, gender=faces[0].gender, age=faces[0].age
)
assert (
not np.array_equal(blended.embedding, faces[0].embedding)
if len(faces) > 1
else True
), "If len(faces)>0, the blended embedding should not be the same than the first image"
return blended
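The computation of blended_embedding itself falls outside this hunk; a plausible reading, consistent with the assertion above, is an element-wise mean over the source embeddings (a sketch, not the verified implementation):

import numpy as np

embeddings = np.stack([face.embedding for face in faces])
blended_embedding = np.mean(embeddings, axis=0)  # assumption: simple average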
@ -277,7 +308,7 @@ def swap_face(
same_gender=True,
upscaled_swapper=False,
compute_similarity=True,
sort_by_face_size=False,
) -> ImageResult:
"""
Swaps faces in the target image with the source face.
@ -313,19 +344,23 @@ def swap_face(
for i, swapped_face in enumerate(target_faces):
logger.info(f"swap face {i}")
if i in faces_index:
result = face_swapper.get(
result, swapped_face, source_face, upscale=upscaled_swapper
)
result_image = Image.fromarray(cv2.cvtColor(result, cv2.COLOR_BGR2RGB))
return_result.image = result_image
if compute_similarity:
try:
result_faces = get_faces(
cv2.cvtColor(np.array(result_image), cv2.COLOR_RGB2BGR),
sort_by_face_size=sort_by_face_size,
)
if same_gender:
result_faces = [
x for x in result_faces if x["gender"] == gender
]
for i, swapped_face in enumerate(result_faces):
logger.info(f"compare face {i}")
@ -349,7 +384,14 @@ def swap_face(
return return_result
def process_image_unit(
model,
unit: FaceSwapUnitSettings,
image: Image.Image,
info=None,
upscaled_swapper=False,
force_blend=False,
) -> List:
"""Process one image and return a List of (image, info) (one if blended, many if not).
Args:
@ -371,8 +413,13 @@ def process_image_unit(model, unit : FaceSwapUnitSettings, image: Image.Image, i
else:
logger.info("blend all faces together")
src_faces = [unit.blended_faces]
assert(not np.array_equal(unit.reference_face.embedding,src_faces[0].embedding) if len(unit.faces)>1 else True), "Reference face cannot be the same as blended"
assert (
not np.array_equal(
unit.reference_face.embedding, src_faces[0].embedding
)
if len(unit.faces) > 1
else True
), "Reference face cannot be the same as blended"
for i, src_face in enumerate(src_faces):
logger.info(f"Process face {i}")
@ -392,14 +439,30 @@ def process_image_unit(model, unit : FaceSwapUnitSettings, image: Image.Image, i
same_gender=unit.same_gender,
upscaled_swapper=upscaled_swapper,
compute_similarity=unit.compute_similarity,
sort_by_face_size=unit.sort_by_size
sort_by_face_size=unit.sort_by_size,
)
save_img_debug(result.image, "After swap")
if result.image is None:
logger.error("Result image is None")
if (not unit.check_similarity) or result.similarity and all([result.similarity.values()!=0]+[x >= unit.min_sim for x in result.similarity.values()]) and all([result.ref_similarity.values()!=0]+[x >= unit.min_ref_sim for x in result.ref_similarity.values()]):
results.append((result.image, f"{info}, similarity = {result.similarity}, ref_similarity = {result.ref_similarity}"))
if (
(not unit.check_similarity)
or result.similarity
and all(
[result.similarity.values() != 0]
+ [x >= unit.min_sim for x in result.similarity.values()]
)
and all(
[result.ref_similarity.values() != 0]
+ [x >= unit.min_ref_sim for x in result.ref_similarity.values()]
)
):
results.append(
(
result.image,
f"{info}, similarity = {result.similarity}, ref_similarity = {result.ref_similarity}",
)
)
else:
logger.warning(
f"skip, similarity to low, sim = {result.similarity} (target {unit.min_sim}) ref sim = {result.ref_similarity} (target = {unit.min_ref_sim})"
@ -407,7 +470,14 @@ def process_image_unit(model, unit : FaceSwapUnitSettings, image: Image.Image, i
logger.debug("process_image_unit : Unit produced %s results", len(results))
return results
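# The filter above reads more clearly when flattened. A hedged equivalent,
# assuming result.similarity / result.ref_similarity are dicts of float
# scores; note that the original `[... .values() != 0]` term compares a
# dict view to the integer 0, so it is always True and acts as a no-op.
def example_keep_result(result, unit) -> bool:
    if not unit.check_similarity:
        return True
    if not result.similarity:
        return False
    return all(s >= unit.min_sim for s in result.similarity.values()) and all(
        s >= unit.min_ref_sim for s in result.ref_similarity.values()
    )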
def process_images_units(model, units : List[FaceSwapUnitSettings], images: List[Tuple[Optional[Image.Image], Optional[str]]], upscaled_swapper = False, force_blend = False) -> Union[List,None]:
def process_images_units(
model,
units: List[FaceSwapUnitSettings],
images: List[Tuple[Optional[Image.Image], Optional[str]]],
upscaled_swapper=False,
force_blend=False,
) -> Union[List, None]:
if len(units) == 0:
logger.info("Finished processing image, return %s images", len(images))
return None
@ -417,9 +487,13 @@ def process_images_units(model, units : List[FaceSwapUnitSettings], images: List
processed_images = []
for i, (image, info) in enumerate(images):
logger.debug("Processing image %s", i)
swapped = process_image_unit(model,units[0],image, info, upscaled_swapper, force_blend)
swapped = process_image_unit(
model, units[0], image, info, upscaled_swapper, force_blend
)
logger.debug("Image %s -> %s images", i, len(swapped))
nexts = process_images_units(model,units[1:],swapped, upscaled_swapper,force_blend)
nexts = process_images_units(
model, units[1:], swapped, upscaled_swapper, force_blend
)
if nexts:
processed_images.extend(nexts)
else:

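# Hedged sketch of what the recursion above computes: unit 0 maps each
# (image, info) pair to zero or more swapped pairs, then the remaining
# units are applied to that output. An iterative equivalent, assuming the
# same process_image_unit contract:
def example_apply_units(model, units, images):
    for unit in units:
        images = [
            out
            for image, info in images
            for out in process_image_unit(model, unit, image, info)
        ]
    return images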
@ -1,4 +1,3 @@
import cv2
import numpy as np
import onnx
@ -14,18 +13,22 @@ from PIL import Image
from scripts.faceswaplab_utils.faceswaplab_logging import logger
from scripts.faceswaplab_postprocessing import upscaling
from scripts.faceswaplab_postprocessing.postprocessing_options import \
PostProcessingOptions
from scripts.faceswaplab_postprocessing.postprocessing_options import (
PostProcessingOptions,
)
from scripts.faceswaplab_swapping.facemask import generate_face_mask
from scripts.faceswaplab_utils.imgutils import cv2_to_pil, pil_to_cv2
def get_upscaler() -> UpscalerData:
for upscaler in shared.sd_upscalers:
if upscaler.name == opts.data.get("faceswaplab_upscaled_swapper_upscaler", "LDSR"):
if upscaler.name == opts.data.get(
"faceswaplab_upscaled_swapper_upscaler", "LDSR"
):
return upscaler
return None
def merge_images_with_mask(image1, image2, mask):
if image1.shape != image2.shape or image1.shape[:2] != mask.shape:
raise ValueError("Img should have the same shape")
@ -36,20 +39,24 @@ def merge_images_with_mask(image1, image2, mask):
merged_image = cv2.add(empty_region, masked_region)
return merged_image
def erode_mask(mask, kernel_size=3, iterations=1):
kernel = np.ones((kernel_size, kernel_size), np.uint8)
eroded_mask = cv2.erode(mask, kernel, iterations=iterations)
return eroded_mask
def apply_gaussian_blur(mask, kernel_size=(5, 5), sigma_x=0):
blurred_mask = cv2.GaussianBlur(mask, kernel_size, sigma_x)
return blurred_mask
def dilate_mask(mask, kernel_size=5, iterations=1):
kernel = np.ones((kernel_size, kernel_size), np.uint8)
dilated_mask = cv2.dilate(mask, kernel, iterations=iterations)
return dilated_mask
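# These helpers are typically chained to soften a binary mask before
# compositing; a self-contained sketch on a synthetic square mask:
def example_soft_mask():
    import numpy as np
    mask = np.zeros((64, 64), dtype=np.uint8)
    mask[16:48, 16:48] = 255  # white square
    # shrink the hard edge, then feather it
    return apply_gaussian_blur(erode_mask(mask, kernel_size=3), kernel_size=(5, 5))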
def get_face_mask(aimg, bgr_fake):
mask1 = generate_face_mask(aimg, device=shared.device)
mask2 = generate_face_mask(bgr_fake, device=shared.device)
@ -57,36 +64,54 @@ def get_face_mask(aimg,bgr_fake):
return mask
class UpscaledINSwapper():
class UpscaledINSwapper:
def __init__(self, inswapper: INSwapper):
self.__dict__.update(inswapper.__dict__)
def forward(self, img, latent):
img = (img - self.input_mean) / self.input_std
pred = self.session.run(self.output_names, {self.input_names[0]: img, self.input_names[1]: latent})[0]
pred = self.session.run(
self.output_names, {self.input_names[0]: img, self.input_names[1]: latent}
)[0]
return pred
def super_resolution(self, img, k=2):
pil_img = cv2_to_pil(img)
options = PostProcessingOptions(
upscaler_name=opts.data.get('faceswaplab_upscaled_swapper_upscaler', 'LDSR'),
upscaler_name=opts.data.get(
"faceswaplab_upscaled_swapper_upscaler", "LDSR"
),
upscale_visibility=1,
scale=k,
face_restorer_name=opts.data.get('faceswaplab_upscaled_swapper_face_restorer', ""),
codeformer_weight= opts.data.get('faceswaplab_upscaled_swapper_face_restorer_weight', 1),
restorer_visibility=opts.data.get('faceswaplab_upscaled_swapper_face_restorer_visibility', 1))
face_restorer_name=opts.data.get(
"faceswaplab_upscaled_swapper_face_restorer", ""
),
codeformer_weight=opts.data.get(
"faceswaplab_upscaled_swapper_face_restorer_weight", 1
),
restorer_visibility=opts.data.get(
"faceswaplab_upscaled_swapper_face_restorer_visibility", 1
),
)
upscaled = upscaling.upscale_img(pil_img, options)
upscaled = upscaling.restore_face(upscaled, options)
return pil_to_cv2(upscaled)
def get(self, img, target_face, source_face, paste_back=True, upscale=True):
aimg, M = face_align.norm_crop2(img, target_face.kps, self.input_size[0])
blob = cv2.dnn.blobFromImage(aimg, 1.0 / self.input_std, self.input_size,
(self.input_mean, self.input_mean, self.input_mean), swapRB=True)
blob = cv2.dnn.blobFromImage(
aimg,
1.0 / self.input_std,
self.input_size,
(self.input_mean, self.input_mean, self.input_mean),
swapRB=True,
)
latent = source_face.normed_embedding.reshape((1, -1))
latent = np.dot(latent, self.emap)
latent /= np.linalg.norm(latent)
pred = self.session.run(self.output_names, {self.input_names[0]: blob, self.input_names[1]: latent})[0]
pred = self.session.run(
self.output_names, {self.input_names[0]: blob, self.input_names[1]: latent}
)[0]
# print(latent.shape, latent.dtype, pred.shape)
img_fake = pred.transpose((0, 2, 3, 1))[0]
bgr_fake = np.clip(255 * img_fake, 0, 255).astype(np.uint8)[:, :, ::-1]
@ -107,13 +132,16 @@ class UpscaledINSwapper():
return fake_diff
if upscale:
print("*" * 80)
print(f"Upscaled inswapper using {opts.data.get('faceswaplab_upscaled_swapper_upscaler', 'LDSR')}")
print(
f"Upscaled inswapper using {opts.data.get('faceswaplab_upscaled_swapper_upscaler', 'LDSR')}"
)
print("*" * 80)
k = 4
aimg, M = face_align.norm_crop2(img, target_face.kps, self.input_size[0]*k)
aimg, M = face_align.norm_crop2(
img, target_face.kps, self.input_size[0] * k
)
# upscale and restore face :
bgr_fake = self.super_resolution(bgr_fake, k)
@ -135,21 +163,39 @@ class UpscaledINSwapper():
if opts.data.get("faceswaplab_upscaled_swapper_fixcolor", True):
print("color correction")
correction = processing.setup_color_correction(cv2_to_pil(aimg))
bgr_fake_pil = processing.apply_color_correction(correction, cv2_to_pil(bgr_fake))
bgr_fake_pil = processing.apply_color_correction(
correction, cv2_to_pil(bgr_fake)
)
bgr_fake = pil_to_cv2(bgr_fake_pil)
else:
fake_diff = compute_diff(bgr_fake, aimg)
IM = cv2.invertAffineTransform(M)
img_white = np.full((aimg.shape[0],aimg.shape[1]), 255, dtype=np.float32)
bgr_fake = cv2.warpAffine(bgr_fake, IM, (target_img.shape[1], target_img.shape[0]), borderValue=0.0)
img_white = cv2.warpAffine(img_white, IM, (target_img.shape[1], target_img.shape[0]), borderValue=0.0)
fake_diff = cv2.warpAffine(fake_diff, IM, (target_img.shape[1], target_img.shape[0]), borderValue=0.0)
img_white = np.full(
(aimg.shape[0], aimg.shape[1]), 255, dtype=np.float32
)
bgr_fake = cv2.warpAffine(
bgr_fake,
IM,
(target_img.shape[1], target_img.shape[0]),
borderValue=0.0,
)
img_white = cv2.warpAffine(
img_white,
IM,
(target_img.shape[1], target_img.shape[0]),
borderValue=0.0,
)
fake_diff = cv2.warpAffine(
fake_diff,
IM,
(target_img.shape[1], target_img.shape[0]),
borderValue=0.0,
)
img_white[img_white > 20] = 255
fthresh = opts.data.get('faceswaplab_upscaled_swapper_fthresh', 10)
fthresh = opts.data.get("faceswaplab_upscaled_swapper_fthresh", 10)
print("fthresh", fthresh)
fake_diff[fake_diff < fthresh] = 0
fake_diff[fake_diff >= fthresh] = 255
@ -158,7 +204,9 @@ class UpscaledINSwapper():
mask_h = np.max(mask_h_inds) - np.min(mask_h_inds)
mask_w = np.max(mask_w_inds) - np.min(mask_w_inds)
mask_size = int(np.sqrt(mask_h * mask_w))
erosion_factor = opts.data.get('faceswaplab_upscaled_swapper_erosion', 1)
erosion_factor = opts.data.get(
"faceswaplab_upscaled_swapper_erosion", 1
)
k = max(int(mask_size // 10 * erosion_factor), int(10 * erosion_factor))
kernel = np.ones((k, k), np.uint8)
@ -167,7 +215,6 @@ class UpscaledINSwapper():
fake_diff = cv2.dilate(fake_diff, kernel, iterations=1)
k = max(int(mask_size // 20 * erosion_factor), int(5 * erosion_factor))
kernel_size = (k, k)
blur_size = tuple(2 * i + 1 for i in kernel_size)
img_mask = cv2.GaussianBlur(img_mask, blur_size, 0)
@ -178,11 +225,16 @@ class UpscaledINSwapper():
img_mask /= 255
fake_diff /= 255
img_mask = np.reshape(img_mask, [img_mask.shape[0],img_mask.shape[1],1])
fake_merged = img_mask * bgr_fake + (1-img_mask) * target_img.astype(np.float32)
img_mask = np.reshape(
img_mask, [img_mask.shape[0], img_mask.shape[1], 1]
)
fake_merged = img_mask * bgr_fake + (1 - img_mask) * target_img.astype(
np.float32
)
fake_merged = fake_merged.astype(np.uint8)
return fake_merged
except Exception as e:
import traceback
traceback.print_exc()
raise e
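# The final compositing above is a per-pixel alpha blend. In isolation,
# assuming mask01 is a float mask in [0, 1] with shape (H, W, 1):
def example_alpha_blend(fg: np.ndarray, bg: np.ndarray, mask01: np.ndarray) -> np.ndarray:
    return (mask01 * fg.astype(np.float32) + (1.0 - mask01) * bg.astype(np.float32)).astype(np.uint8)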

@ -20,13 +20,16 @@ from scripts.faceswaplab_utils.imgutils import pil_to_cv2
from scripts.faceswaplab_utils.models_utils import get_models
from scripts.faceswaplab_utils.faceswaplab_logging import logger
import scripts.faceswaplab_swapping.swapper as swapper
from scripts.faceswaplab_postprocessing.postprocessing_options import PostProcessingOptions
from scripts.faceswaplab_postprocessing.postprocessing_options import (
PostProcessingOptions,
)
from scripts.faceswaplab_postprocessing.postprocessing import enhance_image
from dataclasses import fields
from typing import List
from scripts.faceswaplab_ui.faceswaplab_unit_settings import FaceSwapUnitSettings
from scripts.faceswaplab_utils.models_utils import get_current_model
def compare(img1, img2):
if img1 is not None and img2 is not None:
return swapper.compare_faces(img1, img2)
@ -34,8 +37,22 @@ def compare(img1, img2):
return "You need 2 images to compare"
def extract_faces(files, extract_path, face_restorer_name, face_restorer_visibility, codeformer_weight,upscaler_name,upscaler_scale, upscaler_visibility,inpainting_denoising_strengh, inpainting_prompt, inpainting_negative_prompt, inpainting_steps, inpainting_sampler,inpainting_when):
def extract_faces(
files,
extract_path,
face_restorer_name,
face_restorer_visibility,
codeformer_weight,
upscaler_name,
upscaler_scale,
upscaler_visibility,
inpainting_denoising_strengh,
inpainting_prompt,
inpainting_negative_prompt,
inpainting_steps,
inpainting_sampler,
inpainting_when,
):
if not extract_path:
extract_path = tempfile.mkdtemp()
if files is not None:
@ -51,7 +68,10 @@ def extract_faces(files, extract_path, face_restorer_name, face_restorer_visibi
face_image = img.crop((x_min, y_min, x_max, y_max))
if face_restorer_name or face_restorer_visibility:
scale = 1 if face_image.width > 512 else 512 // face_image.width
face_image = enhance_image(face_image, PostProcessingOptions(face_restorer_name=face_restorer_name,
face_image = enhance_image(
face_image,
PostProcessingOptions(
face_restorer_name=face_restorer_name,
restorer_visibility=face_restorer_visibility,
codeformer_weight=codeformer_weight,
upscaler_name=upscaler_name,
@ -62,14 +82,19 @@ def extract_faces(files, extract_path, face_restorer_name, face_restorer_visibi
inpainting_steps=inpainting_steps,
inpainting_negative_prompt=inpainting_negative_prompt,
inpainting_when=inpainting_when,
inpainting_sampler=inpainting_sampler))
path = tempfile.NamedTemporaryFile(delete=False,suffix=".png",dir=extract_path).name
inpainting_sampler=inpainting_sampler,
),
)
path = tempfile.NamedTemporaryFile(
delete=False, suffix=".png", dir=extract_path
).name
face_image.save(path)
face_images.append(path)
images += face_images
return images
return None
def analyse_faces(image, det_threshold=0.5):
try:
faces = swapper.get_faces(imgutils.pil_to_cv2(image), det_thresh=det_threshold)
@ -84,6 +109,7 @@ def analyse_faces(image, det_threshold = 0.5) :
logger.error("Analysis Failed : %s", e)
return "Analysis Failed"
def build_face_checkpoint_and_save(batch_files, name):
"""
Builds a face checkpoint, swaps faces, and saves the result to a file.
@ -116,8 +142,15 @@ def build_face_checkpoint_and_save(batch_files, name):
if name == "":
name = "default_name"
pprint(blended_face)
result = swapper.swap_face(blended_face, blended_face, target_img, get_models()[0])
result_image = enhance_image(result.image, PostProcessingOptions(face_restorer_name="CodeFormer", restorer_visibility=1))
result = swapper.swap_face(
blended_face, blended_face, target_img, get_models()[0]
)
result_image = enhance_image(
result.image,
PostProcessingOptions(
face_restorer_name="CodeFormer", restorer_visibility=1
),
)
file_path = os.path.join(faces_path, f"{name}.pkl")
file_number = 1
@ -126,7 +159,14 @@ def build_face_checkpoint_and_save(batch_files, name):
file_number += 1
result_image.save(file_path + ".png")
with open(file_path, "wb") as file:
pickle.dump({"embedding" :blended_face.embedding, "gender" :blended_face.gender, "age" :blended_face.age},file)
pickle.dump(
{
"embedding": blended_face.embedding,
"gender": blended_face.gender,
"age": blended_face.age,
},
file,
)
try:
with open(file_path, "rb") as file:
data = Face(pickle.load(file))
@ -139,31 +179,35 @@ def build_face_checkpoint_and_save(batch_files, name):
return target_img
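# The checkpoint written above is a plain pickle with three keys. A hedged
# loading sketch ("default_name.pkl" is a placeholder path; dill is the
# pickle implementation used elsewhere in this extension):
def example_load_checkpoint(path="models/faceswaplab/faces/default_name.pkl"):
    import dill as pickle
    from insightface.app.common import Face
    with open(path, "rb") as file:
        return Face(pickle.load(file))  # keys: embedding, gender, age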
def explore_onnx_faceswap_model(model_path):
data = {
'Node Name': [],
'Op Type': [],
'Inputs': [],
'Outputs': [],
'Attributes': []
"Node Name": [],
"Op Type": [],
"Inputs": [],
"Outputs": [],
"Attributes": [],
}
if model_path:
model = onnx.load(model_path)
for node in model.graph.node:
data['Node Name'].append(pformat(node.name))
data['Op Type'].append(pformat(node.op_type))
data['Inputs'].append(pformat(node.input))
data['Outputs'].append(pformat(node.output))
data["Node Name"].append(pformat(node.name))
data["Op Type"].append(pformat(node.op_type))
data["Inputs"].append(pformat(node.input))
data["Outputs"].append(pformat(node.output))
attributes = []
for attr in node.attribute:
attr_name = attr.name
attr_value = attr.t
attributes.append("{} = {}".format(pformat(attr_name), pformat(attr_value)))
data['Attributes'].append(attributes)
attributes.append(
"{} = {}".format(pformat(attr_name), pformat(attr_value))
)
data["Attributes"].append(attributes)
df = pd.DataFrame(data)
return df
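# Usage sketch, with a placeholder model path; the returned DataFrame has
# one row per ONNX graph node:
def example_explore(model_path="models/faceswaplab/inswapper_128.onnx"):
    df = explore_onnx_faceswap_model(model_path)  # placeholder path
    print(df[["Node Name", "Op Type"]].head())
    return df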
def batch_process(files, save_path, *components):
try:
if save_path is not None:
@ -194,7 +238,14 @@ def batch_process(files, save_path, *components):
for file in files:
current_images = []
src_image = Image.open(file.name).convert("RGB")
swapped_images = swapper.process_images_units(get_current_model(), images=[(src_image,None)], units=units, upscaled_swapper=opts.data.get("faceswaplab_upscaled_swapper", False))
swapped_images = swapper.process_images_units(
get_current_model(),
images=[(src_image, None)],
units=units,
upscaled_swapper=opts.data.get(
"faceswaplab_upscaled_swapper", False
),
)
if len(swapped_images) > 0:
current_images += [img for img, info in swapped_images]
@ -203,7 +254,9 @@ def batch_process(files, save_path, *components):
current_images[i] = enhance_image(img, postprocess_options)
for img in current_images:
path = tempfile.NamedTemporaryFile(delete=False,suffix=".png",dir=save_path).name
path = tempfile.NamedTemporaryFile(
delete=False, suffix=".png", dir=save_path
).name
img.save(path)
images += current_images
@ -211,6 +264,7 @@ def batch_process(files, save_path, *components):
except Exception as e:
logger.error("Batch Process error : %s", e)
import traceback
traceback.print_exc()
return None
@ -220,107 +274,164 @@ def tools_ui():
with gr.Tab("Tools"):
with gr.Tab("Build"):
gr.Markdown(
"""Build a face based on a batch list of images. Will blend the resulting face and store the checkpoint in the faceswaplab/faces directory.""")
"""Build a face based on a batch list of images. Will blend the resulting face and store the checkpoint in the faceswaplab/faces directory."""
)
with gr.Row():
batch_files = gr.components.File(
type="file",
file_count="multiple",
label="Batch Sources Images",
optional=True,
elem_id="faceswaplab_build_batch_files"
elem_id="faceswaplab_build_batch_files",
)
preview = gr.components.Image(
type="pil",
label="Preview",
interactive=False,
elem_id="faceswaplab_build_preview_face",
)
preview = gr.components.Image(type="pil", label="Preview", interactive=False, elem_id="faceswaplab_build_preview_face")
name = gr.Textbox(
value="Face",
placeholder="Name of the character",
label="Name of the character",
elem_id="faceswaplab_build_character_name"
elem_id="faceswaplab_build_character_name",
)
generate_checkpoint_btn = gr.Button(
"Save", elem_id="faceswaplab_build_save_btn"
)
generate_checkpoint_btn = gr.Button("Save",elem_id="faceswaplab_build_save_btn")
with gr.Tab("Compare"):
gr.Markdown(
"""Give a similarity score between two images (only first face is compared).""")
"""Give a similarity score between two images (only first face is compared)."""
)
with gr.Row():
img1 = gr.components.Image(type="pil",
label="Face 1",
elem_id="faceswaplab_compare_face1"
img1 = gr.components.Image(
type="pil", label="Face 1", elem_id="faceswaplab_compare_face1"
)
img2 = gr.components.Image(type="pil",
label="Face 2",
elem_id="faceswaplab_compare_face2"
img2 = gr.components.Image(
type="pil", label="Face 2", elem_id="faceswaplab_compare_face2"
)
compare_btn = gr.Button("Compare", elem_id="faceswaplab_compare_btn")
compare_result_text = gr.Textbox(
interactive=False, label="Similarity", value="0", elem_id="faceswaplab_compare_result"
interactive=False,
label="Similarity",
value="0",
elem_id="faceswaplab_compare_result",
)
with gr.Tab("Extract"):
gr.Markdown(
"""Extract all faces from a batch of images. Will apply enhancement in the tools enhancement tab.""")
"""Extract all faces from a batch of images. Will apply enhancement in the tools enhancement tab."""
)
with gr.Row():
extracted_source_files = gr.components.File(
type="file",
file_count="multiple",
label="Batch Sources Images",
optional=True,
elem_id="faceswaplab_extract_batch_images"
elem_id="faceswaplab_extract_batch_images",
)
extracted_faces = gr.Gallery(
label="Extracted faces", show_label=False,
elem_id="faceswaplab_extract_results"
label="Extracted faces",
show_label=False,
elem_id="faceswaplab_extract_results",
).style(columns=[2], rows=[2])
extract_save_path = gr.Textbox(label="Destination Directory", value="", elem_id="faceswaplab_extract_destination")
extract_save_path = gr.Textbox(
label="Destination Directory",
value="",
elem_id="faceswaplab_extract_destination",
)
extract_btn = gr.Button("Extract", elem_id="faceswaplab_extract_btn")
with gr.Tab("Explore Model"):
model = gr.Dropdown(
choices=models,
label="Model not found, please download one and reload automatic 1111",
elem_id="faceswaplab_explore_model"
elem_id="faceswaplab_explore_model",
)
explore_btn = gr.Button("Explore", elem_id="faceswaplab_explore_btn")
explore_result_text = gr.Dataframe(
interactive=False, label="Explored",
elem_id="faceswaplab_explore_result"
interactive=False,
label="Explored",
elem_id="faceswaplab_explore_result",
)
with gr.Tab("Analyse Face"):
img_to_analyse = gr.components.Image(type="pil", label="Face", elem_id="faceswaplab_analyse_face")
analyse_det_threshold = gr.Slider(0.1, 1, 0.5, step=0.01, label="Detection threshold", elem_id="faceswaplab_analyse_det_threshold")
img_to_analyse = gr.components.Image(
type="pil", label="Face", elem_id="faceswaplab_analyse_face"
)
analyse_det_threshold = gr.Slider(
0.1,
1,
0.5,
step=0.01,
label="Detection threshold",
elem_id="faceswaplab_analyse_det_threshold",
)
analyse_btn = gr.Button("Analyse", elem_id="faceswaplab_analyse_btn")
analyse_results = gr.Textbox(label="Results", interactive=False, value="", elem_id="faceswaplab_analyse_results")
analyse_results = gr.Textbox(
label="Results",
interactive=False,
value="",
elem_id="faceswaplab_analyse_results",
)
with gr.Tab("Batch Process"):
with gr.Tab("Source Images"):
gr.Markdown(
"""Batch process images. Will apply enhancement in the tools enhancement tab.""")
"""Batch process images. Will apply enhancement in the tools enhancement tab."""
)
with gr.Row():
batch_source_files = gr.components.File(
type="file",
file_count="multiple",
label="Batch Sources Images",
optional=True,
elem_id="faceswaplab_batch_images"
elem_id="faceswaplab_batch_images",
)
batch_results = gr.Gallery(
label="Batch result", show_label=False,
elem_id="faceswaplab_batch_results"
label="Batch result",
show_label=False,
elem_id="faceswaplab_batch_results",
).style(columns=[2], rows=[2])
batch_save_path = gr.Textbox(label="Destination Directory", value="outputs/faceswap/", elem_id="faceswaplab_batch_destination")
batch_save_btn= gr.Button("Process & Save", elem_id="faceswaplab_extract_btn")
batch_save_path = gr.Textbox(
label="Destination Directory",
value="outputs/faceswap/",
elem_id="faceswaplab_batch_destination",
)
batch_save_btn = gr.Button(
"Process & Save", elem_id="faceswaplab_extract_btn"
)
unit_components = []
for i in range(1, opts.data.get("faceswaplab_units_count", 3) + 1):
unit_components += faceswap_unit_ui(False, i, id_prefix="faceswaplab_tab")
upscale_options = upscaler_ui()
explore_btn.click(explore_onnx_faceswap_model, inputs=[model], outputs=[explore_result_text])
explore_btn.click(
explore_onnx_faceswap_model, inputs=[model], outputs=[explore_result_text]
)
compare_btn.click(compare, inputs=[img1, img2], outputs=[compare_result_text])
generate_checkpoint_btn.click(build_face_checkpoint_and_save, inputs=[batch_files, name], outputs=[preview])
extract_btn.click(extract_faces, inputs=[extracted_source_files, extract_save_path]+upscale_options, outputs=[extracted_faces])
analyse_btn.click(analyse_faces, inputs=[img_to_analyse,analyse_det_threshold], outputs=[analyse_results])
batch_save_btn.click(batch_process, inputs=[batch_source_files, batch_save_path]+unit_components+upscale_options, outputs=[batch_results])
generate_checkpoint_btn.click(
build_face_checkpoint_and_save, inputs=[batch_files, name], outputs=[preview]
)
extract_btn.click(
extract_faces,
inputs=[extracted_source_files, extract_save_path] + upscale_options,
outputs=[extracted_faces],
)
analyse_btn.click(
analyse_faces,
inputs=[img_to_analyse, analyse_det_threshold],
outputs=[analyse_results],
)
batch_save_btn.click(
batch_process,
inputs=[batch_source_files, batch_save_path]
+ unit_components
+ upscale_options,
outputs=[batch_results],
)
def on_ui_tabs():
with gr.Blocks(analytics_enabled=False) as ui_faceswap:
tools_ui()
return [(ui_faceswap, "FaceSwapLab", "faceswaplab_tab")]

@ -8,12 +8,12 @@ import dill as pickle
import gradio as gr
from insightface.app.common import Face
from PIL import Image
from scripts.faceswaplab_utils.imgutils import (pil_to_cv2,check_against_nsfw)
from scripts.faceswaplab_utils.imgutils import pil_to_cv2, check_against_nsfw
from scripts.faceswaplab_utils.faceswaplab_logging import logger
@dataclass
class FaceSwapUnitSettings:
# ORDER of parameters is IMPORTANT. It should match the result of faceswap_unit_ui
# The image given in reference
@ -99,15 +99,19 @@ class FaceSwapUnitSettings:
logger.error("Failed to load checkpoint : %s", e)
elif self.source_img is not None:
if isinstance(self.source_img, str): # source_img is a base64 string
if 'base64,' in self.source_img: # check if the base64 string has a data URL scheme
base64_data = self.source_img.split('base64,')[-1]
if (
"base64," in self.source_img
): # check if the base64 string has a data URL scheme
base64_data = self.source_img.split("base64,")[-1]
img_bytes = base64.b64decode(base64_data)
else:
# if no data URL scheme, just decode
img_bytes = base64.b64decode(self.source_img)
self.source_img = Image.open(io.BytesIO(img_bytes))
source_img = pil_to_cv2(self.source_img)
self._reference_face = swapper.get_or_default(swapper.get_faces(source_img), self.reference_face_index, None)
self._reference_face = swapper.get_or_default(
swapper.get_faces(source_img), self.reference_face_index, None
)
if self._reference_face is None:
logger.error("Face not found in reference image")
else:
@ -125,14 +129,18 @@ class FaceSwapUnitSettings:
Only processed once.
"""
if self.batch_files is not None and not hasattr(self, "_faces"):
self._faces = [self.reference_face] if self.reference_face is not None else []
self._faces = (
[self.reference_face] if self.reference_face is not None else []
)
for file in self.batch_files:
if isinstance(file, Image.Image):
img = file
else:
img = Image.open(file.name)
face = swapper.get_or_default(swapper.get_faces(pil_to_cv2(img)), 0, None)
face = swapper.get_or_default(
swapper.get_faces(pil_to_cv2(img)), 0, None
)
if face is not None:
self._faces.append(face)
return self._faces
@ -144,9 +152,24 @@ class FaceSwapUnitSettings:
"""
if not hasattr(self, "_blended_faces"):
self._blended_faces = swapper.blend_faces(self.faces)
assert(all([not np.array_equal(self._blended_faces.embedding, face.embedding) for face in self.faces]) if len(self.faces) > 1 else True), "Blended face cannot be the same as any single source face if len(faces)>1"
assert(not np.array_equal(self._blended_faces.embedding,self.reference_face.embedding) if len(self.faces) > 1 else True), "Blended face cannot be the same as the reference face if len(faces)>1"
assert (
all(
[
not np.array_equal(
self._blended_faces.embedding, face.embedding
)
for face in self.faces
]
)
if len(self.faces) > 1
else True
), "Blended faces cannot be the same as one of the face if len(face)>0"
assert (
not np.array_equal(
self._blended_faces.embedding, self.reference_face.embedding
)
if len(self.faces) > 1
else True
), "Blended faces cannot be the same as reference face if len(face)>0"
return self._blended_faces

@ -1,94 +1,143 @@
from scripts.faceswaplab_utils.models_utils import get_face_checkpoints
import gradio as gr
def faceswap_unit_ui(is_img2img, unit_num=1, id_prefix="faceswaplab"):
with gr.Tab(f"Face {unit_num}"):
with gr.Column():
gr.Markdown(
"""Reference is an image. First face will be extracted.
The first face of each batch source image will be extracted and used as input (or blended if blending is enabled).""")
The first face of each batch source image will be extracted and used as input (or blended if blending is enabled)."""
)
with gr.Row():
img = gr.components.Image(type="pil", label="Reference", elem_id=f"{id_prefix}_face{unit_num}_reference_image")
img = gr.components.Image(
type="pil",
label="Reference",
elem_id=f"{id_prefix}_face{unit_num}_reference_image",
)
batch_files = gr.components.File(
type="file",
file_count="multiple",
label="Batch Sources Images",
optional=True,
elem_id=f"{id_prefix}_face{unit_num}_batch_source_face_files"
elem_id=f"{id_prefix}_face{unit_num}_batch_source_face_files",
)
gr.Markdown(
"""Face checkpoint built with the checkpoint builder in tools. Will overwrite reference image.""")
"""Face checkpoint built with the checkpoint builder in tools. Will overwrite reference image."""
)
with gr.Row():
face = gr.Dropdown(
choices=get_face_checkpoints(),
label="Face Checkpoint (precedence over reference face)",
elem_id=f"{id_prefix}_face{unit_num}_face_checkpoint"
elem_id=f"{id_prefix}_face{unit_num}_face_checkpoint",
)
refresh = gr.Button(
value="",
variant="tool",
elem_id=f"{id_prefix}_face{unit_num}_refresh_checkpoints",
)
refresh = gr.Button(value='', variant='tool', elem_id=f"{id_prefix}_face{unit_num}_refresh_checkpoints")
def refresh_fn(selected):
return gr.Dropdown.update(value=selected, choices=get_face_checkpoints())
return gr.Dropdown.update(
value=selected, choices=get_face_checkpoints()
)
refresh.click(fn=refresh_fn, inputs=face, outputs=face)
with gr.Row():
enable = gr.Checkbox(False, placeholder="enable", label="Enable", elem_id=f"{id_prefix}_face{unit_num}_enable")
enable = gr.Checkbox(
False,
placeholder="enable",
label="Enable",
elem_id=f"{id_prefix}_face{unit_num}_enable",
)
blend_faces = gr.Checkbox(
True, placeholder="Blend Faces", label="Blend Faces ((Source|Checkpoint)+References = 1)",
True,
placeholder="Blend Faces",
label="Blend Faces ((Source|Checkpoint)+References = 1)",
elem_id=f"{id_prefix}_face{unit_num}_blend_faces",
interactive=True
interactive=True,
)
gr.Markdown("""Discard images with low similarity or no faces :""")
with gr.Row():
check_similarity = gr.Checkbox(False, placeholder="discard", label="Check similarity",
elem_id=f"{id_prefix}_face{unit_num}_check_similarity")
compute_similarity = gr.Checkbox(False, label="Compute similarity",
elem_id=f"{id_prefix}_face{unit_num}_compute_similarity")
min_sim = gr.Slider(0, 1, 0, step=0.01, label="Min similarity",
elem_id=f"{id_prefix}_face{unit_num}_min_similarity")
check_similarity = gr.Checkbox(
False,
placeholder="discard",
label="Check similarity",
elem_id=f"{id_prefix}_face{unit_num}_check_similarity",
)
compute_similarity = gr.Checkbox(
False,
label="Compute similarity",
elem_id=f"{id_prefix}_face{unit_num}_compute_similarity",
)
min_sim = gr.Slider(
0,
1,
0,
step=0.01,
label="Min similarity",
elem_id=f"{id_prefix}_face{unit_num}_min_similarity",
)
min_ref_sim = gr.Slider(
0, 1, 0, step=0.01, label="Min reference similarity",
elem_id=f"{id_prefix}_face{unit_num}_min_ref_similarity"
0,
1,
0,
step=0.01,
label="Min reference similarity",
elem_id=f"{id_prefix}_face{unit_num}_min_ref_similarity",
)
gr.Markdown("""Select the face to be swapped, you can sort by size or use the same gender as the desired face:""")
gr.Markdown(
"""Select the face to be swapped, you can sort by size or use the same gender as the desired face:"""
)
with gr.Row():
same_gender = gr.Checkbox(
False, placeholder="Same Gender", label="Same Gender",
elem_id=f"{id_prefix}_face{unit_num}_same_gender"
False,
placeholder="Same Gender",
label="Same Gender",
elem_id=f"{id_prefix}_face{unit_num}_same_gender",
)
sort_by_size = gr.Checkbox(
False, placeholder="Sort by size", label="Sort by size (larger>smaller)",
elem_id=f"{id_prefix}_face{unit_num}_sort_by_size"
False,
placeholder="Sort by size",
label="Sort by size (larger>smaller)",
elem_id=f"{id_prefix}_face{unit_num}_sort_by_size",
)
target_faces_index = gr.Textbox(
value="0",
placeholder="Which face to swap (comma separated), start from 0 (by gender if same_gender is enabled)",
label="Target face : Comma separated face number(s)",
elem_id=f"{id_prefix}_face{unit_num}_target_faces_index"
elem_id=f"{id_prefix}_face{unit_num}_target_faces_index",
)
gr.Markdown(
"""The following will only affect reference face image (and is not affected by sort by size) :"""
)
gr.Markdown("""The following will only affect reference face image (and is not affected by sort by size) :""")
reference_faces_index = gr.Number(
value=0,
precision=0,
minimum=0,
placeholder="Which face to get from reference image start from 0",
label="Reference source face : start from 0",
elem_id=f"{id_prefix}_face{unit_num}_reference_face_index"
elem_id=f"{id_prefix}_face{unit_num}_reference_face_index",
)
gr.Markdown(
"""Configure swapping. Swapping can occure before img2img, after or both :""",
visible=is_img2img,
)
gr.Markdown("""Configure swapping. Swapping can occure before img2img, after or both :""", visible=is_img2img)
swap_in_source = gr.Checkbox(
False,
placeholder="Swap face in source image",
label="Swap in source image (blended face)",
visible=is_img2img,
elem_id=f"{id_prefix}_face{unit_num}_swap_in_source"
elem_id=f"{id_prefix}_face{unit_num}_swap_in_source",
)
swap_in_generated = gr.Checkbox(
True,
placeholder="Swap face in generated image",
label="Swap in generated image",
visible=is_img2img,
elem_id=f"{id_prefix}_face{unit_num}_swap_in_generated"
elem_id=f"{id_prefix}_face{unit_num}_swap_in_generated",
)
# If changed, you need to change FaceSwapUnitSettings accordingly
# ORDER of parameters is IMPORTANT. It should match the result of FaceSwapUnitSettings

@ -6,63 +6,122 @@ from modules.shared import cmd_opts, opts, state
import scripts.faceswaplab_postprocessing.upscaling as upscaling
from scripts.faceswaplab_utils.faceswaplab_logging import logger
def upscaler_ui():
with gr.Tab(f"Post-Processing"):
gr.Markdown(
"""Upscaling is performed on the whole image. Upscaling happens before face restoration.""")
"""Upscaling is performed on the whole image. Upscaling happens before face restoration."""
)
with gr.Row():
face_restorer_name = gr.Radio(
label="Restore Face",
choices=["None"] + [x.name() for x in shared.face_restorers],
value=lambda : opts.data.get("faceswaplab_pp_default_face_restorer", shared.face_restorers[0].name()),
value=lambda: opts.data.get(
"faceswaplab_pp_default_face_restorer",
shared.face_restorers[0].name(),
),
type="value",
elem_id="faceswaplab_pp_face_restorer"
elem_id="faceswaplab_pp_face_restorer",
)
with gr.Column():
face_restorer_visibility = gr.Slider(
0, 1, value=lambda:opts.data.get("faceswaplab_pp_default_face_restorer_visibility", 1), step=0.001, label="Restore visibility",
elem_id="faceswaplab_pp_face_restorer_visibility"
0,
1,
value=lambda: opts.data.get(
"faceswaplab_pp_default_face_restorer_visibility", 1
),
step=0.001,
label="Restore visibility",
elem_id="faceswaplab_pp_face_restorer_visibility",
)
codeformer_weight = gr.Slider(
0, 1, value=lambda:opts.data.get("faceswaplab_pp_default_face_restorer_weight", 1), step=0.001, label="codeformer weight",
elem_id="faceswaplab_pp_face_restorer_weight"
0,
1,
value=lambda: opts.data.get(
"faceswaplab_pp_default_face_restorer_weight", 1
),
step=0.001,
label="codeformer weight",
elem_id="faceswaplab_pp_face_restorer_weight",
)
upscaler_name = gr.Dropdown(
choices=[upscaler.name for upscaler in shared.sd_upscalers],
value=lambda: opts.data.get("faceswaplab_pp_default_upscaler", "None"),
label="Upscaler",
elem_id="faceswaplab_pp_upscaler"
elem_id="faceswaplab_pp_upscaler",
)
upscaler_scale = gr.Slider(
1,
8,
1,
step=0.1,
label="Upscaler scale",
elem_id="faceswaplab_pp_upscaler_scale",
)
upscaler_scale = gr.Slider(1, 8, 1, step=0.1, label="Upscaler scale", elem_id="faceswaplab_pp_upscaler_scale")
upscaler_visibility = gr.Slider(
0, 1, value=lambda:opts.data.get("faceswaplab_pp_default_upscaler_visibility", 1), step=0.1, label="Upscaler visibility (if scale = 1)",
elem_id="faceswaplab_pp_upscaler_visibility"
0,
1,
value=lambda: opts.data.get(
"faceswaplab_pp_default_upscaler_visibility", 1
),
step=0.1,
label="Upscaler visibility (if scale = 1)",
elem_id="faceswaplab_pp_upscaler_visibility",
)
with gr.Accordion(f"Post Inpainting", open=True):
gr.Markdown(
"""Inpainting sends image to inpainting with a mask on face (once for each faces).""")
"""Inpainting sends image to inpainting with a mask on face (once for each faces)."""
)
inpainting_when = gr.Dropdown(
elem_id="faceswaplab_pp_inpainting_when", choices = [e.value for e in upscaling.InpaintingWhen.__members__.values()],value=[upscaling.InpaintingWhen.BEFORE_RESTORE_FACE.value], label="Enable/When")
elem_id="faceswaplab_pp_inpainting_when",
choices=[
e.value for e in upscaling.InpaintingWhen.__members__.values()
],
value=[upscaling.InpaintingWhen.BEFORE_RESTORE_FACE.value],
label="Enable/When",
)
inpainting_denoising_strength = gr.Slider(
0, 1, 0, step=0.01, elem_id="faceswaplab_pp_inpainting_denoising_strength", label="Denoising strength (will send face to img2img after processing)"
0,
1,
0,
step=0.01,
elem_id="faceswaplab_pp_inpainting_denoising_strength",
label="Denoising strenght (will send face to img2img after processing)",
)
inpainting_denoising_prompt = gr.Textbox("Portrait of a [gender]", elem_id="faceswaplab_pp_inpainting_denoising_prompt", label="Inpainting prompt; use [gender] instead of man or woman")
inpainting_denoising_negative_prompt = gr.Textbox("", elem_id="faceswaplab_pp_inpainting_denoising_neg_prompt", label="Inpainting negative prompt; use [gender] instead of man or woman")
inpainting_denoising_prompt = gr.Textbox(
"Portrait of a [gender]",
elem_id="faceswaplab_pp_inpainting_denoising_prompt",
label="Inpainting prompt use [gender] instead of men or woman",
)
inpainting_denoising_negative_prompt = gr.Textbox(
"",
elem_id="faceswaplab_pp_inpainting_denoising_neg_prompt",
label="Inpainting negative prompt use [gender] instead of men or woman",
)
with gr.Row():
samplers_names = [s.name for s in modules.sd_samplers.all_samplers]
inpainting_sampler = gr.Dropdown(
choices=samplers_names,
value=samplers_names[0],
label="Inpainting Sampler",
elem_id="faceswaplab_pp_inpainting_sampler"
elem_id="faceswaplab_pp_inpainting_sampler",
)
inpainting_denoising_steps = gr.Slider(
1, 150, 20, step=1, label="Inpainting steps",
elem_id="faceswaplab_pp_inpainting_steps"
1,
150,
20,
step=1,
label="Inpainting steps",
elem_id="faceswaplab_pp_inpainting_steps",
)
inpainting_model = gr.Dropdown(choices=["Current"]+sd_models.checkpoint_tiles(), value="Current", label="sd model (experimental)", elem_id="faceswaplab_pp_inpainting_sd_model")
inpainting_model = gr.Dropdown(
choices=["Current"] + sd_models.checkpoint_tiles(),
value="Current",
label="sd model (experimental)",
elem_id="faceswaplab_pp_inpainting_sd_model",
)
return [
face_restorer_name,
face_restorer_visibility,
@ -76,5 +135,5 @@ def upscaler_ui():
inpainting_denoising_steps,
inpainting_sampler,
inpainting_when,
inpainting_model
inpainting_model,
]

@ -4,6 +4,7 @@ import sys
from modules import shared
from PIL import Image
class ColoredFormatter(logging.Formatter):
COLORS = {
"DEBUG": "\033[0;36m", # CYAN
@ -40,12 +41,16 @@ loglevel = getattr(logging, loglevel_string.upper(), "INFO")
logger.setLevel(loglevel)
import tempfile
if logger.getEffectiveLevel() <= logging.DEBUG:
DEBUG_DIR = tempfile.mkdtemp()
def save_img_debug(img: Image.Image, message: str, *opts):
if logger.getEffectiveLevel() <= logging.DEBUG:
with tempfile.NamedTemporaryFile(dir=DEBUG_DIR, delete=False, suffix=".png") as temp_file:
with tempfile.NamedTemporaryFile(
dir=DEBUG_DIR, delete=False, suffix=".png"
) as temp_file:
img_path = temp_file.name
img.save(img_path)

@ -10,6 +10,7 @@ from scripts.faceswaplab_globals import NSFW_SCORE
from modules import processing
import base64
def check_against_nsfw(img):
shapes = []
chunks = detect(img)
@ -17,6 +18,7 @@ def check_against_nsfw(img):
shapes.append(chunk["score"] > NSFW_SCORE)
return any(shapes)
def pil_to_cv2(pil_img):
return cv2.cvtColor(np.array(pil_img), cv2.COLOR_RGB2BGR)
@ -24,6 +26,7 @@ def pil_to_cv2(pil_img):
def cv2_to_pil(cv2_img):
return Image.fromarray(cv2.cvtColor(cv2_img, cv2.COLOR_BGR2RGB))
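# Round-trip sketch: the two converters above are inverses up to the
# RGB<->BGR channel swap, so a pixel survives the round trip unchanged:
def example_color_roundtrip():
    from PIL import Image
    img = Image.new("RGB", (4, 4), (255, 0, 0))
    assert cv2_to_pil(pil_to_cv2(img)).getpixel((0, 0)) == (255, 0, 0)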
def torch_to_pil(images):
"""
Convert a numpy image or a batch of images to a PIL image.
@ -49,7 +52,10 @@ def pil_to_torch(pil_images):
torch_image = torch.from_numpy(numpy_image).permute(2, 0, 1)
return torch_image
from collections import Counter
def create_square_image(image_list):
"""
Creates a square image by combining multiple images in a grid pattern.
@ -101,6 +107,7 @@ def create_square_image(image_list):
# Return None if there are no images or only one image in the image_list
return None
def create_mask(image, box_coords):
width, height = image.size
mask = Image.new("L", (width, height), 255)
@ -113,7 +120,10 @@ def create_mask(image, box_coords):
mask.putpixel((x, y), 0)
return mask
def apply_mask(img : Image.Image,p : processing.StableDiffusionProcessing, batch_index : int) -> Image.Image :
def apply_mask(
img: Image.Image, p: processing.StableDiffusionProcessing, batch_index: int
) -> Image.Image:
"""
Apply mask overlay and color correction to an image if enabled
@ -138,11 +148,12 @@ def apply_mask(img : Image.Image,p : processing.StableDiffusionProcessing, batch
img = processing.apply_overlay(img, p.paste_to, batch_index, p.overlay_images)
if p.color_corrections is not None and batch_index < len(p.color_corrections):
img = processing.apply_color_correction(p.color_corrections[batch_index], img)
img = processing.apply_color_correction(
p.color_corrections[batch_index], img
)
return img
def prepare_mask(
mask: Image.Image, p: processing.StableDiffusionProcessing
) -> Image.Image:
@ -172,14 +183,14 @@ def prepare_mask(
# mask = mask.filter(ImageFilter.GaussianBlur(p.mask_blur))
return mask
def base64_to_pil(base64str: Optional[str]) -> Optional[Image.Image]:
if base64str is None:
return None
if 'base64,' in base64str: # check if the base64 string has a data URL scheme
base64_data = base64str.split('base64,')[-1]
if "base64," in base64str: # check if the base64 string has a data URL scheme
base64_data = base64str.split("base64,")[-1]
img_bytes = base64.b64decode(base64_data)
else:
# if no data URL scheme, just decode
img_bytes = base64.b64decode(base64str)
return Image.open(io.BytesIO(img_bytes))
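# Round-trip sketch for base64_to_pil, covering the data-URL branch; the
# payload is a synthetic 8x8 PNG:
def example_base64_roundtrip():
    import base64, io
    from PIL import Image
    buf = io.BytesIO()
    Image.new("RGB", (8, 8)).save(buf, format="PNG")
    data_url = "data:image/png;base64," + base64.b64encode(buf.getvalue()).decode()
    return base64_to_pil(data_url)  # PIL.Image, 8x8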

@ -1,4 +1,3 @@
import glob
import os
import modules.scripts as scripts
@ -7,6 +6,7 @@ from scripts.faceswaplab_globals import EXTENSION_PATH
from modules.shared import opts
from scripts.faceswaplab_utils.faceswaplab_logging import logger
def get_models():
"""
Retrieve a list of swap model files.
@ -29,6 +29,7 @@ def get_models():
return models
def get_current_model() -> str:
model = opts.data.get("faceswaplab_model", None)
if model is None:
@ -37,9 +38,12 @@ def get_current_model() -> str :
logger.info("Try to use model : %s", model)
if not os.path.isfile(model):
logger.error("The model %s cannot be found or loaded", model)
raise FileNotFoundError("No faceswap model found. Please add it to the faceswaplab directory.")
raise FileNotFoundError(
"No faceswap model found. Please add it to the faceswaplab directory."
)
return model
def get_face_checkpoints():
"""
Retrieve a list of face checkpoint paths.
@ -50,6 +54,8 @@ def get_face_checkpoints():
Returns:
list: A list of face paths, including the string "None" as the first element.
"""
faces_path = os.path.join(scripts.basedir(), "models", "faceswaplab", "faces", "*.pkl")
faces_path = os.path.join(
scripts.basedir(), "models", "faceswaplab", "faces", "*.pkl"
)
faces = glob.glob(faces_path)
return ["None"] + faces
