fix bugs, fix api, clean up code

main
Tran Xen 2 years ago
parent a511214aaa
commit d755be1325

@@ -5,6 +5,7 @@ from enum import Enum
 import base64, io
 from io import BytesIO
 from typing import List, Tuple, Optional
+import numpy as np

 class InpaintingWhen(Enum):
@@ -41,7 +42,10 @@ class FaceSwapUnit(BaseModel):
     blend_faces: bool = Field(description="Will blend faces if True", default=True)
     # Use same gender filtering
-    same_gender: bool = Field(description="Use same gender filtering", default=True)
+    same_gender: bool = Field(description="Use same gender filtering", default=False)
+    # Sort faces by size
+    sort_by_size: bool = Field(description="Sort Faces by size", default=False)
     # If True, discard images with low similarity
     check_similarity: bool = Field(
@@ -70,6 +74,18 @@ class FaceSwapUnit(BaseModel):
         default=(0,),
     )
+    reference_face_index: int = Field(
+        description="The face index to use to extract face from reference",
+        default=0,
+    )
+
+    def get_batch_images(self) -> List[Image.Image]:
+        images = []
+        if self.batch_images:
+            for img in self.batch_images:
+                images.append(base64_to_pil(img))
+        return images

 class PostProcessingOptions(BaseModel):
     face_restorer_name: str = Field(description="face restorer name", default=None)
@@ -82,7 +98,7 @@ class PostProcessingOptions(BaseModel):
     upscaler_name: str = Field(description="upscaler name", default=None)
     scale: float = Field(description="upscaling scale", default=1, le=10, ge=0)
-    upscale_visibility: float = Field(
+    upscaler_visibility: float = Field(
         description="upscaler visibility", default=1, le=1, ge=0
     )
@@ -116,6 +132,9 @@ class PostProcessingOptions(BaseModel):
         examples=[e.value for e in InpaintingWhen.__members__.values()],
         default=InpaintingWhen.NEVER,
     )
+    inpainting_model: str = Field(
+        description="Inpainting model", examples=["Current"], default="Current"
+    )

 class FaceSwapRequest(BaseModel):
@@ -125,7 +144,7 @@ class FaceSwapRequest(BaseModel):
         default=None,
     )
     units: List[FaceSwapUnit]
-    postprocessing: PostProcessingOptions
+    postprocessing: Optional[PostProcessingOptions]

 class FaceSwapResponse(BaseModel):
@@ -133,11 +152,11 @@ class FaceSwapResponse(BaseModel):
     infos: List[str]

     @property
-    def pil_images(self):
+    def pil_images(self) -> List[Image.Image]:
         return [base64_to_pil(img) for img in self.images]

-def pil_to_base64(img):
+def pil_to_base64(img: Image.Image) -> np.array:  # type:ignore
     if isinstance(img, str):
         img = Image.open(img)
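
The new unit fields above (sort_by_size, reference_face_index) and the now-optional postprocessing block are meant to be set from the client side. A minimal sketch of building a unit with them, assuming client_utils is importable from the script's directory; the image path is illustrative:

from client_utils import FaceSwapUnit, pil_to_base64

# Hypothetical unit using the new fields added in this commit.
unit = FaceSwapUnit(
    source_img=pil_to_base64("my_face.png"),  # illustrative path
    same_gender=False,        # default changed from True to False in this commit
    sort_by_size=True,        # new field: sort detected faces by size before indexing
    reference_face_index=0,   # new field: which face to extract from the reference image
    faces_index=(0,),         # swap the first face found in the target image
)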

@@ -5,6 +5,7 @@ from client_utils import (
     PostProcessingOptions,
     FaceSwapResponse,
     pil_to_base64,
+    InpaintingWhen,
 )

 address = "http://127.0.0.1:7860"
@@ -24,7 +25,14 @@ unit2 = FaceSwapUnit(
 # Post-processing config :
 pp = PostProcessingOptions(
-    face_restorer_name="CodeFormer", codeformer_weight=0.5, restorer_visibility=1
+    face_restorer_name="CodeFormer",
+    codeformer_weight=0.5,
+    restorer_visibility=1,
+    upscaler_name="Lanczos",
+    scale=4,
+    inpainting_steps=30,
+    inpainting_denoising_strengh=0.1,
+    inpainting_when=InpaintingWhen.BEFORE_RESTORE_FACE,
 )

 # Prepare the request
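
The request built from these units and options is then posted to the /faceswaplab/swap_face endpoint shown later in this commit. A minimal sketch of that step (assuming the requests package; unit1, unit2, pp, and address are the objects defined above; the pydantic v1-style serialization helpers and file names are illustrative):

import requests
from client_utils import FaceSwapRequest, FaceSwapResponse

request = FaceSwapRequest(
    image=pil_to_base64("test_image.png"),  # illustrative target image
    units=[unit1, unit2],
    postprocessing=pp,  # may also be omitted now that the field is Optional
)

result = requests.post(
    url=f"{address}/faceswaplab/swap_face",
    data=request.json(),
    headers={"Content-Type": "application/json; charset=utf-8"},
)
response = FaceSwapResponse.parse_obj(result.json())

for idx, img in enumerate(response.pil_images):
    img.save(f"result_{idx}.png")  # illustrative output path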

@@ -146,6 +146,7 @@ class FaceSwapScript(scripts.Script):
     def process(
         self, p: StableDiffusionProcessing, *components: List[gr.components.Component]
     ) -> None:
+        try:
             self.read_config(p, *components)

             # If is instance of img2img, we check if face swapping in source is required.
@@ -164,10 +165,13 @@ class FaceSwapScript(scripts.Script):
                 logger.info(f"processed init images: {len(init_images)}")
                 if new_inits is not None:
                     p.init_images = [img[0] for img in new_inits]
+        except Exception as e:
+            logger.info("Failed to process : %s", e)

     def postprocess(
         self, p: StableDiffusionProcessing, processed: Processed, *args: List[Any]
     ) -> None:
+        try:
             if self.enabled:
                 # Get the original images without the grid
                 orig_images: List[Image.Image] = processed.images[
@@ -248,3 +252,5 @@ class FaceSwapScript(scripts.Script):
                 processed.images = images
                 processed.infotexts = infotexts
+        except Exception as e:
+            logger.error("Failed to swap face %s in postprocess method", e)

@@ -3,7 +3,6 @@ import numpy as np
 from fastapi import FastAPI
 from modules.api import api
 from scripts.faceswaplab_api.faceswaplab_api_types import (
-    FaceSwapRequest,
     FaceSwapResponse,
 )
 from scripts.faceswaplab_globals import VERSION_FLAG
@@ -16,9 +15,15 @@ from scripts.faceswaplab_utils.imgutils import (
 )
 from scripts.faceswaplab_utils.models_utils import get_current_model
 from modules.shared import opts
+from scripts.faceswaplab_postprocessing.postprocessing import enhance_image
+from scripts.faceswaplab_postprocessing.postprocessing_options import (
+    PostProcessingOptions,
+)
+from scripts.faceswaplab_api import faceswaplab_api_types
+from scripts.faceswaplab_postprocessing.postprocessing_options import InpaintingWhen

-def encode_to_base64(image: Union[str, Image.Image, np.ndarray]) -> str:
+def encode_to_base64(image: Union[str, Image.Image, np.ndarray]) -> str:  # type: ignore
     """
     Encode an image to a base64 string.
@@ -40,7 +45,7 @@ def encode_to_base64(image: Union[str, Image.Image, np.ndarray]) -> str:
         return ""

-def encode_np_to_base64(image: np.ndarray) -> str:
+def encode_np_to_base64(image: np.ndarray) -> str:  # type: ignore
     """
     Encode a NumPy array to a base64 string.
@@ -56,27 +61,37 @@ def encode_np_to_base64(image: np.ndarray) -> str:
     return api.encode_pil_to_base64(pil)

-def faceswaplab_api(_: gr.Blocks, app: FastAPI) -> None:
-    @app.get(
-        "/faceswaplab/version",
-        tags=["faceswaplab"],
-        description="Get faceswaplab version",
-    )
-    async def version() -> Dict[str, str]:
-        return {"version": VERSION_FLAG}
-
-    # use post as we consider the method non idempotent (which is debatable)
-    @app.post(
-        "/faceswaplab/swap_face",
-        tags=["faceswaplab"],
-        description="Swap a face in an image using units",
-    )
-    async def swap_face(request: FaceSwapRequest) -> FaceSwapResponse:
-        units: List[FaceSwapUnitSettings] = []
-        src_image: Optional[Image.Image] = base64_to_pil(request.image)
-        response = FaceSwapResponse(images=[], infos=[])
-        if src_image is not None:
-            for u in request.units:
+def get_postprocessing_options(
+    options: faceswaplab_api_types.PostProcessingOptions,
+) -> PostProcessingOptions:
+    pp_options = PostProcessingOptions(
+        face_restorer_name=options.face_restorer_name,
+        restorer_visibility=options.restorer_visibility,
+        codeformer_weight=options.codeformer_weight,
+        upscaler_name=options.upscaler_name,
+        scale=options.scale,
+        upscale_visibility=options.upscaler_visibility,
+        inpainting_denoising_strengh=options.inpainting_denoising_strengh,
+        inpainting_prompt=options.inpainting_prompt,
+        inpainting_negative_prompt=options.inpainting_negative_prompt,
+        inpainting_steps=options.inpainting_steps,
+        inpainting_sampler=options.inpainting_sampler,
+        inpainting_when=options.inpainting_when,
+        inpainting_model=options.inpainting_model,
+    )
+
+    assert isinstance(
+        pp_options.inpainting_when, InpaintingWhen
+    ), "Value is not a valid InpaintingWhen enum"
+
+    return pp_options
+
+
+def get_faceswap_units_settings(
+    api_units: List[faceswaplab_api_types.FaceSwapUnit],
+) -> List[FaceSwapUnitSettings]:
+    units = []
+    for u in api_units:
         units.append(
             FaceSwapUnitSettings(
                 source_img=base64_to_pil(u.source_img),
@@ -85,15 +100,46 @@ def faceswaplab_api(_: gr.Blocks, app: FastAPI) -> None:
                 blend_faces=u.blend_faces,
                 enable=True,
                 same_gender=u.same_gender,
+                sort_by_size=u.sort_by_size,
                 check_similarity=u.check_similarity,
                 _compute_similarity=u.compute_similarity,
                 min_ref_sim=u.min_ref_sim,
                 min_sim=u.min_sim,
                 _faces_index=",".join([str(i) for i in (u.faces_index)]),
+                reference_face_index=u.reference_face_index,
                 swap_in_generated=True,
                 swap_in_source=False,
             )
         )
+    return units
+
+
+def faceswaplab_api(_: gr.Blocks, app: FastAPI) -> None:
+    @app.get(
+        "/faceswaplab/version",
+        tags=["faceswaplab"],
+        description="Get faceswaplab version",
+    )
+    async def version() -> Dict[str, str]:
+        return {"version": VERSION_FLAG}
+
+    # use post as we consider the method non idempotent (which is debatable)
+    @app.post(
+        "/faceswaplab/swap_face",
+        tags=["faceswaplab"],
+        description="Swap a face in an image using units",
+    )
+    async def swap_face(
+        request: faceswaplab_api_types.FaceSwapRequest,
+    ) -> faceswaplab_api_types.FaceSwapResponse:
+        units: List[FaceSwapUnitSettings] = []
+        src_image: Optional[Image.Image] = base64_to_pil(request.image)
+        response = FaceSwapResponse(images=[], infos=[])
+        pp_options = None
+        if request.postprocessing:
+            pp_options = get_postprocessing_options(request.postprocessing)
+        if src_image is not None:
+            units = get_faceswap_units_settings(request.units)

             swapped_images = swapper.process_images_units(
                 get_current_model(),
@@ -102,6 +148,8 @@ def faceswaplab_api(_: gr.Blocks, app: FastAPI) -> None:
                 upscaled_swapper=opts.data.get("faceswaplab_upscaled_swapper", False),
             )
             for img, info in swapped_images:
+                if pp_options:
+                    img = enhance_image(img, pp_options)
                 response.images.append(encode_to_base64(img))
                 response.infos.append(info)
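
Once the webui runs with the API enabled, the endpoints registered above can be smoke-tested directly. A minimal sketch, assuming the default 127.0.0.1:7860 address used elsewhere in this commit:

import requests

# GET /faceswaplab/version should return the VERSION_FLAG defined in faceswaplab_globals.
print(requests.get("http://127.0.0.1:7860/faceswaplab/version").json())
# Expected output: {'version': 'v1.1.0'}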

@@ -1,4 +1,4 @@
-from typing import List, Tuple
+from typing import List, Optional, Tuple
 from PIL import Image
 from scripts.faceswaplab_utils.imgutils import (
     base64_to_pil,
@@ -34,7 +34,10 @@ class FaceSwapUnit(BaseModel):
     blend_faces: bool = Field(description="Will blend faces if True", default=True)
     # Use same gender filtering
-    same_gender: bool = Field(description="Use same gender filtering", default=True)
+    same_gender: bool = Field(description="Use same gender filtering", default=False)
+    # Sort faces by size
+    sort_by_size: bool = Field(description="Sort Faces by size", default=False)
     # If True, discard images with low similarity
     check_similarity: bool = Field(
@@ -63,6 +66,11 @@ class FaceSwapUnit(BaseModel):
         default=(0,),
     )
+    reference_face_index: int = Field(
+        description="The face index to use to extract face from reference",
+        default=0,
+    )
+
     def get_batch_images(self) -> List[Image.Image]:
         images = []
         if self.batch_images:
@@ -82,7 +90,7 @@ class PostProcessingOptions(BaseModel):
     upscaler_name: str = Field(description="upscaler name", default=None)
     scale: float = Field(description="upscaling scale", default=1, le=10, ge=0)
-    upscale_visibility: float = Field(
+    upscaler_visibility: float = Field(
         description="upscaler visibility", default=1, le=1, ge=0
     )
@@ -116,6 +124,9 @@ class PostProcessingOptions(BaseModel):
         examples=[e.value for e in InpaintingWhen.__members__.values()],
         default=InpaintingWhen.NEVER,
     )
+    inpainting_model: str = Field(
+        description="Inpainting model", examples=["Current"], default="Current"
+    )

 class FaceSwapRequest(BaseModel):
@@ -125,7 +136,7 @@ class FaceSwapRequest(BaseModel):
         default=None,
     )
     units: List[FaceSwapUnit]
-    postprocessing: PostProcessingOptions
+    postprocessing: Optional[PostProcessingOptions]

 class FaceSwapResponse(BaseModel):

@@ -1,8 +1,12 @@
 import os
+from modules import scripts

 MODELS_DIR = os.path.abspath(os.path.join("models", "faceswaplab"))
 ANALYZER_DIR = os.path.abspath(os.path.join(MODELS_DIR, "analysers"))
 FACE_PARSER_DIR = os.path.abspath(os.path.join(MODELS_DIR, "parser"))
+REFERENCE_PATH = os.path.join(
+    scripts.basedir(), "extensions", "sd-webui-faceswaplab", "references"
+)

 VERSION_FLAG: str = "v1.1.0"
 EXTENSION_PATH = os.path.join("extensions", "sd-webui-faceswaplab")

@@ -14,6 +14,7 @@ from scripts.faceswaplab_swapping import swapper
 def img2img_diffusion(img: Image.Image, pp: PostProcessingOptions) -> Image.Image:
     if pp.inpainting_denoising_strengh == 0:
+        logger.info("Skip inpainting, denoising strength is 0")
         return img

     try:
@@ -25,7 +26,7 @@ inpainting_steps : {pp.inpainting_steps}
 """
         )
         if not isinstance(pp.inpainting_sampler, str):
-            pass
+            pp.inpainting_sampler = "Euler"

         logger.info("send faces to image to image")
         img = img.copy()

@@ -11,17 +11,32 @@ from scripts.faceswaplab_postprocessing.upscaling import upscale_img, restore_fa
 def enhance_image(image: Image.Image, pp_options: PostProcessingOptions) -> Image.Image:
     result_image = image
     try:
-        if pp_options.inpainting_when == InpaintingWhen.BEFORE_UPSCALING.value:
-            result_image = img2img_diffusion(image, pp_options)
+        logger.debug("enhance_image, inpainting : %s", pp_options.inpainting_when)
+        result_image = image
+        if (
+            pp_options.inpainting_when == InpaintingWhen.BEFORE_UPSCALING.value
+            or pp_options.inpainting_when == InpaintingWhen.BEFORE_UPSCALING
+        ):
+            logger.debug("Inpaint before upscale")
+            result_image = img2img_diffusion(result_image, pp_options)

         result_image = upscale_img(result_image, pp_options)

-        if pp_options.inpainting_when == InpaintingWhen.BEFORE_RESTORE_FACE.value:
-            result_image = img2img_diffusion(image, pp_options)
+        if (
+            pp_options.inpainting_when == InpaintingWhen.BEFORE_RESTORE_FACE.value
+            or pp_options.inpainting_when == InpaintingWhen.BEFORE_RESTORE_FACE
+        ):
+            logger.debug("Inpaint before restore")
+            result_image = img2img_diffusion(result_image, pp_options)

         result_image = restore_face(result_image, pp_options)

-        if pp_options.inpainting_when == InpaintingWhen.AFTER_ALL.value:
-            result_image = img2img_diffusion(image, pp_options)
+        if (
+            pp_options.inpainting_when == InpaintingWhen.AFTER_ALL.value
+            or pp_options.inpainting_when == InpaintingWhen.AFTER_ALL
+        ):
+            logger.debug("Inpaint after all")
+            result_image = img2img_diffusion(result_image, pp_options)
     except Exception as e:
         logger.error("Failed to upscale %s", e)
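
The doubled comparisons above exist because inpainting_when can arrive either as an InpaintingWhen member (the API path, after the isinstance assert) or as its raw string value (the UI dropdown). A standalone sketch of the equivalent check, using a hypothetical helper name that is not part of the codebase:

from enum import Enum
from typing import Union

def should_inpaint(current: Union[str, Enum], step: Enum) -> bool:
    # Accept either the enum member itself or its underlying value.
    return current == step or current == step.value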

@@ -19,7 +19,7 @@ class PostProcessingOptions:
     codeformer_weight: float = 1
     upscaler_name: str = ""
-    scale: int = 1
+    scale: float = 1
     upscale_visibility: float = 0.5
     inpainting_denoising_strengh: float = 0

@@ -24,39 +24,76 @@ from scripts.faceswaplab_postprocessing.postprocessing_options import (
 )
 from scripts.faceswaplab_postprocessing.postprocessing import enhance_image
 from dataclasses import fields
-from typing import Any, List, Optional, Union
+from typing import Any, Dict, List, Optional
 from scripts.faceswaplab_ui.faceswaplab_unit_settings import FaceSwapUnitSettings
 from scripts.faceswaplab_utils.models_utils import get_current_model
 import re
+from scripts.faceswaplab_globals import REFERENCE_PATH

-def compare(img1: Image.Image, img2: Image.Image) -> Union[float, str]:
-    if img1 is not None and img2 is not None:
-        return swapper.compare_faces(img1, img2)
+def compare(img1: Image.Image, img2: Image.Image) -> str:
+    """
+    Compares the similarity between two faces extracted from images using cosine similarity.
+
+    Args:
+        img1: The first image containing a face.
+        img2: The second image containing a face.
+
+    Returns:
+        A str of a float value representing the similarity between the two faces (0 to 1).
+        Returns "You need 2 images to compare" if one or both of the images do not contain any faces.
+    """
+    try:
+        if img1 is not None and img2 is not None:
+            return str(swapper.compare_faces(img1, img2))
+    except Exception as e:
+        logger.error("Failed to compare : %s", e)

     return "You need 2 images to compare"


 def extract_faces(
-    files,
-    extract_path,
+    files: Optional[List[str]],
+    extract_path: Optional[str],
     *components: List[gr.components.Component],
-):
+) -> Optional[List[str]]:
+    """
+    Extracts faces from a list of image files.
+
+    Given a list of image file paths, this function opens each image, extracts the faces,
+    and saves them in a specified directory. Post-processing is applied to each extracted face,
+    and the processed faces are saved as separate PNG files.
+
+    Parameters:
+    files (Optional[List[str]]): List of file paths to the images to extract faces from.
+    extract_path (Optional[str]): Path where the extracted faces will be saved.
+        If no path is provided, a temporary directory will be created.
+    components (List[gr.components.Component]): List of components for post-processing.
+
+    Returns:
+    Optional[List[str]]: List of file paths to the saved images of the extracted faces.
+        If no faces are found, None is returned.
+    """
+    try:
         postprocess_options = PostProcessingOptions(*components)  # type: ignore
         if not extract_path:
-        tempfile.mkdtemp()
-    if files is not None:
+            extract_path = tempfile.mkdtemp()
+
+        if files:
             images = []
             for file in files:
-                img = Image.open(file.name).convert("RGB")
+                img = Image.open(file).convert("RGB")
                 faces = swapper.get_faces(pil_to_cv2(img))
                 if faces:
                     face_images = []
                     for face in faces:
                         bbox = face.bbox.astype(int)
                         x_min, y_min, x_max, y_max = bbox
                         face_image = img.crop((x_min, y_min, x_max, y_max))
                         if (
                             postprocess_options.face_restorer_name
                             or postprocess_options.restorer_visibility
@@ -64,21 +101,55 @@ def extract_faces(
                             postprocess_options.scale = (
                                 1 if face_image.width > 512 else 512 // face_image.width
                             )
-                            face_image = enhance_image(
-                                face_image,
-                                postprocess_options,
-                            )
+                            face_image = enhance_image(face_image, postprocess_options)
                         path = tempfile.NamedTemporaryFile(
                             delete=False, suffix=".png", dir=extract_path
                         ).name
                         face_image.save(path)
                         face_images.append(path)
                     images += face_images
             return images
+    except Exception as e:
+        logger.info("Failed to extract : %s", e)
     return None


-def analyse_faces(image: Image.Image, det_threshold: float = 0.5) -> str:
+def analyse_faces(image: Image.Image, det_threshold: float = 0.5) -> Optional[str]:
+    """
+    Function to analyze the faces in an image and provide a detailed report.
+
+    Parameters
+    ----------
+    image : PIL.Image.Image
+        The input image where faces will be detected. The image must be a PIL Image object.
+    det_threshold : float, optional
+        The detection threshold for the face detection process, by default 0.5. It determines
+        the confidence level at which the function will consider a detected object as a face.
+        Value should be in the range [0, 1], with higher values indicating greater certainty.
+
+    Returns
+    -------
+    str or None
+        Returns a formatted string providing details about each face detected in the image.
+        For each face, the string will include an index and a set of facial details.
+        In the event of an exception (e.g., analysis failure), the function will log the error
+        and return None.
+
+    Raises
+    ------
+    This function handles exceptions internally and does not raise.
+
+    Examples
+    --------
+    >>> image = Image.open("test.jpg")
+    >>> print(analyse_faces(image, 0.7))
+    """
     try:
         faces = swapper.get_faces(imgutils.pil_to_cv2(image), det_thresh=det_threshold)
         result = ""
@@ -86,11 +157,12 @@ def analyse_faces(image: Image.Image, det_threshold: float = 0.5) -> str:
             result += f"\nFace {i} \n" + "=" * 40 + "\n"
             result += pformat(face) + "\n"
             result += "=" * 40
-        return result
+        return result if result else None
     except Exception as e:
         logger.error("Analysis Failed : %s", e)
-        return "Analysis Failed"
+        return None


 def sanitize_name(name: str) -> str:
@@ -116,14 +188,15 @@ def build_face_checkpoint_and_save(
     Returns:
         PIL.Image.Image or None: The resulting swapped face image if the process is successful; None otherwise.
     """
+    try:
         name = sanitize_name(name)
         batch_files = batch_files or []
         logger.info("Build %s %s", name, [x.name for x in batch_files])
         faces = swapper.get_faces_from_img_files(batch_files)
         blended_face = swapper.blend_faces(faces)
-    preview_path = os.path.join(
-        scripts.basedir(), "extensions", "sd-webui-faceswaplab", "references"
-    )
+        preview_path = REFERENCE_PATH

         faces_path = os.path.join(scripts.basedir(), "models", "faceswaplab", "faces")
         os.makedirs(faces_path, exist_ok=True)
@@ -172,12 +245,16 @@ def build_face_checkpoint_and_save(
                 return result_image
         print("No face found")
+    except Exception as e:
+        logger.error("Failed to build checkpoint %s", e)
+        return None

     return target_img


-def explore_onnx_faceswap_model(model_path):
-    data = {
+def explore_onnx_faceswap_model(model_path: str) -> pd.DataFrame:
+    try:
+        data: Dict[str, Any] = {
             "Node Name": [],
             "Op Type": [],
             "Inputs": [],
@@ -201,11 +278,14 @@ def explore_onnx_faceswap_model(model_path):
             data["Attributes"].append(attributes)

         df = pd.DataFrame(data)
+    except Exception as e:
+        logger.info("Failed to explore model %s", e)
+        return None
     return df


 def batch_process(
-    files, save_path, *components: List[gr.components.Component]
+    files: List[gr.File], save_path: str, *components: List[gr.components.Component]
 ) -> Optional[List[Image.Image]]:
     try:
         if save_path is not None:
@@ -216,7 +296,7 @@ def batch_process(
         # Parse and convert units flat components into FaceSwapUnitSettings
         for i in range(0, units_count):
-            units += [FaceSwapUnitSettings.get_unit_configuration(i, components)]
+            units += [FaceSwapUnitSettings.get_unit_configuration(i, components)]  # type: ignore
         for i, u in enumerate(units):
             logger.debug("%s, %s", pformat(i), pformat(u))
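
With the signature change above, extract_faces now expects plain file paths instead of gradio file objects. A minimal usage sketch against the module patched here (illustrative paths; post-processing components omitted so the dataclass defaults apply):

paths = extract_faces(
    files=["photo1.jpg", "photo2.jpg"],  # illustrative input paths
    extract_path=None,  # None: a temporary directory is created for the crops
)
if paths:
    print("Extracted face crops:", paths)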

@@ -3,7 +3,7 @@ import numpy as np
 import base64
 import io
 from dataclasses import dataclass, fields
-from typing import List, Union
+from typing import Any, List, Optional, Set, Union
 import dill as pickle
 import gradio as gr
 from insightface.app.common import Face
@@ -50,14 +50,16 @@ class FaceSwapUnitSettings:
     swap_in_generated: bool

     @staticmethod
-    def get_unit_configuration(unit: int, components):
+    def get_unit_configuration(
+        unit: int, components: List[gr.components.Component]
+    ) -> Any:
         fields_count = len(fields(FaceSwapUnitSettings))
         return FaceSwapUnitSettings(
             *components[unit * fields_count : unit * fields_count + fields_count]
         )

     @property
-    def faces_index(self):
+    def faces_index(self) -> Set[int]:
         """
         Convert _faces_index from str to int
         """
@@ -72,18 +74,18 @@ class FaceSwapUnitSettings:
         return faces_index

     @property
-    def compute_similarity(self):
+    def compute_similarity(self) -> bool:
         return self._compute_similarity or self.check_similarity

     @property
-    def batch_files(self):
+    def batch_files(self) -> List[gr.File]:
         """
         Return empty array instead of None for batch files
         """
         return self._batch_files or []

     @property
-    def reference_face(self):
+    def reference_face(self) -> Optional[Face]:
         """
         Extract reference face (only once and store it for the rest of processing).
         Reference face is the checkpoint or the source image or the first image in the batch in that order.
@@ -97,6 +99,7 @@ class FaceSwapUnitSettings:
                 self._reference_face = face
             except Exception as e:
                 logger.error("Failed to load checkpoint : %s", e)
+                raise e
         elif self.source_img is not None:
             if isinstance(self.source_img, str):  # source_img is a base64 string
                 if (
@@ -119,11 +122,12 @@ class FaceSwapUnitSettings:
         if self._reference_face is None:
             logger.error("You need at least one reference face")
+            raise Exception("No reference face found")

         return self._reference_face

     @property
-    def faces(self):
+    def faces(self) -> List[Face]:
         """_summary_
         Extract all faces (including reference face) to provide an array of faces
         Only processed once.
@@ -146,7 +150,7 @@ class FaceSwapUnitSettings:
         return self._faces

     @property
-    def blended_faces(self):
+    def blended_faces(self) -> Face:
         """
         Blend the faces using the mean of all embeddings
         """

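For reference, get_unit_configuration above simply slices the flat list of gradio components into per-unit chunks of len(fields(FaceSwapUnitSettings)). A standalone illustration of the same slicing on a toy dataclass (names are illustrative, not from the codebase):

from dataclasses import dataclass, fields

@dataclass
class ToyUnit:
    a: int
    b: int
    c: int

flat = [1, 2, 3, 10, 20, 30]  # two units flattened into a single component list
n = len(fields(ToyUnit))      # 3 fields per unit

unit0 = ToyUnit(*flat[0 * n : 0 * n + n])  # ToyUnit(a=1, b=2, c=3)
unit1 = ToyUnit(*flat[1 * n : 1 * n + n])  # ToyUnit(a=10, b=20, c=30)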