diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 0000000..53af1e7 --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,14 @@ +repos: + - repo: https://github.com/psf/black + rev: 23.7.0 + hooks: + - id: black + - repo: https://github.com/pre-commit/pre-commit-hooks + rev: v4.4.0 + hooks: + - id: check-added-large-files + - id: check-case-conflict + - id: check-docstring-first + - id: detect-private-key + - id: trailing-whitespace + - id: fix-byte-order-marker \ No newline at end of file diff --git a/docs/_config.yml b/docs/_config.yml index c93012a..b25be47 100644 --- a/docs/_config.yml +++ b/docs/_config.yml @@ -20,12 +20,12 @@ title: FaceSwap Lab description: >- # this means to ignore newlines until "baseurl:" - FaceSwapLab is an extension for Stable Diffusion that simplifies face-swapping. - Some key functions of FaceSwapLab include the ability to reuse faces via checkpoints, + FaceSwapLab is an extension for Stable Diffusion that simplifies face-swapping. + Some key functions of FaceSwapLab include the ability to reuse faces via checkpoints, batch process images, sort faces based on size or gender, and support for vladmantic. -domain: glucauze.github.io -url: https://glucauze.github.io -baseurl: /sd-webui-faceswaplab/ +domain: glucauze.github.io +url: https://glucauze.github.io +baseurl: /sd-webui-faceswaplab/ # Build settings theme: minima diff --git a/docs/documentation.markdown b/docs/documentation.markdown index 8ff5581..26a6a0c 100644 --- a/docs/documentation.markdown +++ b/docs/documentation.markdown @@ -92,7 +92,7 @@ The purpose of this feature is to enhance the quality of the face in the final i The upscaled inswapper is disabled by default. It can be enabled in the sd options. Understanding the various steps helps explain why results may be unsatisfactory and how to address this issue. -+ **upscaler** : LDSR if None. The LDSR option generally gives the best results but at the expense of a lot of computational time. You should test other models to form an opinion. The 003_realSR_BSRGAN_DFOWMFC_s64w8_SwinIR-L_x4_GAN model seems to give good results in a reasonable amount of time. It's not possible to disable upscaling, but it is possible to choose LANCZOS for speed if Codeformer is enabled in the upscaled inswapper. The result is generally satisfactory. ++ **upscaler** : LDSR if None. The LDSR option generally gives the best results but at the expense of a lot of computational time. You should test other models to form an opinion. The 003_realSR_BSRGAN_DFOWMFC_s64w8_SwinIR-L_x4_GAN model seems to give good results in a reasonable amount of time. It's not possible to disable upscaling, but it is possible to choose LANCZOS for speed if Codeformer is enabled in the upscaled inswapper. The result is generally satisfactory. + **restorer** : The face restorer to be used if necessary. Codeformer generally gives good results. + **sharpening** can provide more natural results, but it may also add artifacts. The same goes for **color correction**. By default, these options are set to False. + **improved mask:** The segmentation mask for the upscaled swapper is designed to avoid the square mask and prevent degradation of the non-face parts of the image. It is based on the Codeformer implementation. If "Use improved segmented mask (use pastenet to mask only the face)" and "upscaled inswapper" are checked in the settings, the mask will only cover the face, and will not be squared. 
However, depending on the image, this might introduce different types of problems such as artifacts on the border of the face. @@ -129,36 +129,36 @@ Here are the parameters that can be configured in sd settings and their default ### General Settings : - Name | Description | Default Value ---|---|--- - faceswaplab_model | Insightface model to use| models[0] if len(models) > 0 else "None" - faceswaplab_keep_original | keep original image before swapping. It true, will show original image | False - faceswaplab_units_count | How many faces units to use(requires restart) | 3 - faceswaplab_detection_threshold | Detection threshold to use to detect face, if low will detect non human face as face | 0.5 + Name | Description | Default Value + faceswaplab_model | Insightface model to use | models[0] if len(models) > 0 else "None" + faceswaplab_keep_original | Keep the original image before swapping. If true, the original image is also shown | False + faceswaplab_units_count | How many face units to use (requires restart) | 3 + faceswaplab_detection_threshold | Detection threshold used to detect faces; if set too low, non-human faces may be detected as faces | 0.5 ### Default Settings : These parameters are used to configure the default settings displayed in post-processing. - Name | Description | Default Value - faceswaplab_pp_default_face_restorer | UI Default post processing face restorer (requires restart) | None - faceswaplab_pp_default_face_restorer_visibility | UI Default post processing face restorer visibility (requires restart) | 1 - faceswaplab_pp_default_face_restorer_weight | UI Default post processing face restorer weight (requires restart) | 1 - faceswaplab_pp_default_upscaler | UI Default post processing upscaler (requires restart) | None - faceswaplab_pp_default_upscaler_visibility | UI Default post processing upscaler visibility(requires restart) | 1 + Name | Description | Default Value + faceswaplab_pp_default_face_restorer | UI Default post processing face restorer (requires restart) | None + faceswaplab_pp_default_face_restorer_visibility | UI Default post processing face restorer visibility (requires restart) | 1 + faceswaplab_pp_default_face_restorer_weight | UI Default post processing face restorer weight (requires restart) | 1 + faceswaplab_pp_default_upscaler | UI Default post processing upscaler (requires restart) | None + faceswaplab_pp_default_upscaler_visibility | UI Default post processing upscaler visibility (requires restart) | 1 ### Upscaled inswapper Settings : These parameters are used to control the upscaled inswapper, see above. - Name | Description | Default Value - faceswaplab_upscaled_swapper | Upscaled swapper. Applied only to the swapped faces. Apply transformations before merging with the original image | False - faceswaplab_upscaled_swapper_upscaler | Upscaled swapper upscaler (Recommended : LDSR but slow) | None - faceswaplab_upscaled_swapper_sharpen | Upscaled swapper sharpen | False - faceswaplab_upscaled_swapper_fixcolor | Upscaled swapper color correction | False - faceswaplab_upscaled_improved_mask | Use improved segmented mask (use pastenet to mask only the face) | True - faceswaplab_upscaled_swapper_face_restorer | Upscaled swapper face restorer | None - faceswaplab_upscaled_swapper_face_restorer_visibility | Upscaled swapper face restorer visibility | 1 - faceswaplab_upscaled_swapper_face_restorer_weight | Upscaled swapper face restorer weight (codeformer) | 1 - faceswaplab_upscaled_swapper_fthresh | Upscaled swapper fthresh (diff sensitivity) 10 = default behaviour. 
Low impact | 10 - faceswaplab_upscaled_swapper_erosion | Upscaled swapper mask erosion factor, 1 = default behaviour. The larger it is, the more blur is applied around the face. Too large and the facial change is no longer visible | 1 \ No newline at end of file + Name | Description | Default Value + faceswaplab_upscaled_swapper | Upscaled swapper. Applied only to the swapped faces. Apply transformations before merging with the original image | False + faceswaplab_upscaled_swapper_upscaler | Upscaled swapper upscaler (Recommended : LDSR but slow) | None + faceswaplab_upscaled_swapper_sharpen | Upscaled swapper sharpen | False + faceswaplab_upscaled_swapper_fixcolor | Upscaled swapper color correction | False + faceswaplab_upscaled_improved_mask | Use improved segmented mask (use pastenet to mask only the face) | True + faceswaplab_upscaled_swapper_face_restorer | Upscaled swapper face restorer | None + faceswaplab_upscaled_swapper_face_restorer_visibility | Upscaled swapper face restorer visibility | 1 + faceswaplab_upscaled_swapper_face_restorer_weight | Upscaled swapper face restorer weight (codeformer) | 1 + faceswaplab_upscaled_swapper_fthresh | Upscaled swapper fthresh (diff sensitivity) 10 = default behaviour. Low impact | 10 + faceswaplab_upscaled_swapper_erosion | Upscaled swapper mask erosion factor, 1 = default behaviour. The larger it is, the more blur is applied around the face. Too large and the facial change is no longer visible | 1 \ No newline at end of file diff --git a/docs/examples.markdown b/docs/examples.markdown index ad423aa..89f2485 100644 --- a/docs/examples.markdown +++ b/docs/examples.markdown @@ -6,7 +6,7 @@ permalink: /examples/ These examples show how to use a painting as a source. No post-processing is activated, only the upscaled inswapper with LDSR, Codeformer and segmented mask. -Moliere: +Moliere: ![](/assets/images/example1.png) diff --git a/docs/faq.markdown b/docs/faq.markdown index 207fe49..aab71d0 100644 --- a/docs/faq.markdown +++ b/docs/faq.markdown @@ -46,7 +46,7 @@ The segmentation mask for the upscaled swapper is designed to avoid the square m #### How to increase speed of upscaled inswapper? -It is possible to choose LANCZOS for speed if Codeformer is enabled in the upscaled inswapper. The result is generally satisfactory. +It is possible to choose LANCZOS for speed if Codeformer is enabled in the upscaled inswapper. The result is generally satisfactory. #### Sharpening and color correction in upscaled swapper : @@ -56,7 +56,7 @@ Sharpening can provide more natural results, but it may also add artifacts. The If you do not see any extensions after restarting, it is likely due to missing requirements, particularly if you're using Windows. Follow the instructions below: -1. Verify that there are no error messages in the terminal. +1. Verify that there are no error messages in the terminal. 2. Double-check the Installation section of this document to ensure all the steps have been followed. If you are running a specific configuration (for example, Python 3.11), please test the extension with a clean installation of the stable version of Diffusion before reporting an issue. This can help isolate whether the problem is related to your specific configuration or a broader issue with the extension. @@ -65,14 +65,14 @@ If you are running a specific configuration (for example, Python 3.11), please t The model used in this extension initially reduces the resolution of the target face before generating a 128x128 image. 
This means that regardless of the original image's size, the resolution of the processed faces will not exceed 128x128. Consequently, this lower resolution might lead to quality limitations in the results. -The output of this process might not meet high expectations, but the use of the face restorer and upscaler can help improve these results to some extent. +The output of this process might not meet high expectations, but the use of the face restorer and upscaler can help improve these results to some extent. The quality of results is inherently tied to the capabilities of the model and cannot be enhanced beyond its design. FaceSwapLab merely provides an interface for the underlying model. Therefore, unless the model from insighface is retrained and necessary alterations are made in the library (see below), the resulting quality may not meet high expectations. Consider this extension as a low-cost alternative to more sophisticated tools like Lora, or as an addition to such tools. It's important to **maintain realistic expectations of the results** provided by this extension. -#### Issue: Incorrect Gender Detection +#### Issue: Incorrect Gender Detection The gender detection functionality is handled by the underlying analysis model. As such, there might be instances where the detected gender may not be accurate. This is a limitation of the model and we currently do not have a way to improve this accuracy from our end. diff --git a/docs/features.markdown b/docs/features.markdown index f497e68..b267bf9 100644 --- a/docs/features.markdown +++ b/docs/features.markdown @@ -102,10 +102,10 @@ pp = PostProcessingOptions( codeformer_weight=0.5, restorer_visibility= 1) -# Prepare the request +# Prepare the request request = FaceSwapRequest ( image = pil_to_base64("test_image.png"), - units= [unit1, unit2], + units= [unit1, unit2], postprocessing=pp ) diff --git a/docs/index.markdown b/docs/index.markdown index b6f4455..811e521 100644 --- a/docs/index.markdown +++ b/docs/index.markdown @@ -5,7 +5,7 @@ layout: home --- -FaceSwapLab is an extension for Stable Diffusion that simplifies the use of [insighface models](https://insightface.ai/) for face-swapping. It has evolved from sd-webui-faceswap and some part of sd-webui-roop. However, a substantial amount of the code has been rewritten to improve performance and to better manage masks. +FaceSwapLab is an extension for Stable Diffusion that simplifies the use of [insightface models](https://insightface.ai/) for face-swapping. It has evolved from sd-webui-faceswap and some parts of sd-webui-roop. However, a substantial amount of the code has been rewritten to improve performance and to better manage masks. Some key [features](features) include the ability to reuse faces via checkpoints, multiple face units, batch process images, sort faces based on size or gender, and support for vladmantic. It also provides a face inpainting feature. diff --git a/docs/install.markdown b/docs/install.markdown index 5584732..3b47ca4 100644 --- a/docs/install.markdown +++ b/docs/install.markdown @@ -31,7 +31,7 @@ To install the extension, follow the steps below: **You may need to restart sd once the installation process is complete.** -On first launch, templates are downloaded, which may take some time. All models are located in the `models/faceswaplab` folder. +On first launch, templates are downloaded, which may take some time. All models are located in the `models/faceswaplab` folder. 
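+
+To verify that the first-launch download completed, you can run a short check from the webui root. This is only a sketch; the path layout below is the one used by `install.py` (`models/faceswaplab/inswapper_128.onnx`):
+
+```python
+# Sketch: confirm the inswapper model landed in the default location.
+# Assumes this is run from the stable-diffusion-webui root directory.
+import os
+
+model_path = os.path.join("models", "faceswaplab", "inswapper_128.onnx")
+print("model present:", os.path.isfile(model_path))
+```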
If you encounter the error `'NoneType' object has no attribute 'get'`, take the following steps: diff --git a/example/api/client_utils.py b/example/api/client_utils.py index 8883f57..98a4d16 100644 --- a/example/api/client_utils.py +++ b/example/api/client_utils.py @@ -6,89 +6,155 @@ import base64, io from io import BytesIO from typing import List, Tuple, Optional + class InpaintingWhen(Enum): NEVER = "Never" BEFORE_UPSCALING = "Before Upscaling/all" BEFORE_RESTORE_FACE = "After Upscaling/Before Restore Face" AFTER_ALL = "After All" -class FaceSwapUnit(BaseModel) : - + +class FaceSwapUnit(BaseModel): # The image given in reference - source_img: str = Field(description='base64 reference image', examples=["data:image/jpeg;base64,/9j/4AAQSkZJRgABAQECWAJYAAD...."], default=None) + source_img: str = Field( + description="base64 reference image", + examples=["data:image/jpeg;base64,/9j/4AAQSkZJRgABAQECWAJYAAD...."], + default=None, + ) # The checkpoint file - source_face : str = Field(description='face checkpoint (from models/faceswaplab/faces)',examples=["my_face.pkl"], default=None) + source_face: str = Field( + description="face checkpoint (from models/faceswaplab/faces)", + examples=["my_face.pkl"], + default=None, + ) # base64 batch source images - batch_images: Tuple[str] = Field(description='list of base64 batch source images',examples=["data:image/jpeg;base64,/9j/4AAQSkZJRgABAQECWAJYAAD....", "data:image/jpeg;base64,/9j/4AAQSkZJRgABAQECWAJYAAD...."], default=None) + batch_images: Tuple[str] = Field( + description="list of base64 batch source images", + examples=[ + "data:image/jpeg;base64,/9j/4AAQSkZJRgABAQECWAJYAAD....", + "data:image/jpeg;base64,/9j/4AAQSkZJRgABAQECWAJYAAD....", + ], + default=None, + ) # Will blend faces if True - blend_faces: bool = Field(description='Will blend faces if True', default=True) - + blend_faces: bool = Field(description="Will blend faces if True", default=True) + # Use same gender filtering - same_gender: bool = Field(description='Use same gender filtering', default=True) + same_gender: bool = Field(description="Use same gender filtering", default=True) # If True, discard images with low similarity - check_similarity : bool = Field(description='If True, discard images with low similarity', default=False) + check_similarity: bool = Field( + description="If True, discard images with low similarity", default=False + ) # if True will compute similarity and add it to the image info - compute_similarity : bool = Field(description='If True will compute similarity and add it to the image info', default=False) + compute_similarity: bool = Field( + description="If True will compute similarity and add it to the image info", + default=False, + ) # Minimum similarity against the used face (reference, batch or checkpoint) - min_sim: float = Field(description='Minimum similarity against the used face (reference, batch or checkpoint)', default=0.0) + min_sim: float = Field( + description="Minimum similarity against the used face (reference, batch or checkpoint)", + default=0.0, + ) # Minimum similarity against the reference (reference or checkpoint if checkpoint is given) - min_ref_sim: float = Field(description='Minimum similarity against the reference (reference or checkpoint if checkpoint is given)', default=0.0) + min_ref_sim: float = Field( + description="Minimum similarity against the reference (reference or checkpoint if checkpoint is given)", + default=0.0, + ) # The face index to use for swapping - faces_index: Tuple[int] = Field(description='The face index to 
use for swapping, list of face numbers starting from 0', default=(0,)) - - -class PostProcessingOptions (BaseModel): - face_restorer_name: str = Field(description='face restorer name', default=None) - restorer_visibility: float = Field(description='face restorer visibility', default=1, le=1, ge=0) - codeformer_weight: float = Field(description='face restorer codeformer weight', default=1, le=1, ge=0) - - upscaler_name: str = Field(description='upscaler name', default=None) - scale: float = Field(description='upscaling scale', default=1, le=10, ge=0) - upscale_visibility: float = Field(description='upscaler visibility', default=1, le=1, ge=0) - - inpainting_denoising_strengh : float = Field(description='Inpainting denoising strenght', default=0, lt=1, ge=0) - inpainting_prompt : str = Field(description='Inpainting denoising strenght',examples=["Portrait of a [gender]"], default="Portrait of a [gender]") - inpainting_negative_prompt : str = Field(description='Inpainting denoising strenght',examples=["Deformed, blurry, bad anatomy, disfigured, poorly drawn face, mutation"], default="") - inpainting_steps : int = Field(description='Inpainting steps',examples=["Portrait of a [gender]"], ge=1, le=150, default=20) - inpainting_sampler : str = Field(description='Inpainting sampler',examples=["Euler"], default="Euler") - inpainting_when : InpaintingWhen = Field(description='When inpainting happens', examples=[e.value for e in InpaintingWhen.__members__.values()], default=InpaintingWhen.NEVER) - - -class FaceSwapRequest(BaseModel) : - image : str = Field(description='base64 reference image', examples=["data:image/jpeg;base64,/9j/4AAQSkZJRgABAQECWAJYAAD...."], default=None) - units : List[FaceSwapUnit] - postprocessing : PostProcessingOptions - - -class FaceSwapResponse(BaseModel) : - images : List[str] = Field(description='base64 swapped image',default=None) - infos : List[str] + faces_index: Tuple[int] = Field( + description="The face index to use for swapping, list of face numbers starting from 0", + default=(0,), + ) + + +class PostProcessingOptions(BaseModel): + face_restorer_name: str = Field(description="face restorer name", default=None) + restorer_visibility: float = Field( + description="face restorer visibility", default=1, le=1, ge=0 + ) + codeformer_weight: float = Field( + description="face restorer codeformer weight", default=1, le=1, ge=0 + ) + + upscaler_name: str = Field(description="upscaler name", default=None) + scale: float = Field(description="upscaling scale", default=1, le=10, ge=0) + upscale_visibility: float = Field( + description="upscaler visibility", default=1, le=1, ge=0 + ) + + inpainting_denoising_strengh: float = Field( + description="Inpainting denoising strength", default=0, lt=1, ge=0 + ) + inpainting_prompt: str = Field( + description="Inpainting prompt", + examples=["Portrait of a [gender]"], + default="Portrait of a [gender]", + ) + inpainting_negative_prompt: str = Field( + description="Inpainting negative prompt", + examples=[ + "Deformed, blurry, bad anatomy, disfigured, poorly drawn face, mutation" + ], + default="", + ) + inpainting_steps: int = Field( + description="Inpainting steps", + examples=[20], + ge=1, + le=150, + default=20, + ) + inpainting_sampler: str = Field( + description="Inpainting sampler", examples=["Euler"], default="Euler" + ) + inpainting_when: InpaintingWhen = Field( + description="When inpainting happens", + examples=[e.value for e in InpaintingWhen.__members__.values()], + 
default=InpaintingWhen.NEVER, + ) + + +class FaceSwapRequest(BaseModel): + image: str = Field( + description="base64 reference image", + examples=["data:image/jpeg;base64,/9j/4AAQSkZJRgABAQECWAJYAAD...."], + default=None, + ) + units: List[FaceSwapUnit] + postprocessing: PostProcessingOptions + + +class FaceSwapResponse(BaseModel): + images: List[str] = Field(description="base64 swapped image", default=None) + infos: List[str] @property - def pil_images(self) : + def pil_images(self): return [base64_to_pil(img) for img in self.images] + def pil_to_base64(img): if isinstance(img, str): img = Image.open(img) buffer = BytesIO() - img.save(buffer, format='PNG') + img.save(buffer, format="PNG") img_data = buffer.getvalue() base64_data = base64.b64encode(img_data) - return base64_data.decode('utf-8') + return base64_data.decode("utf-8") + -def base64_to_pil(base64str : Optional[str]) -> Optional[Image.Image] : - if base64str is None : +def base64_to_pil(base64str: Optional[str]) -> Optional[Image.Image]: + if base64str is None: return None - if 'base64,' in base64str: # check if the base64 string has a data URL scheme - base64_data = base64str.split('base64,')[-1] + if "base64," in base64str: # check if the base64 string has a data URL scheme + base64_data = base64str.split("base64,")[-1] img_bytes = base64.b64decode(base64_data) else: # if no data URL scheme, just decode img_bytes = base64.b64decode(base64str) - return Image.open(io.BytesIO(img_bytes)) \ No newline at end of file + return Image.open(io.BytesIO(img_bytes)) diff --git a/example/api/roop_api_example.py b/example/api/roop_api_example.py index 1e6afc2..f38539a 100644 --- a/example/api/roop_api_example.py +++ b/example/api/roop_api_example.py @@ -1,39 +1,45 @@ import requests from PIL import Image -from client_utils import FaceSwapRequest, FaceSwapUnit, PostProcessingOptions, FaceSwapResponse, pil_to_base64 +from client_utils import ( + FaceSwapRequest, + FaceSwapUnit, + PostProcessingOptions, + FaceSwapResponse, + pil_to_base64, +) -address = 'http://127.0.0.1:7860' +address = "http://127.0.0.1:7860" # First face unit : unit1 = FaceSwapUnit( - source_img=pil_to_base64("../../references/man.png"), # The face you want to use - faces_index=(0,) # Replace first face + source_img=pil_to_base64("../../references/man.png"), # The face you want to use + faces_index=(0,), # Replace first face ) # Second face unit : unit2 = FaceSwapUnit( - source_img=pil_to_base64("../../references/woman.png"), # The face you want to use + source_img=pil_to_base64("../../references/woman.png"), # The face you want to use same_gender=True, - faces_index=(0,) # Replace first woman since same gender is on + faces_index=(0,), # Replace first woman since same gender is on ) # Post-processing config : pp = PostProcessingOptions( - face_restorer_name="CodeFormer", - codeformer_weight=0.5, - restorer_visibility= 1) - -# Prepare the request -request = FaceSwapRequest ( - image = pil_to_base64("test_image.png"), - units= [unit1, unit2], - postprocessing=pp + face_restorer_name="CodeFormer", codeformer_weight=0.5, restorer_visibility=1 +) + +# Prepare the request +request = FaceSwapRequest( + image=pil_to_base64("test_image.png"), units=[unit1, unit2], postprocessing=pp ) -result = requests.post(url=f'{address}/faceswaplab/swap_face', data=request.json(), headers={"Content-Type": "application/json; charset=utf-8"}) +result = requests.post( + url=f"{address}/faceswaplab/swap_face", + data=request.json(), + headers={"Content-Type": "application/json; charset=utf-8"}, +) 
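+# Suggested hardening (not in the original example): surface HTTP errors
+# here, so a failed call raises immediately instead of failing later while
+# parsing the JSON body. raise_for_status() is standard requests API.
+result.raise_for_status()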
response = FaceSwapResponse.parse_obj(result.json()) for img, info in zip(response.pil_images, response.infos): - img.show(title = info) - + img.show(title=info) diff --git a/install.py b/install.py index acbe6ab..5ae1104 100644 --- a/install.py +++ b/install.py @@ -8,16 +8,26 @@ import urllib.request req_file = os.path.join(os.path.dirname(os.path.realpath(__file__)), "requirements.txt") models_dir = os.path.abspath("models/faceswaplab") -faces_dir = os.path.abspath(os.path.join("models","faceswaplab","faces")) +faces_dir = os.path.abspath(os.path.join("models", "faceswaplab", "faces")) model_url = "https://huggingface.co/henryruhs/roop/resolve/main/inswapper_128.onnx" model_name = os.path.basename(model_url) model_path = os.path.join(models_dir, model_name) + def download(url, path): request = urllib.request.urlopen(url) - total = int(request.headers.get('Content-Length', 0)) - with tqdm(total=total, desc='Downloading', unit='B', unit_scale=True, unit_divisor=1024) as progress: - urllib.request.urlretrieve(url, path, reporthook=lambda count, block_size, total_size: progress.update(block_size)) + total = int(request.headers.get("Content-Length", 0)) + with tqdm( + total=total, desc="Downloading", unit="B", unit_scale=True, unit_divisor=1024 + ) as progress: + urllib.request.urlretrieve( + url, + path, + reporthook=lambda count, block_size, total_size: progress.update( + block_size + ), + ) + os.makedirs(models_dir, exist_ok=True) os.makedirs(faces_dir, exist_ok=True) diff --git a/mypy.ini b/mypy.ini new file mode 100644 index 0000000..d90ae92 --- /dev/null +++ b/mypy.ini @@ -0,0 +1,7 @@ +[mypy] +check_untyped_defs = True +disallow_any_generics = True +disallow_untyped_calls = True +disallow_untyped_defs = True +ignore_missing_imports = True +strict_optional = False \ No newline at end of file diff --git a/scripts/faceswaplab.py b/scripts/faceswaplab.py index baa80ef..37a2e0a 100644 --- a/scripts/faceswaplab.py +++ b/scripts/faceswaplab.py @@ -2,17 +2,20 @@ import importlib from scripts.faceswaplab_api import faceswaplab_api from scripts.faceswaplab_settings import faceswaplab_settings from scripts.faceswaplab_ui import faceswaplab_tab, faceswaplab_unit_ui -from scripts.faceswaplab_utils.models_utils import get_current_model, get_face_checkpoints +from scripts.faceswaplab_utils.models_utils import ( + get_current_model, + get_face_checkpoints, +) -from scripts import (faceswaplab_globals) +from scripts import faceswaplab_globals from scripts.faceswaplab_swapping import swapper from scripts.faceswaplab_utils import faceswaplab_logging, imgutils from scripts.faceswaplab_utils import models_utils from scripts.faceswaplab_postprocessing import upscaling import numpy as np -#Reload all the modules when using "apply and restart" -#This is mainly done for development purposes +# Reload all the modules when using "apply and restart" +# This is mainly done for development purposes importlib.reload(swapper) importlib.reload(faceswaplab_logging) importlib.reload(faceswaplab_globals) @@ -35,20 +38,25 @@ from modules import script_callbacks, scripts from insightface.app.common import Face from modules import scripts, shared from modules.images import save_image, image_grid -from modules.processing import (Processed, StableDiffusionProcessing, - StableDiffusionProcessingImg2Img) +from modules.processing import ( + Processed, + StableDiffusionProcessing, + StableDiffusionProcessingImg2Img, +) from modules.shared import opts from PIL import Image -from scripts.faceswaplab_utils.imgutils import 
(pil_to_cv2,check_against_nsfw) +from scripts.faceswaplab_utils.imgutils import pil_to_cv2, check_against_nsfw from scripts.faceswaplab_utils.faceswaplab_logging import logger, save_img_debug from scripts.faceswaplab_globals import VERSION_FLAG -from scripts.faceswaplab_postprocessing.postprocessing_options import PostProcessingOptions +from scripts.faceswaplab_postprocessing.postprocessing_options import ( + PostProcessingOptions, +) from scripts.faceswaplab_postprocessing.postprocessing import enhance_image from scripts.faceswaplab_ui.faceswaplab_unit_settings import FaceSwapUnitSettings -EXTENSION_PATH=os.path.join("extensions","sd-webui-faceswaplab") +EXTENSION_PATH = os.path.join("extensions", "sd-webui-faceswaplab") # Register the tab, done here to prevent it from being added twice @@ -56,44 +64,44 @@ script_callbacks.on_ui_tabs(faceswaplab_tab.on_ui_tabs) try: import modules.script_callbacks as script_callbacks + script_callbacks.on_app_started(faceswaplab_api.faceswaplab_api) except: pass class FaceSwapScript(scripts.Script): - def __init__(self) -> None: logger.info(f"FaceSwapLab {VERSION_FLAG}") super().__init__() @property - def units_count(self) : + def units_count(self): return opts.data.get("faceswaplab_units_count", 3) - + @property - def upscaled_swapper_in_generated(self) : + def upscaled_swapper_in_generated(self): return opts.data.get("faceswaplab_upscaled_swapper", False) - + @property - def upscaled_swapper_in_source(self) : + def upscaled_swapper_in_source(self): return opts.data.get("faceswaplab_upscaled_swapper_in_source", False) - + @property - def enabled(self) -> bool : + def enabled(self) -> bool: """Return True if any unit is enabled and the state is not interupted""" return any([u.enable for u in self.units]) and not shared.state.interrupted @property - def keep_original_images(self) : + def keep_original_images(self): return opts.data.get("faceswaplab_keep_original", False) @property - def swap_in_generated_units(self) : + def swap_in_generated_units(self): return [u for u in self.units if u.swap_in_generated and u.enable] @property - def swap_in_source_units(self) : + def swap_in_source_units(self): return [u for u in self.units if u.swap_in_source and u.enable] def title(self): @@ -102,7 +110,6 @@ class FaceSwapScript(scripts.Script): def show(self, is_img2img): return scripts.AlwaysVisible - def ui(self, is_img2img): with gr.Accordion(f"FaceSwapLab {VERSION_FLAG}", open=False): components = [] @@ -121,23 +128,23 @@ class FaceSwapScript(scripts.Script): # print("Running in ", alwayson.index(self), "position") # logger.info("Running scripts : %s", pformat(runner.alwayson_scripts)) - def read_config(self, p : StableDiffusionProcessing, *components) : + def read_config(self, p: StableDiffusionProcessing, *components): # The order of processing for the components is important - # The method first process faceswap units then postprocessing units + # The method first process faceswap units then postprocessing units # self.make_first_script(p) self.units: List[FaceSwapUnitSettings] = [] - - #Parse and convert units flat components into FaceSwapUnitSettings + + # Parse and convert units flat components into FaceSwapUnitSettings for i in range(0, self.units_count): self.units += [FaceSwapUnitSettings.get_unit_configuration(i, components)] for i, u in enumerate(self.units): logger.debug("%s, %s", pformat(i), pformat(u)) - #Parse the postprocessing options - #We must first find where to start from (after face swapping units) + # Parse the postprocessing options + # We 
must first find where to start from (after face swapping units) len_conf: int = len(fields(FaceSwapUnitSettings)) shift: int = self.units_count * len_conf self.postprocess_options = PostProcessingOptions( @@ -145,67 +152,92 @@ class FaceSwapScript(scripts.Script): ) logger.debug("%s", pformat(self.postprocess_options)) - if self.enabled : + if self.enabled: p.do_not_save_samples = not self.keep_original_images - def process(self, p: StableDiffusionProcessing, *components): self.read_config(p, *components) - #If is instance of img2img, we check if face swapping in source is required. + # If is instance of img2img, we check if face swapping in source is required. if isinstance(p, StableDiffusionProcessingImg2Img): if self.enabled and len(self.swap_in_source_units) > 0: - init_images : List[Tuple[Optional[Image.Image], Optional[str]]] = [(img,None) for img in p.init_images] - new_inits = swapper.process_images_units(get_current_model(), self.swap_in_source_units,images=init_images, upscaled_swapper=self.upscaled_swapper_in_source,force_blend=True) + init_images: List[Tuple[Optional[Image.Image], Optional[str]]] = [ + (img, None) for img in p.init_images + ] + new_inits = swapper.process_images_units( + get_current_model(), + self.swap_in_source_units, + images=init_images, + upscaled_swapper=self.upscaled_swapper_in_source, + force_blend=True, + ) logger.info(f"processed init images: {len(init_images)}") - if new_inits is not None : - p.init_images = [img[0] for img in new_inits] - + if new_inits is not None: + p.init_images = [img[0] for img in new_inits] - def postprocess(self, p : StableDiffusionProcessing, processed: Processed, *args): - if self.enabled : + def postprocess(self, p: StableDiffusionProcessing, processed: Processed, *args): + if self.enabled: # Get the original images without the grid - orig_images : List[Image.Image] = processed.images[processed.index_of_first_image:] - orig_infotexts : List[str] = processed.infotexts[processed.index_of_first_image:] + orig_images: List[Image.Image] = processed.images[ + processed.index_of_first_image : + ] + orig_infotexts: List[str] = processed.infotexts[ + processed.index_of_first_image : + ] keep_original = self.keep_original_images # These are were images and infos of swapped images will be stored images = [] infotexts = [] - if (len(self.swap_in_generated_units))>0 : - for i,(img,info) in enumerate(zip(orig_images, orig_infotexts)): - batch_index = i%p.batch_size - swapped_images = swapper.process_images_units(get_current_model(), self.swap_in_generated_units, images=[(img,info)], upscaled_swapper=self.upscaled_swapper_in_generated) - if swapped_images is None : + if (len(self.swap_in_generated_units)) > 0: + for i, (img, info) in enumerate(zip(orig_images, orig_infotexts)): + batch_index = i % p.batch_size + swapped_images = swapper.process_images_units( + get_current_model(), + self.swap_in_generated_units, + images=[(img, info)], + upscaled_swapper=self.upscaled_swapper_in_generated, + ) + if swapped_images is None: continue logger.info(f"{len(swapped_images)} images swapped") - for swp_img, new_info in swapped_images : - img = swp_img # Will only swap the last image in the batch in next units (FIXME : hard to fix properly but not really critical) + for swp_img, new_info in swapped_images: + img = swp_img # Will only swap the last image in the batch in next units (FIXME : hard to fix properly but not really critical) - if swp_img is not None : - - save_img_debug(swp_img,"Before apply mask") + if swp_img is not None: + 
save_img_debug(swp_img, "Before apply mask") swp_img = imgutils.apply_mask(swp_img, p, batch_index) - save_img_debug(swp_img,"After apply mask") + save_img_debug(swp_img, "After apply mask") - try : + try: if self.postprocess_options is not None: - swp_img = enhance_image(swp_img, self.postprocess_options) + swp_img = enhance_image( + swp_img, self.postprocess_options + ) except Exception as e: logger.error("Failed to upscale : %s", e) logger.info("Add swp image to processed") images.append(swp_img) infotexts.append(new_info) - if p.outpath_samples and opts.samples_save : - save_image(swp_img, p.outpath_samples, "", p.all_seeds[batch_index], p.all_prompts[batch_index], opts.samples_format,info=new_info, p=p, suffix="-swapped") - else : + if p.outpath_samples and opts.samples_save: + save_image( + swp_img, + p.outpath_samples, + "", + p.all_seeds[batch_index], + p.all_prompts[batch_index], + opts.samples_format, + info=new_info, + p=p, + suffix="-swapped", + ) + else: logger.error("swp image is None") - else : - keep_original=True - + else: + keep_original = True # Generate grid : if opts.return_grid and len(images) > 1: @@ -223,4 +255,4 @@ class FaceSwapScript(scripts.Script): infotexts += processed.infotexts processed.images = images - processed.infotexts = infotexts \ No newline at end of file + processed.infotexts = infotexts diff --git a/scripts/faceswaplab_api/faceswaplab_api.py b/scripts/faceswaplab_api/faceswaplab_api.py index 570ca59..012cc9a 100644 --- a/scripts/faceswaplab_api/faceswaplab_api.py +++ b/scripts/faceswaplab_api/faceswaplab_api.py @@ -4,14 +4,22 @@ from fastapi import FastAPI, Body from fastapi.exceptions import HTTPException from modules.api.models import * from modules.api import api -from scripts.faceswaplab_api.faceswaplab_api_types import FaceSwapUnit, FaceSwapRequest, FaceSwapResponse +from scripts.faceswaplab_api.faceswaplab_api_types import ( + FaceSwapUnit, + FaceSwapRequest, + FaceSwapResponse, +) from scripts.faceswaplab_globals import VERSION_FLAG import gradio as gr from typing import List, Optional from scripts.faceswaplab_swapping import swapper from scripts.faceswaplab_utils.faceswaplab_logging import save_img_debug from scripts.faceswaplab_ui.faceswaplab_unit_settings import FaceSwapUnitSettings -from scripts.faceswaplab_utils.imgutils import (pil_to_cv2,check_against_nsfw, base64_to_pil) +from scripts.faceswaplab_utils.imgutils import ( + pil_to_cv2, + check_against_nsfw, + base64_to_pil, +) from scripts.faceswaplab_utils.models_utils import get_current_model from modules.shared import opts @@ -26,45 +34,59 @@ def encode_to_base64(image): else: return "" + def encode_np_to_base64(image): pil = Image.fromarray(image) return api.encode_pil_to_base64(pil) def faceswaplab_api(_: gr.Blocks, app: FastAPI): - @app.get("/faceswaplab/version", tags=["faceswaplab"], description="Get faceswaplab version") + @app.get( + "/faceswaplab/version", + tags=["faceswaplab"], + description="Get faceswaplab version", + ) async def version(): return {"version": VERSION_FLAG} - + # use post as we consider the method non idempotent (which is debatable) - @app.post("/faceswaplab/swap_face", tags=["faceswaplab"], description="Swap a face in an image using units") - async def swap_face(request : FaceSwapRequest) -> FaceSwapResponse: - units : List[FaceSwapUnitSettings]= [] - src_image : Optional[Image.Image] = base64_to_pil(request.image) - response = FaceSwapResponse(images = [], infos=[]) - if src_image is not None : + @app.post( + "/faceswaplab/swap_face", + 
tags=["faceswaplab"], + description="Swap a face in an image using units", + ) + async def swap_face(request: FaceSwapRequest) -> FaceSwapResponse: + units: List[FaceSwapUnitSettings] = [] + src_image: Optional[Image.Image] = base64_to_pil(request.image) + response = FaceSwapResponse(images=[], infos=[]) + if src_image is not None: for u in request.units: units.append( - FaceSwapUnitSettings(source_img=base64_to_pil(u.source_img), - source_face = u.source_face, - _batch_files = u.get_batch_images(), - blend_faces= u.blend_faces, - enable = True, - same_gender = u.same_gender, - check_similarity=u.check_similarity, - _compute_similarity=u.compute_similarity, - min_ref_sim= u.min_ref_sim, - min_sim= u.min_sim, - _faces_index = ",".join([str(i) for i in (u.faces_index)]), - swap_in_generated=True, - swap_in_source=False - ) + FaceSwapUnitSettings( + source_img=base64_to_pil(u.source_img), + source_face=u.source_face, + _batch_files=u.get_batch_images(), + blend_faces=u.blend_faces, + enable=True, + same_gender=u.same_gender, + check_similarity=u.check_similarity, + _compute_similarity=u.compute_similarity, + min_ref_sim=u.min_ref_sim, + min_sim=u.min_sim, + _faces_index=",".join([str(i) for i in (u.faces_index)]), + swap_in_generated=True, + swap_in_source=False, + ) ) - swapped_images = swapper.process_images_units(get_current_model(), images=[(src_image,None)], units=units, upscaled_swapper=opts.data.get("faceswaplab_upscaled_swapper", False)) + swapped_images = swapper.process_images_units( + get_current_model(), + images=[(src_image, None)], + units=units, + upscaled_swapper=opts.data.get("faceswaplab_upscaled_swapper", False), + ) for img, info in swapped_images: response.images.append(encode_to_base64(img)) response.infos.append(info) return response - diff --git a/scripts/faceswaplab_api/faceswaplab_api_types.py b/scripts/faceswaplab_api/faceswaplab_api_types.py index 41b75fe..368840a 100644 --- a/scripts/faceswaplab_api/faceswaplab_api_types.py +++ b/scripts/faceswaplab_api/faceswaplab_api_types.py @@ -5,69 +5,137 @@ import dill as pickle import gradio as gr from insightface.app.common import Face from PIL import Image -from scripts.faceswaplab_utils.imgutils import (pil_to_cv2,check_against_nsfw, base64_to_pil) +from scripts.faceswaplab_utils.imgutils import ( + pil_to_cv2, + check_against_nsfw, + base64_to_pil, +) from scripts.faceswaplab_utils.faceswaplab_logging import logger from pydantic import BaseModel, Field from scripts.faceswaplab_postprocessing.postprocessing_options import InpaintingWhen -class FaceSwapUnit(BaseModel) : - +class FaceSwapUnit(BaseModel): # The image given in reference - source_img: str = Field(description='base64 reference image', examples=["data:image/jpeg;base64,/9j/4AAQSkZJRgABAQECWAJYAAD...."], default=None) + source_img: str = Field( + description="base64 reference image", + examples=["data:image/jpeg;base64,/9j/4AAQSkZJRgABAQECWAJYAAD...."], + default=None, + ) # The checkpoint file - source_face : str = Field(description='face checkpoint (from models/faceswaplab/faces)',examples=["my_face.pkl"], default=None) + source_face: str = Field( + description="face checkpoint (from models/faceswaplab/faces)", + examples=["my_face.pkl"], + default=None, + ) # base64 batch source images - batch_images: Tuple[str] = Field(description='list of base64 batch source images',examples=["data:image/jpeg;base64,/9j/4AAQSkZJRgABAQECWAJYAAD....", "data:image/jpeg;base64,/9j/4AAQSkZJRgABAQECWAJYAAD...."], default=None) + batch_images: Tuple[str] = Field( + 
description="list of base64 batch source images", + examples=[ + "data:image/jpeg;base64,/9j/4AAQSkZJRgABAQECWAJYAAD....", + "data:image/jpeg;base64,/9j/4AAQSkZJRgABAQECWAJYAAD....", + ], + default=None, + ) # Will blend faces if True - blend_faces: bool = Field(description='Will blend faces if True', default=True) - + blend_faces: bool = Field(description="Will blend faces if True", default=True) + # Use same gender filtering - same_gender: bool = Field(description='Use same gender filtering', default=True) + same_gender: bool = Field(description="Use same gender filtering", default=True) # If True, discard images with low similarity - check_similarity : bool = Field(description='If True, discard images with low similarity', default=False) + check_similarity: bool = Field( + description="If True, discard images with low similarity", default=False + ) # if True will compute similarity and add it to the image info - compute_similarity : bool = Field(description='If True will compute similarity and add it to the image info', default=False) + compute_similarity: bool = Field( + description="If True will compute similarity and add it to the image info", + default=False, + ) # Minimum similarity against the used face (reference, batch or checkpoint) - min_sim: float = Field(description='Minimum similarity against the used face (reference, batch or checkpoint)', default=0.0) + min_sim: float = Field( + description="Minimum similarity against the used face (reference, batch or checkpoint)", + default=0.0, + ) # Minimum similarity against the reference (reference or checkpoint if checkpoint is given) - min_ref_sim: float = Field(description='Minimum similarity against the reference (reference or checkpoint if checkpoint is given)', default=0.0) + min_ref_sim: float = Field( + description="Minimum similarity against the reference (reference or checkpoint if checkpoint is given)", + default=0.0, + ) # The face index to use for swapping - faces_index: Tuple[int] = Field(description='The face index to use for swapping, list of face numbers starting from 0', default=(0,)) + faces_index: Tuple[int] = Field( + description="The face index to use for swapping, list of face numbers starting from 0", + default=(0,), + ) - def get_batch_images(self) -> List[Image.Image] : + def get_batch_images(self) -> List[Image.Image]: images = [] - if self.batch_images : - for img in self.batch_images : + if self.batch_images: + for img in self.batch_images: images.append(base64_to_pil(img)) return images -class PostProcessingOptions (BaseModel): - face_restorer_name: str = Field(description='face restorer name', default=None) - restorer_visibility: float = Field(description='face restorer visibility', default=1, le=1, ge=0) - codeformer_weight: float = Field(description='face restorer codeformer weight', default=1, le=1, ge=0) - - upscaler_name: str = Field(description='upscaler name', default=None) - scale: float = Field(description='upscaling scale', default=1, le=10, ge=0) - upscale_visibility: float = Field(description='upscaler visibility', default=1, le=1, ge=0) - - inpainting_denoising_strengh : float = Field(description='Inpainting denoising strenght', default=0, lt=1, ge=0) - inpainting_prompt : str = Field(description='Inpainting denoising strenght',examples=["Portrait of a [gender]"], default="Portrait of a [gender]") - inpainting_negative_prompt : str = Field(description='Inpainting denoising strenght',examples=["Deformed, blurry, bad anatomy, disfigured, poorly drawn face, mutation"], default="") - 
inpainting_steps : int = Field(description='Inpainting steps',examples=["Portrait of a [gender]"], ge=1, le=150, default=20) - inpainting_sampler : str = Field(description='Inpainting sampler',examples=["Euler"], default="Euler") - inpainting_when : InpaintingWhen = Field(description='When inpainting happens', examples=[e.value for e in InpaintingWhen.__members__.values()], default=InpaintingWhen.NEVER) - - -class FaceSwapRequest(BaseModel) : - image : str = Field(description='base64 reference image', examples=["data:image/jpeg;base64,/9j/4AAQSkZJRgABAQECWAJYAAD...."], default=None) - units : List[FaceSwapUnit] - postprocessing : PostProcessingOptions - -class FaceSwapResponse(BaseModel) : - images : List[str] = Field(description='base64 swapped image',default=None) - infos : List[str] \ No newline at end of file + +class PostProcessingOptions(BaseModel): + face_restorer_name: str = Field(description="face restorer name", default=None) + restorer_visibility: float = Field( + description="face restorer visibility", default=1, le=1, ge=0 + ) + codeformer_weight: float = Field( + description="face restorer codeformer weight", default=1, le=1, ge=0 + ) + + upscaler_name: str = Field(description="upscaler name", default=None) + scale: float = Field(description="upscaling scale", default=1, le=10, ge=0) + upscale_visibility: float = Field( + description="upscaler visibility", default=1, le=1, ge=0 + ) + + inpainting_denoising_strengh: float = Field( + description="Inpainting denoising strength", default=0, lt=1, ge=0 + ) + inpainting_prompt: str = Field( + description="Inpainting prompt", + examples=["Portrait of a [gender]"], + default="Portrait of a [gender]", + ) + inpainting_negative_prompt: str = Field( + description="Inpainting negative prompt", + examples=[ + "Deformed, blurry, bad anatomy, disfigured, poorly drawn face, mutation" + ], + default="", + ) + inpainting_steps: int = Field( + description="Inpainting steps", + examples=[20], + ge=1, + le=150, + default=20, + ) + inpainting_sampler: str = Field( + description="Inpainting sampler", examples=["Euler"], default="Euler" + ) + inpainting_when: InpaintingWhen = Field( + description="When inpainting happens", + examples=[e.value for e in InpaintingWhen.__members__.values()], + default=InpaintingWhen.NEVER, + ) + + +class FaceSwapRequest(BaseModel): + image: str = Field( + description="base64 reference image", + examples=["data:image/jpeg;base64,/9j/4AAQSkZJRgABAQECWAJYAAD...."], + default=None, + ) + units: List[FaceSwapUnit] + postprocessing: PostProcessingOptions + + +class FaceSwapResponse(BaseModel): + images: List[str] = Field(description="base64 swapped image", default=None) + infos: List[str] diff --git a/scripts/faceswaplab_globals.py b/scripts/faceswaplab_globals.py index 417710b..4df9abb 100644 --- a/scripts/faceswaplab_globals.py +++ b/scripts/faceswaplab_globals.py @@ -1,11 +1,10 @@ from scripts.faceswaplab_utils.faceswaplab_logging import logger import os -MODELS_DIR = os.path.abspath(os.path.join("models","faceswaplab")) +MODELS_DIR = os.path.abspath(os.path.join("models", "faceswaplab")) ANALYZER_DIR = os.path.abspath(os.path.join(MODELS_DIR, "analysers")) FACE_PARSER_DIR = os.path.abspath(os.path.join(MODELS_DIR, "parser")) VERSION_FLAG = "v1.1.0" -EXTENSION_PATH=os.path.join("extensions","sd-webui-faceswaplab") +EXTENSION_PATH = os.path.join("extensions", "sd-webui-faceswaplab") NSFW_SCORE = 0.7 - diff --git a/scripts/faceswaplab_postprocessing/i2i_pp.py 
b/scripts/faceswaplab_postprocessing/i2i_pp.py index a0934c8..c5970e9 100644 --- a/scripts/faceswaplab_postprocessing/i2i_pp.py +++ b/scripts/faceswaplab_postprocessing/i2i_pp.py @@ -6,26 +6,29 @@ import numpy as np from modules import shared from scripts.faceswaplab_utils import imgutils from modules import shared, processing, codeformer_model -from modules.processing import (StableDiffusionProcessingImg2Img) -from scripts.faceswaplab_postprocessing.postprocessing_options import PostProcessingOptions, InpaintingWhen +from modules.processing import StableDiffusionProcessingImg2Img +from scripts.faceswaplab_postprocessing.postprocessing_options import ( + PostProcessingOptions, + InpaintingWhen, +) from modules import sd_models from scripts.faceswaplab_swapping import swapper -def img2img_diffusion(img : Image.Image, pp: PostProcessingOptions) -> Image.Image : - if pp.inpainting_denoising_strengh == 0 : +def img2img_diffusion(img: Image.Image, pp: PostProcessingOptions) -> Image.Image: + if pp.inpainting_denoising_strengh == 0: return img - try : + try: logger.info( -f"""Inpainting face + f"""Inpainting face Sampler : {pp.inpainting_sampler} inpainting_denoising_strength : {pp.inpainting_denoising_strengh} inpainting_steps : {pp.inpainting_steps} """ -) - if not isinstance(pp.inpainting_sampler, str) : + ) + if not isinstance(pp.inpainting_sampler, str): pass logger.info("send faces to image to image") @@ -33,44 +36,51 @@ inpainting_steps : {pp.inpainting_steps} faces = swapper.get_faces(imgutils.pil_to_cv2(img)) if faces: for face in faces: - bbox =face.bbox.astype(int) + bbox = face.bbox.astype(int) mask = imgutils.create_mask(img, bbox) - prompt = pp.inpainting_prompt.replace("[gender]", "man" if face["gender"] == 1 else "woman") - negative_prompt = pp.inpainting_negative_prompt.replace("[gender]", "man" if face["gender"] == 1 else "woman") + prompt = pp.inpainting_prompt.replace( + "[gender]", "man" if face["gender"] == 1 else "woman" + ) + negative_prompt = pp.inpainting_negative_prompt.replace( + "[gender]", "man" if face["gender"] == 1 else "woman" + ) logger.info("Denoising prompt : %s", prompt) - logger.info("Denoising strenght : %s", pp.inpainting_denoising_strengh) - - i2i_kwargs = {"sampler_name" :pp.inpainting_sampler, - "do_not_save_samples":True, - "steps" :pp.inpainting_steps, - "width" : img.width, - "inpainting_fill":1, - "inpaint_full_res":True, - "height" : img.height, - "mask": mask, - "prompt" : prompt, - "negative_prompt" :negative_prompt, - "denoising_strength" :pp.inpainting_denoising_strengh} + logger.info("Denoising strenght : %s", pp.inpainting_denoising_strengh) + + i2i_kwargs = { + "sampler_name": pp.inpainting_sampler, + "do_not_save_samples": True, + "steps": pp.inpainting_steps, + "width": img.width, + "inpainting_fill": 1, + "inpaint_full_res": True, + "height": img.height, + "mask": mask, + "prompt": prompt, + "negative_prompt": negative_prompt, + "denoising_strength": pp.inpainting_denoising_strengh, + } current_model_checkpoint = shared.opts.sd_model_checkpoint - if pp.inpainting_model and pp.inpainting_model != "Current" : + if pp.inpainting_model and pp.inpainting_model != "Current": # Change checkpoint shared.opts.sd_model_checkpoint = pp.inpainting_model sd_models.select_checkpoint sd_models.load_model() i2i_p = StableDiffusionProcessingImg2Img([img], **i2i_kwargs) i2i_processed = processing.process_images(i2i_p) - if pp.inpainting_model and pp.inpainting_model != "Current" : + if pp.inpainting_model and pp.inpainting_model != "Current": # Restore 
checkpoint shared.opts.sd_model_checkpoint = current_model_checkpoint sd_models.select_checkpoint sd_models.load_model() images = i2i_processed.images - if len(images) > 0 : + if len(images) > 0: img = images[0] return img - except Exception as e : + except Exception as e: logger.error("Failed to apply img2img to face : %s", e) import traceback + traceback.print_exc() raise e diff --git a/scripts/faceswaplab_postprocessing/postprocessing.py b/scripts/faceswaplab_postprocessing/postprocessing.py index 807f1e0..1732696 100644 --- a/scripts/faceswaplab_postprocessing/postprocessing.py +++ b/scripts/faceswaplab_postprocessing/postprocessing.py @@ -1,27 +1,30 @@ from modules.face_restoration import FaceRestoration from scripts.faceswaplab_utils.faceswaplab_logging import logger from PIL import Image -from scripts.faceswaplab_postprocessing.postprocessing_options import PostProcessingOptions, InpaintingWhen +from scripts.faceswaplab_postprocessing.postprocessing_options import ( + PostProcessingOptions, + InpaintingWhen, +) from scripts.faceswaplab_postprocessing.i2i_pp import img2img_diffusion from scripts.faceswaplab_postprocessing.upscaling import upscale_img, restore_face def enhance_image(image: Image.Image, pp_options: PostProcessingOptions) -> Image.Image: result_image = image - try : - if pp_options.inpainting_when == InpaintingWhen.BEFORE_UPSCALING.value : + try: + if pp_options.inpainting_when == InpaintingWhen.BEFORE_UPSCALING.value: result_image = img2img_diffusion(image, pp_options) result_image = upscale_img(result_image, pp_options) - if pp_options.inpainting_when == InpaintingWhen.BEFORE_RESTORE_FACE.value : - result_image = img2img_diffusion(image,pp_options) + if pp_options.inpainting_when == InpaintingWhen.BEFORE_RESTORE_FACE.value: + result_image = img2img_diffusion(image, pp_options) result_image = restore_face(result_image, pp_options) - - if pp_options.inpainting_when == InpaintingWhen.AFTER_ALL.value : - result_image = img2img_diffusion(image,pp_options) + + if pp_options.inpainting_when == InpaintingWhen.AFTER_ALL.value: + result_image = img2img_diffusion(image, pp_options) except Exception as e: logger.error("Failed to upscale %s", e) - return result_image \ No newline at end of file + return result_image diff --git a/scripts/faceswaplab_postprocessing/postprocessing_options.py b/scripts/faceswaplab_postprocessing/postprocessing_options.py index 663c80b..6ed6096 100644 --- a/scripts/faceswaplab_postprocessing/postprocessing_options.py +++ b/scripts/faceswaplab_postprocessing/postprocessing_options.py @@ -4,12 +4,14 @@ from dataclasses import dataclass from modules import shared from enum import Enum + class InpaintingWhen(Enum): NEVER = "Never" BEFORE_UPSCALING = "Before Upscaling/all" BEFORE_RESTORE_FACE = "After Upscaling/Before Restore Face" AFTER_ALL = "After All" + @dataclass class PostProcessingOptions: face_restorer_name: str = "" @@ -19,15 +21,15 @@ class PostProcessingOptions: upscaler_name: str = "" scale: int = 1 upscale_visibility: float = 0.5 - - inpainting_denoising_strengh : float = 0 - inpainting_prompt : str = "" - inpainting_negative_prompt : str = "" - inpainting_steps : int = 20 - inpainting_sampler : str = "Euler" - inpainting_when : InpaintingWhen = InpaintingWhen.BEFORE_UPSCALING - inpainting_model : str = "Current" - + + inpainting_denoising_strengh: float = 0 + inpainting_prompt: str = "" + inpainting_negative_prompt: str = "" + inpainting_steps: int = 20 + inpainting_sampler: str = "Euler" + inpainting_when: InpaintingWhen = 
InpaintingWhen.BEFORE_UPSCALING + inpainting_model: str = "Current" + @property def upscaler(self) -> UpscalerData: for upscaler in shared.sd_upscalers: @@ -40,4 +42,4 @@ class PostProcessingOptions: for face_restorer in shared.face_restorers: if face_restorer.name() == self.face_restorer_name: return face_restorer - return None \ No newline at end of file + return None diff --git a/scripts/faceswaplab_postprocessing/upscaling.py b/scripts/faceswaplab_postprocessing/upscaling.py index 9091e60..01fa438 100644 --- a/scripts/faceswaplab_postprocessing/upscaling.py +++ b/scripts/faceswaplab_postprocessing/upscaling.py @@ -1,11 +1,14 @@ - -from scripts.faceswaplab_postprocessing.postprocessing_options import PostProcessingOptions, InpaintingWhen +from scripts.faceswaplab_postprocessing.postprocessing_options import ( + PostProcessingOptions, + InpaintingWhen, +) from scripts.faceswaplab_utils.faceswaplab_logging import logger from PIL import Image import numpy as np from modules import shared, processing, codeformer_model -def upscale_img(image : Image.Image, pp_options :PostProcessingOptions) -> Image.Image : + +def upscale_img(image: Image.Image, pp_options: PostProcessingOptions) -> Image.Image: if pp_options.upscaler is not None and pp_options.upscaler.name != "None": original_image = image.copy() logger.info( @@ -23,15 +26,17 @@ def upscale_img(image : Image.Image, pp_options :PostProcessingOptions) -> Image return result_image return image -def restore_face(image : Image.Image, pp_options : PostProcessingOptions) -> Image.Image : - + +def restore_face(image: Image.Image, pp_options: PostProcessingOptions) -> Image.Image: if pp_options.face_restorer is not None: original_image = image.copy() logger.info("Restore face with %s", pp_options.face_restorer.name()) numpy_image = np.array(image) - if pp_options.face_restorer_name == "CodeFormer" : - numpy_image = codeformer_model.codeformer.restore(numpy_image, w=pp_options.codeformer_weight) - else : + if pp_options.face_restorer_name == "CodeFormer": + numpy_image = codeformer_model.codeformer.restore( + numpy_image, w=pp_options.codeformer_weight + ) + else: numpy_image = pp_options.face_restorer.restore(numpy_image) restored_image = Image.fromarray(numpy_image) @@ -39,4 +44,4 @@ def restore_face(image : Image.Image, pp_options : PostProcessingOptions) -> Ima original_image, restored_image, pp_options.restorer_visibility ) return result_image - return image \ No newline at end of file + return image diff --git a/scripts/faceswaplab_settings/faceswaplab_settings.py b/scripts/faceswaplab_settings/faceswaplab_settings.py index 5d08af3..fddec02 100644 --- a/scripts/faceswaplab_settings/faceswaplab_settings.py +++ b/scripts/faceswaplab_settings/faceswaplab_settings.py @@ -2,52 +2,215 @@ from scripts.faceswaplab_utils.models_utils import get_models from modules import script_callbacks, shared import gradio as gr + def on_ui_settings(): - section = ('faceswaplab', "FaceSwapLab") + section = ("faceswaplab", "FaceSwapLab") models = get_models() - shared.opts.add_option("faceswaplab_model", shared.OptionInfo( - models[0] if len(models) > 0 else "None", "FaceSwapLab FaceSwap Model", gr.Dropdown, {"interactive": True, "choices" : models}, section=section)) - shared.opts.add_option("faceswaplab_keep_original", shared.OptionInfo( - False, "keep original image before swapping", gr.Checkbox, {"interactive": True}, section=section)) - shared.opts.add_option("faceswaplab_units_count", shared.OptionInfo( - 3, "Max faces units (requires restart)", gr.Slider, 
{"minimum": 1, "maximum": 10, "step": 1}, section=section)) - - shared.opts.add_option("faceswaplab_detection_threshold", shared.OptionInfo( - 0.5, "Detection threshold ", gr.Slider, {"minimum": 0.1, "maximum": 0.99, "step": 0.001}, section=section)) - + shared.opts.add_option( + "faceswaplab_model", + shared.OptionInfo( + models[0] if len(models) > 0 else "None", + "FaceSwapLab FaceSwap Model", + gr.Dropdown, + {"interactive": True, "choices": models}, + section=section, + ), + ) + shared.opts.add_option( + "faceswaplab_keep_original", + shared.OptionInfo( + False, + "keep original image before swapping", + gr.Checkbox, + {"interactive": True}, + section=section, + ), + ) + shared.opts.add_option( + "faceswaplab_units_count", + shared.OptionInfo( + 3, + "Max faces units (requires restart)", + gr.Slider, + {"minimum": 1, "maximum": 10, "step": 1}, + section=section, + ), + ) + shared.opts.add_option( + "faceswaplab_detection_threshold", + shared.OptionInfo( + 0.5, + "Detection threshold ", + gr.Slider, + {"minimum": 0.1, "maximum": 0.99, "step": 0.001}, + section=section, + ), + ) - shared.opts.add_option("faceswaplab_pp_default_face_restorer", shared.OptionInfo( - None, "UI Default post processing face restorer (requires restart)", gr.Dropdown, {"interactive": True, "choices" : ["None"] + [x.name() for x in shared.face_restorers]}, section=section)) - shared.opts.add_option("faceswaplab_pp_default_face_restorer_visibility", shared.OptionInfo( - 1, "UI Default post processing face restorer visibility (requires restart)", gr.Slider, {"minimum": 0, "maximum": 1, "step": 0.001}, section=section)) - shared.opts.add_option("faceswaplab_pp_default_face_restorer_weight", shared.OptionInfo( - 1, "UI Default post processing face restorer weight (requires restart)", gr.Slider, {"minimum": 0, "maximum": 1, "step": 0.001}, section=section)) - shared.opts.add_option("faceswaplab_pp_default_upscaler", shared.OptionInfo( - None, "UI Default post processing upscaler (requires restart)", gr.Dropdown, {"interactive": True, "choices" : [upscaler.name for upscaler in shared.sd_upscalers]}, section=section)) - shared.opts.add_option("faceswaplab_pp_default_upscaler_visibility", shared.OptionInfo( - 1, "UI Default post processing upscaler visibility(requires restart)", gr.Slider, {"minimum": 0, "maximum": 1, "step": 0.001}, section=section)) + shared.opts.add_option( + "faceswaplab_pp_default_face_restorer", + shared.OptionInfo( + None, + "UI Default post processing face restorer (requires restart)", + gr.Dropdown, + { + "interactive": True, + "choices": ["None"] + [x.name() for x in shared.face_restorers], + }, + section=section, + ), + ) + shared.opts.add_option( + "faceswaplab_pp_default_face_restorer_visibility", + shared.OptionInfo( + 1, + "UI Default post processing face restorer visibility (requires restart)", + gr.Slider, + {"minimum": 0, "maximum": 1, "step": 0.001}, + section=section, + ), + ) + shared.opts.add_option( + "faceswaplab_pp_default_face_restorer_weight", + shared.OptionInfo( + 1, + "UI Default post processing face restorer weight (requires restart)", + gr.Slider, + {"minimum": 0, "maximum": 1, "step": 0.001}, + section=section, + ), + ) + shared.opts.add_option( + "faceswaplab_pp_default_upscaler", + shared.OptionInfo( + None, + "UI Default post processing upscaler (requires restart)", + gr.Dropdown, + { + "interactive": True, + "choices": [upscaler.name for upscaler in shared.sd_upscalers], + }, + section=section, + ), + ) + shared.opts.add_option( + 
"faceswaplab_pp_default_upscaler_visibility", + shared.OptionInfo( + 1, + "UI Default post processing upscaler visibility(requires restart)", + gr.Slider, + {"minimum": 0, "maximum": 1, "step": 0.001}, + section=section, + ), + ) + shared.opts.add_option( + "faceswaplab_upscaled_swapper", + shared.OptionInfo( + False, + "Upscaled swapper. Applied only to the swapped faces. Apply transformations before merging with the original image.", + gr.Checkbox, + {"interactive": True}, + section=section, + ), + ) + shared.opts.add_option( + "faceswaplab_upscaled_swapper_upscaler", + shared.OptionInfo( + None, + "Upscaled swapper upscaler (Recommanded : LDSR but slow)", + gr.Dropdown, + { + "interactive": True, + "choices": [upscaler.name for upscaler in shared.sd_upscalers], + }, + section=section, + ), + ) + shared.opts.add_option( + "faceswaplab_upscaled_swapper_sharpen", + shared.OptionInfo( + False, + "Upscaled swapper sharpen", + gr.Checkbox, + {"interactive": True}, + section=section, + ), + ) + shared.opts.add_option( + "faceswaplab_upscaled_swapper_fixcolor", + shared.OptionInfo( + False, + "Upscaled swapper color correction", + gr.Checkbox, + {"interactive": True}, + section=section, + ), + ) + shared.opts.add_option( + "faceswaplab_upscaled_improved_mask", + shared.OptionInfo( + True, + "Use improved segmented mask (use pastenet to mask only the face)", + gr.Checkbox, + {"interactive": True}, + section=section, + ), + ) + shared.opts.add_option( + "faceswaplab_upscaled_swapper_face_restorer", + shared.OptionInfo( + None, + "Upscaled swapper face restorer", + gr.Dropdown, + { + "interactive": True, + "choices": ["None"] + [x.name() for x in shared.face_restorers], + }, + section=section, + ), + ) + shared.opts.add_option( + "faceswaplab_upscaled_swapper_face_restorer_visibility", + shared.OptionInfo( + 1, + "Upscaled swapper face restorer visibility", + gr.Slider, + {"minimum": 0, "maximum": 1, "step": 0.001}, + section=section, + ), + ) + shared.opts.add_option( + "faceswaplab_upscaled_swapper_face_restorer_weight", + shared.OptionInfo( + 1, + "Upscaled swapper face restorer weight (codeformer)", + gr.Slider, + {"minimum": 0, "maximum": 1, "step": 0.001}, + section=section, + ), + ) + shared.opts.add_option( + "faceswaplab_upscaled_swapper_fthresh", + shared.OptionInfo( + 10, + "Upscaled swapper fthresh (diff sensitivity) 10 = default behaviour. Low impact.", + gr.Slider, + {"minimum": 5, "maximum": 250, "step": 1}, + section=section, + ), + ) + shared.opts.add_option( + "faceswaplab_upscaled_swapper_erosion", + shared.OptionInfo( + 1, + "Upscaled swapper mask erosion factor, 1 = default behaviour. The larger it is, the more blur is applied around the face. Too large and the facial change is no longer visible.", + gr.Slider, + {"minimum": 0, "maximum": 10, "step": 0.001}, + section=section, + ), + ) - shared.opts.add_option("faceswaplab_upscaled_swapper", shared.OptionInfo( - False, "Upscaled swapper. Applied only to the swapped faces. 
Apply transformations before merging with the original image.", gr.Checkbox, {"interactive": True}, section=section)) - shared.opts.add_option("faceswaplab_upscaled_swapper_upscaler", shared.OptionInfo( - None, "Upscaled swapper upscaler (Recommanded : LDSR but slow)", gr.Dropdown, {"interactive": True, "choices" : [upscaler.name for upscaler in shared.sd_upscalers]}, section=section)) - shared.opts.add_option("faceswaplab_upscaled_swapper_sharpen", shared.OptionInfo( - False, "Upscaled swapper sharpen", gr.Checkbox, {"interactive": True}, section=section)) - shared.opts.add_option("faceswaplab_upscaled_swapper_fixcolor", shared.OptionInfo( - False, "Upscaled swapper color correction", gr.Checkbox, {"interactive": True}, section=section)) - shared.opts.add_option("faceswaplab_upscaled_improved_mask", shared.OptionInfo( - True, "Use improved segmented mask (use pastenet to mask only the face)", gr.Checkbox, {"interactive": True}, section=section)) - shared.opts.add_option("faceswaplab_upscaled_swapper_face_restorer", shared.OptionInfo( - None, "Upscaled swapper face restorer", gr.Dropdown, {"interactive": True, "choices" : ["None"] + [x.name() for x in shared.face_restorers]}, section=section)) - shared.opts.add_option("faceswaplab_upscaled_swapper_face_restorer_visibility", shared.OptionInfo( - 1, "Upscaled swapper face restorer visibility", gr.Slider, {"minimum": 0, "maximum": 1, "step": 0.001}, section=section)) - shared.opts.add_option("faceswaplab_upscaled_swapper_face_restorer_weight", shared.OptionInfo( - 1, "Upscaled swapper face restorer weight (codeformer)", gr.Slider, {"minimum": 0, "maximum": 1, "step": 0.001}, section=section)) - shared.opts.add_option("faceswaplab_upscaled_swapper_fthresh", shared.OptionInfo( - 10, "Upscaled swapper fthresh (diff sensitivity) 10 = default behaviour. Low impact.", gr.Slider, {"minimum": 5, "maximum": 250, "step": 1}, section=section)) - shared.opts.add_option("faceswaplab_upscaled_swapper_erosion", shared.OptionInfo( - 1, "Upscaled swapper mask erosion factor, 1 = default behaviour. The larger it is, the more blur is applied around the face. Too large and the facial change is no longer visible.", gr.Slider, {"minimum": 0, "maximum": 10, "step": 0.001}, section=section)) -script_callbacks.on_ui_settings(on_ui_settings) \ No newline at end of file +script_callbacks.on_ui_settings(on_ui_settings) diff --git a/scripts/faceswaplab_swapping/facemask.py b/scripts/faceswaplab_swapping/facemask.py index ec649bf..8af364c 100644 --- a/scripts/faceswaplab_swapping/facemask.py +++ b/scripts/faceswaplab_swapping/facemask.py @@ -7,21 +7,27 @@ from functools import lru_cache from typing import Union, List from torch import device as torch_device + @lru_cache def get_parsing_model(device: torch_device) -> torch.nn.Module: """ - Returns an instance of the parsing model. + Returns an instance of the parsing model. The returned model is cached for faster subsequent access. - + Args: device: The torch device to use for computations. - + Returns: The parsing model. """ return init_parsing_model(device=device) -def convert_image_to_tensor(images: Union[np.ndarray, List[np.ndarray]], convert_bgr_to_rgb: bool = True, use_float32: bool = True) -> Union[torch.Tensor, List[torch.Tensor]]: + +def convert_image_to_tensor( + images: Union[np.ndarray, List[np.ndarray]], + convert_bgr_to_rgb: bool = True, + use_float32: bool = True, +) -> Union[torch.Tensor, List[torch.Tensor]]: """ Converts an image or a list of images to PyTorch tensor. 
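[Editor's note — not part of the patch] The facemask.py hunks above and below only re-wrap convert_image_to_tensor for black; the helper itself is the bridge between OpenCV images and the face-parsing model. A minimal sketch of its single-image path, assuming a 3-channel BGR input as OpenCV produces it (the function name and demo values are illustrative, not from the patch):

```python
import cv2
import numpy as np
import torch

def bgr_image_to_chw_tensor(image: np.ndarray) -> torch.Tensor:
    # Sketch of the single-image branch of convert_image_to_tensor
    if image.dtype == np.float64:
        image = image.astype(np.float32)  # cv2.cvtColor rejects float64
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)  # OpenCV stores BGR
    tensor = torch.from_numpy(image.transpose(2, 0, 1))  # HWC -> CHW
    return tensor.float()

demo = np.random.rand(512, 512, 3).astype(np.float32)  # 512x512 is the parsing model's input size
print(bgr_image_to_chw_tensor(demo).shape)  # torch.Size([3, 512, 512])
```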
@@ -33,10 +39,13 @@ def convert_image_to_tensor(images: Union[np.ndarray, List[np.ndarray]], convert Returns: PyTorch tensor or a list of PyTorch tensors. """ - def _convert_single_image_to_tensor(image: np.ndarray, convert_bgr_to_rgb: bool, use_float32: bool) -> torch.Tensor: + + def _convert_single_image_to_tensor( + image: np.ndarray, convert_bgr_to_rgb: bool, use_float32: bool + ) -> torch.Tensor: if image.shape[2] == 3 and convert_bgr_to_rgb: - if image.dtype == 'float64': - image = image.astype('float32') + if image.dtype == "float64": + image = image.astype("float32") image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) image_tensor = torch.from_numpy(image.transpose(2, 0, 1)) if use_float32: @@ -44,10 +53,14 @@ def convert_image_to_tensor(images: Union[np.ndarray, List[np.ndarray]], convert return image_tensor if isinstance(images, list): - return [_convert_single_image_to_tensor(image, convert_bgr_to_rgb, use_float32) for image in images] + return [ + _convert_single_image_to_tensor(image, convert_bgr_to_rgb, use_float32) + for image in images + ] else: return _convert_single_image_to_tensor(images, convert_bgr_to_rgb, use_float32) + def generate_face_mask(face_image: np.ndarray, device: torch.device) -> np.ndarray: """ Generates a face mask given a face image. @@ -60,12 +73,18 @@ def generate_face_mask(face_image: np.ndarray, device: torch.device) -> np.ndarr The face mask as a numpy.ndarray. """ # Resize the face image for the model - resized_face_image = cv2.resize(face_image, (512, 512), interpolation=cv2.INTER_LINEAR) - + resized_face_image = cv2.resize( + face_image, (512, 512), interpolation=cv2.INTER_LINEAR + ) + # Preprocess the image - face_input = convert_image_to_tensor((resized_face_image.astype('float32') / 255.0), convert_bgr_to_rgb=True, use_float32=True) + face_input = convert_image_to_tensor( + (resized_face_image.astype("float32") / 255.0), + convert_bgr_to_rgb=True, + use_float32=True, + ) normalize(face_input, (0.5, 0.5, 0.5), (0.5, 0.5, 0.5), inplace=True) - assert isinstance(face_input,torch.Tensor) + assert isinstance(face_input, torch.Tensor) face_input = torch.unsqueeze(face_input, 0).to(device) # Pass the image through the model @@ -75,7 +94,27 @@ def generate_face_mask(face_image: np.ndarray, device: torch.device) -> np.ndarr # Generate the mask from the model output parse_mask = np.zeros(model_output.shape) - MASK_COLOR_MAP = [0, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 0, 255, 0, 0, 0] + MASK_COLOR_MAP = [ + 0, + 255, + 255, + 255, + 255, + 255, + 255, + 255, + 255, + 255, + 255, + 255, + 255, + 255, + 0, + 255, + 0, + 0, + 0, + ] for idx, color in enumerate(MASK_COLOR_MAP): parse_mask[model_output == idx] = color diff --git a/scripts/faceswaplab_swapping/parsing/__init__.py b/scripts/faceswaplab_swapping/parsing/__init__.py index 2000ff7..6b4c5fd 100644 --- a/scripts/faceswaplab_swapping/parsing/__init__.py +++ b/scripts/faceswaplab_swapping/parsing/__init__.py @@ -5,36 +5,36 @@ S-Lab License 1.0 Copyright 2022 S-Lab -Redistribution and use for non-commercial purpose in source and -binary forms, with or without modification, are permitted provided +Redistribution and use for non-commercial purpose in source and +binary forms, with or without modification, are permitted provided that the following conditions are met: -1. Redistributions of source code must retain the above copyright +1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. -2. 
Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the +2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the distribution. -3. Neither the name of the copyright holder nor the names of its - contributors may be used to endorse or promote products derived +3. Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from this software without specific prior written permission. -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -In the event that redistribution and/or use for commercial purpose in -source or binary forms, with or without modification is required, +In the event that redistribution and/or use for commercial purpose in +source or binary forms, with or without modification is required, please contact the contributor(s) of the work. 
""" @@ -50,12 +50,12 @@ from scripts.faceswaplab_globals import FACE_PARSER_DIR ROOT_DIR = FACE_PARSER_DIR + def load_file_from_url(url, model_dir=None, progress=True, file_name=None): - """Ref:https://github.com/1adrianb/face-alignment/blob/master/face_alignment/utils.py - """ + """Ref:https://github.com/1adrianb/face-alignment/blob/master/face_alignment/utils.py""" if model_dir is None: hub_dir = get_dir() - model_dir = os.path.join(hub_dir, 'checkpoints') + model_dir = os.path.join(hub_dir, "checkpoints") os.makedirs(os.path.join(ROOT_DIR, model_dir), exist_ok=True) @@ -70,10 +70,12 @@ def load_file_from_url(url, model_dir=None, progress=True, file_name=None): return cached_file -def init_parsing_model(device='cuda'): +def init_parsing_model(device="cuda"): model = ParseNet(in_size=512, out_size=512, parsing_ch=19) - model_url = 'https://github.com/sczhou/CodeFormer/releases/download/v0.1.0/parsing_parsenet.pth' - model_path = load_file_from_url(url=model_url, model_dir='weights/facelib', progress=True, file_name=None) + model_url = "https://github.com/sczhou/CodeFormer/releases/download/v0.1.0/parsing_parsenet.pth" + model_path = load_file_from_url( + url=model_url, model_dir="weights/facelib", progress=True, file_name=None + ) load_net = torch.load(model_path, map_location=lambda storage, loc: storage) model.load_state_dict(load_net, strict=True) model.eval() diff --git a/scripts/faceswaplab_swapping/parsing/parsenet.py b/scripts/faceswaplab_swapping/parsing/parsenet.py index 928591d..f3782b5 100644 --- a/scripts/faceswaplab_swapping/parsing/parsenet.py +++ b/scripts/faceswaplab_swapping/parsing/parsenet.py @@ -5,36 +5,36 @@ S-Lab License 1.0 Copyright 2022 S-Lab -Redistribution and use for non-commercial purpose in source and -binary forms, with or without modification, are permitted provided +Redistribution and use for non-commercial purpose in source and +binary forms, with or without modification, are permitted provided that the following conditions are met: -1. Redistributions of source code must retain the above copyright +1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. -2. Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the +2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the distribution. -3. Neither the name of the copyright holder nor the names of its - contributors may be used to endorse or promote products derived +3. Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from this software without specific prior written permission. -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -In the event that redistribution and/or use for commercial purpose in -source or binary forms, with or without modification is required, +In the event that redistribution and/or use for commercial purpose in +source or binary forms, with or without modification is required, please contact the contributor(s) of the work. Modified from https://github.com/chaofengc/PSFRGAN @@ -98,7 +98,7 @@ exhaustive, and do not form part of our licenses. such as asking that all changes be marked or described. Although not required by our licenses, you are encouraged to respect those requests where reasonable. More_considerations - for the public: + for the public: wiki.creativecommons.org/Considerations_for_licensees ======================================================================= @@ -499,27 +499,27 @@ class NormLayer(nn.Module): input_size: input shape without batch size, for layer norm. """ - def __init__(self, channels, normalize_shape=None, norm_type='bn'): + def __init__(self, channels, normalize_shape=None, norm_type="bn"): super(NormLayer, self).__init__() norm_type = norm_type.lower() self.norm_type = norm_type - if norm_type == 'bn': + if norm_type == "bn": self.norm = nn.BatchNorm2d(channels, affine=True) - elif norm_type == 'in': + elif norm_type == "in": self.norm = nn.InstanceNorm2d(channels, affine=False) - elif norm_type == 'gn': + elif norm_type == "gn": self.norm = nn.GroupNorm(32, channels, affine=True) - elif norm_type == 'pixel': + elif norm_type == "pixel": self.norm = lambda x: F.normalize(x, p=2, dim=1) - elif norm_type == 'layer': + elif norm_type == "layer": self.norm = nn.LayerNorm(normalize_shape) - elif norm_type == 'none': + elif norm_type == "none": self.norm = lambda x: x * 1.0 else: - assert 1 == 0, f'Norm type {norm_type} not support.' + assert 1 == 0, f"Norm type {norm_type} not support." 
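[Editor's note — not part of the patch] The NormLayer hunk above is pure quote and formatting churn; the underlying logic is a plain string-to-module dispatch. A condensed sketch of the same pattern (make_norm is an illustrative name, and raising ValueError instead of the original `assert 1 == 0` is an editorial substitution):

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

def make_norm(norm_type: str, channels: int, normalize_shape=None):
    # Mirrors the dispatch table in NormLayer.__init__
    norm_type = norm_type.lower()
    if norm_type == "bn":
        return nn.BatchNorm2d(channels, affine=True)
    if norm_type == "in":
        return nn.InstanceNorm2d(channels, affine=False)
    if norm_type == "gn":
        return nn.GroupNorm(32, channels, affine=True)
    if norm_type == "pixel":
        return lambda x: F.normalize(x, p=2, dim=1)
    if norm_type == "layer":
        return nn.LayerNorm(normalize_shape)
    if norm_type == "none":
        return lambda x: x
    raise ValueError(f"Norm type {norm_type} not supported.")

x = torch.randn(1, 64, 32, 32)
print(make_norm("gn", 64)(x).shape)  # torch.Size([1, 64, 32, 32])
```

Note that NormLayer.forward also checks for a "spade" type that __init__ never constructs; that quirk predates this patch.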
def forward(self, x, ref=None): - if self.norm_type == 'spade': + if self.norm_type == "spade": return self.norm(x, ref) else: return self.norm(x) @@ -537,51 +537,56 @@ class ReluLayer(nn.Module): - none: direct pass """ - def __init__(self, channels, relu_type='relu'): + def __init__(self, channels, relu_type="relu"): super(ReluLayer, self).__init__() relu_type = relu_type.lower() - if relu_type == 'relu': + if relu_type == "relu": self.func = nn.ReLU(True) - elif relu_type == 'leakyrelu': + elif relu_type == "leakyrelu": self.func = nn.LeakyReLU(0.2, inplace=True) - elif relu_type == 'prelu': + elif relu_type == "prelu": self.func = nn.PReLU(channels) - elif relu_type == 'selu': + elif relu_type == "selu": self.func = nn.SELU(True) - elif relu_type == 'none': + elif relu_type == "none": self.func = lambda x: x * 1.0 else: - assert 1 == 0, f'Relu type {relu_type} not support.' + assert 1 == 0, f"Relu type {relu_type} not support." def forward(self, x): return self.func(x) class ConvLayer(nn.Module): - - def __init__(self, - in_channels, - out_channels, - kernel_size=3, - scale='none', - norm_type='none', - relu_type='none', - use_pad=True, - bias=True): + def __init__( + self, + in_channels, + out_channels, + kernel_size=3, + scale="none", + norm_type="none", + relu_type="none", + use_pad=True, + bias=True, + ): super(ConvLayer, self).__init__() self.use_pad = use_pad self.norm_type = norm_type - if norm_type in ['bn']: + if norm_type in ["bn"]: bias = False - stride = 2 if scale == 'down' else 1 + stride = 2 if scale == "down" else 1 self.scale_func = lambda x: x - if scale == 'up': - self.scale_func = lambda x: nn.functional.interpolate(x, scale_factor=2, mode='nearest') + if scale == "up": + self.scale_func = lambda x: nn.functional.interpolate( + x, scale_factor=2, mode="nearest" + ) - self.reflection_pad = nn.ReflectionPad2d(int(np.ceil((kernel_size - 1.) 
/ 2))) - self.conv2d = nn.Conv2d(in_channels, out_channels, kernel_size, stride, bias=bias) + self.reflection_pad = nn.ReflectionPad2d(int(np.ceil((kernel_size - 1.0) / 2))) + self.conv2d = nn.Conv2d( + in_channels, out_channels, kernel_size, stride, bias=bias + ) self.relu = ReluLayer(out_channels, relu_type) self.norm = NormLayer(out_channels, norm_type=norm_type) @@ -601,19 +606,27 @@ class ResidualBlock(nn.Module): Residual block recommended in: http://torch.ch/blog/2016/02/04/resnets.html """ - def __init__(self, c_in, c_out, relu_type='prelu', norm_type='bn', scale='none'): + def __init__(self, c_in, c_out, relu_type="prelu", norm_type="bn", scale="none"): super(ResidualBlock, self).__init__() - if scale == 'none' and c_in == c_out: + if scale == "none" and c_in == c_out: self.shortcut_func = lambda x: x else: self.shortcut_func = ConvLayer(c_in, c_out, 3, scale) - scale_config_dict = {'down': ['none', 'down'], 'up': ['up', 'none'], 'none': ['none', 'none']} + scale_config_dict = { + "down": ["none", "down"], + "up": ["up", "none"], + "none": ["none", "none"], + } scale_conf = scale_config_dict[scale] - self.conv1 = ConvLayer(c_in, c_out, 3, scale_conf[0], norm_type=norm_type, relu_type=relu_type) - self.conv2 = ConvLayer(c_out, c_out, 3, scale_conf[1], norm_type=norm_type, relu_type='none') + self.conv1 = ConvLayer( + c_in, c_out, 3, scale_conf[0], norm_type=norm_type, relu_type=relu_type + ) + self.conv2 = ConvLayer( + c_out, c_out, 3, scale_conf[1], norm_type=norm_type, relu_type="none" + ) def forward(self, x): identity = self.shortcut_func(x) @@ -624,20 +637,21 @@ class ResidualBlock(nn.Module): class ParseNet(nn.Module): - - def __init__(self, - in_size=128, - out_size=128, - min_feat_size=32, - base_ch=64, - parsing_ch=19, - res_depth=10, - relu_type='LeakyReLU', - norm_type='bn', - ch_range=[32, 256]): + def __init__( + self, + in_size=128, + out_size=128, + min_feat_size=32, + base_ch=64, + parsing_ch=19, + res_depth=10, + relu_type="LeakyReLU", + norm_type="bn", + ch_range=[32, 256], + ): super().__init__() self.res_depth = res_depth - act_args = {'norm_type': norm_type, 'relu_type': relu_type} + act_args = {"norm_type": norm_type, "relu_type": relu_type} min_ch, max_ch = ch_range ch_clip = lambda x: max(min_ch, min(x, max_ch)) # noqa: E731 @@ -652,17 +666,19 @@ class ParseNet(nn.Module): head_ch = base_ch for i in range(down_steps): cin, cout = ch_clip(head_ch), ch_clip(head_ch * 2) - self.encoder.append(ResidualBlock(cin, cout, scale='down', **act_args)) + self.encoder.append(ResidualBlock(cin, cout, scale="down", **act_args)) head_ch = head_ch * 2 self.body = [] for i in range(res_depth): - self.body.append(ResidualBlock(ch_clip(head_ch), ch_clip(head_ch), **act_args)) + self.body.append( + ResidualBlock(ch_clip(head_ch), ch_clip(head_ch), **act_args) + ) self.decoder = [] for i in range(up_steps): cin, cout = ch_clip(head_ch), ch_clip(head_ch // 2) - self.decoder.append(ResidualBlock(cin, cout, scale='up', **act_args)) + self.decoder.append(ResidualBlock(cin, cout, scale="up", **act_args)) head_ch = head_ch // 2 self.encoder = nn.Sequential(*self.encoder) diff --git a/scripts/faceswaplab_swapping/swapper.py b/scripts/faceswaplab_swapping/swapper.py index 5556a2e..5a138dd 100644 --- a/scripts/faceswaplab_swapping/swapper.py +++ b/scripts/faceswaplab_swapping/swapper.py @@ -12,7 +12,11 @@ from PIL import Image from sklearn.metrics.pairwise import cosine_similarity from scripts.faceswaplab_swapping import upscaled_inswapper -from scripts.faceswaplab_utils.imgutils import 
cv2_to_pil, pil_to_cv2, check_against_nsfw +from scripts.faceswaplab_utils.imgutils import ( + cv2_to_pil, + pil_to_cv2, + check_against_nsfw, +) from scripts.faceswaplab_utils.faceswaplab_logging import logger, save_img_debug from scripts import faceswaplab_globals from modules.shared import opts @@ -48,19 +52,20 @@ def cosine_similarity_face(face1, face2) -> float: # Return the maximum of 0 and the calculated similarity as the final similarity score return max(0, similarity[0, 0]) + def compare_faces(img1: Image.Image, img2: Image.Image) -> float: """ Compares the similarity between two faces extracted from images using cosine similarity. - + Args: img1: The first image containing a face. img2: The second image containing a face. - + Returns: - A float value representing the similarity between the two faces (0 to 1). + A float value representing the similarity between the two faces (0 to 1). Returns -1 if one or both of the images do not contain any faces. """ - + # Extract faces from the images face1 = get_or_default(get_faces(pil_to_cv2(img1)), 0, None) face2 = get_or_default(get_faces(pil_to_cv2(img2)), 0, None) @@ -69,13 +74,14 @@ def compare_faces(img1: Image.Image, img2: Image.Image) -> float: if face1 is not None and face2 is not None: # Calculate the cosine similarity between the faces return cosine_similarity_face(face1, face2) - + # Return -1 if one or both of the images do not contain any faces return -1 class FaceModelException(Exception): """Exception raised when an error is encountered in the face model.""" + def __init__(self, message: str) -> None: """ Args: @@ -84,15 +90,16 @@ class FaceModelException(Exception): self.message = message super().__init__(self.message) + @lru_cache(maxsize=1) def getAnalysisModel(): """ Retrieves the analysis model for face analysis. - + Returns: insightface.app.FaceAnalysis: The analysis model for face analysis. """ - try : + try: if not os.path.exists(faceswaplab_globals.ANALYZER_DIR): os.makedirs(faceswaplab_globals.ANALYZER_DIR) @@ -101,10 +108,13 @@ def getAnalysisModel(): return insightface.app.FaceAnalysis( name="buffalo_l", providers=providers, root=faceswaplab_globals.ANALYZER_DIR ) - except Exception as e : - logger.error("Loading of swapping model failed, please check the requirements (On Windows, download and install Visual Studio. During the install, make sure to include the Python and C++ packages.)") + except Exception as e: + logger.error( + "Loading of swapping model failed, please check the requirements (On Windows, download and install Visual Studio. During the install, make sure to include the Python and C++ packages.)" + ) raise FaceModelException("Loading of swapping model failed") - + + @lru_cache(maxsize=1) def getFaceSwapModel(model_path: str): """ @@ -116,14 +126,23 @@ def getFaceSwapModel(model_path: str): Returns: insightface.model_zoo.FaceModel: The face swap model. """ - try : + try: # Initializes the face swap model using the specified model path. - return upscaled_inswapper.UpscaledINSwapper(insightface.model_zoo.get_model(model_path, providers=providers)) - except Exception as e : - logger.error("Loading of swapping model failed, please check the requirements (On Windows, download and install Visual Studio. 
During the install, make sure to include the Python and C++ packages.)") + return upscaled_inswapper.UpscaledINSwapper( + insightface.model_zoo.get_model(model_path, providers=providers) + ) + except Exception as e: + logger.error( + "Loading of swapping model failed, please check the requirements (On Windows, download and install Visual Studio. During the install, make sure to include the Python and C++ packages.)" + ) -def get_faces(img_data: np.ndarray, det_size=(640, 640), det_thresh : Optional[int]=None, sort_by_face_size = False) -> List[Face]: +def get_faces( + img_data: np.ndarray, + det_size=(640, 640), + det_thresh: Optional[int] = None, + sort_by_face_size=False, +) -> List[Face]: """ Detects and retrieves faces from an image using an analysis model. @@ -136,7 +155,7 @@ def get_faces(img_data: np.ndarray, det_size=(640, 640), det_thresh : Optional[i list: A list of detected faces, sorted by their x-coordinate of the bounding box. """ - if det_thresh is None : + if det_thresh is None: det_thresh = opts.data.get("faceswaplab_detection_threshold", 0.5) # Create a deep copy of the analysis model (otherwise det_size is attached to the analysis model and can't be changed) @@ -155,8 +174,12 @@ def get_faces(img_data: np.ndarray, det_size=(640, 640), det_thresh : Optional[i return get_faces(img_data, det_size=det_size_half, det_thresh=det_thresh) try: - if sort_by_face_size : - return sorted(face, reverse=True, key=lambda x: (x.bbox[2] - x.bbox[0]) * (x.bbox[3] - x.bbox[1])) + if sort_by_face_size: + return sorted( + face, + reverse=True, + key=lambda x: (x.bbox[2] - x.bbox[0]) * (x.bbox[3] - x.bbox[1]), + ) # Sort the detected faces based on their x-coordinate of the bounding box return sorted(face, key=lambda x: x.bbox[0]) @@ -164,7 +187,6 @@ def get_faces(img_data: np.ndarray, det_size=(640, 640), det_thresh : Optional[i return [] - @dataclass class ImageResult: """ @@ -222,12 +244,15 @@ def get_faces_from_img_files(files): if len(files) > 0: for file in files: img = Image.open(file.name) # Open the image file - face = get_or_default(get_faces(pil_to_cv2(img)), 0, None) # Extract faces from the image + face = get_or_default( + get_faces(pil_to_cv2(img)), 0, None + ) # Extract faces from the image if face is not None: faces.append(face) # Add the detected face to the list of faces return faces + def blend_faces(faces: List[Face]) -> Face: """ Blends the embeddings of multiple faces into a single face. @@ -238,16 +263,16 @@ def blend_faces(faces: List[Face]) -> Face: Returns: Face: The blended Face object with the averaged embedding. Returns None if the input list is empty. - + Raises: ValueError: If the embeddings have different shapes. 
""" embeddings = [face.embedding for face in faces] - + if len(embeddings) > 0: embedding_shape = embeddings[0].shape - + # Check if all embeddings have the same shape for embedding in embeddings: if embedding.shape != embedding_shape: @@ -255,15 +280,21 @@ def blend_faces(faces: List[Face]) -> Face: # Compute the mean of all embeddings blended_embedding = np.mean(embeddings, axis=0) - + # Create a new Face object using the properties of the first face in the list # Assign the blended embedding to the blended Face object - blended = Face(embedding=blended_embedding, gender=faces[0].gender, age=faces[0].age) + blended = Face( + embedding=blended_embedding, gender=faces[0].gender, age=faces[0].age + ) + + assert ( + not np.array_equal(blended.embedding, faces[0].embedding) + if len(faces) > 1 + else True + ), "If len(faces)>0, the blended embedding should not be the same than the first image" - assert not np.array_equal(blended.embedding,faces[0].embedding) if len(faces) > 1 else True, "If len(faces)>0, the blended embedding should not be the same than the first image" - return blended - + # Return None if the input list is empty return None @@ -275,9 +306,9 @@ def swap_face( model: str, faces_index: Set[int] = {0}, same_gender=True, - upscaled_swapper = False, - compute_similarity = True, - sort_by_face_size = False + upscaled_swapper=False, + compute_similarity=True, + sort_by_face_size=False, ) -> ImageResult: """ Swaps faces in the target image with the source face. @@ -293,9 +324,9 @@ def swap_face( Returns: ImageResult: An object containing the swapped image and similarity scores. - """ + """ return_result = ImageResult(target_img, {}, {}) - try : + try: target_img = cv2.cvtColor(np.array(target_img), cv2.COLOR_RGB2BGR) gender = source_face["gender"] logger.info("Source Gender %s", gender) @@ -313,19 +344,23 @@ def swap_face( for i, swapped_face in enumerate(target_faces): logger.info(f"swap face {i}") if i in faces_index: - result = face_swapper.get(result, swapped_face, source_face, upscale = upscaled_swapper) + result = face_swapper.get( + result, swapped_face, source_face, upscale=upscaled_swapper + ) result_image = Image.fromarray(cv2.cvtColor(result, cv2.COLOR_BGR2RGB)) return_result.image = result_image - - if compute_similarity : + if compute_similarity: try: result_faces = get_faces( - cv2.cvtColor(np.array(result_image), cv2.COLOR_RGB2BGR), sort_by_face_size=sort_by_face_size + cv2.cvtColor(np.array(result_image), cv2.COLOR_RGB2BGR), + sort_by_face_size=sort_by_face_size, ) if same_gender: - result_faces = [x for x in result_faces if x["gender"] == gender] + result_faces = [ + x for x in result_faces if x["gender"] == gender + ] for i, swapped_face in enumerate(result_faces): logger.info(f"compare face {i}") @@ -343,13 +378,20 @@ def swap_face( except Exception as e: logger.error("Similarity processing failed %s", e) raise e - except Exception as e : + except Exception as e: logger.error("Conversion failed %s", e) raise e return return_result -def process_image_unit(model, unit : FaceSwapUnitSettings, image: Image.Image, info = None, upscaled_swapper = False, force_blend = False) -> List: +def process_image_unit( + model, + unit: FaceSwapUnitSettings, + image: Image.Image, + info=None, + upscaled_swapper=False, + force_blend=False, +) -> List: """Process one image and return a List of (image, info) (one if blended, many if not). 
Args: @@ -362,23 +404,28 @@ def process_image_unit(model, unit : FaceSwapUnitSettings, image: Image.Image, i """ results = [] - if unit.enable : - if check_against_nsfw(image) : + if unit.enable: + if check_against_nsfw(image): return [(image, info)] - if not unit.blend_faces and not force_blend : + if not unit.blend_faces and not force_blend: src_faces = unit.faces logger.info(f"will generate {len(src_faces)} images") - else : + else: logger.info("blend all faces together") src_faces = [unit.blended_faces] - assert(not np.array_equal(unit.reference_face.embedding,src_faces[0].embedding) if len(unit.faces)>1 else True), "Reference face cannot be the same as blended" - + assert ( + not np.array_equal( + unit.reference_face.embedding, src_faces[0].embedding + ) + if len(unit.faces) > 1 + else True + ), "Reference face cannot be the same as blended" - for i,src_face in enumerate(src_faces): + for i, src_face in enumerate(src_faces): logger.info(f"Process face {i}") - if unit.reference_face is not None : + if unit.reference_face is not None: reference_face = unit.reference_face - else : + else: logger.info("Use source face as reference face") reference_face = src_face @@ -392,14 +439,30 @@ def process_image_unit(model, unit : FaceSwapUnitSettings, image: Image.Image, i same_gender=unit.same_gender, upscaled_swapper=upscaled_swapper, compute_similarity=unit.compute_similarity, - sort_by_face_size=unit.sort_by_size + sort_by_face_size=unit.sort_by_size, ) save_img_debug(result.image, "After swap") - if result.image is None : + if result.image is None: logger.error("Result image is None") - if (not unit.check_similarity) or result.similarity and all([result.similarity.values()!=0]+[x >= unit.min_sim for x in result.similarity.values()]) and all([result.ref_similarity.values()!=0]+[x >= unit.min_ref_sim for x in result.ref_similarity.values()]): - results.append((result.image, f"{info}, similarity = {result.similarity}, ref_similarity = {result.ref_similarity}")) + if ( + (not unit.check_similarity) + or result.similarity + and all( + [result.similarity.values() != 0] + + [x >= unit.min_sim for x in result.similarity.values()] + ) + and all( + [result.ref_similarity.values() != 0] + + [x >= unit.min_ref_sim for x in result.ref_similarity.values()] + ) + ): + results.append( + ( + result.image, + f"{info}, similarity = {result.similarity}, ref_similarity = {result.ref_similarity}", + ) + ) else: logger.warning( f"skip, similarity to low, sim = {result.similarity} (target {unit.min_sim}) ref sim = {result.ref_similarity} (target = {unit.min_ref_sim})" @@ -407,22 +470,33 @@ def process_image_unit(model, unit : FaceSwapUnitSettings, image: Image.Image, i logger.debug("process_image_unit : Unit produced %s results", len(results)) return results -def process_images_units(model, units : List[FaceSwapUnitSettings], images: List[Tuple[Optional[Image.Image], Optional[str]]], upscaled_swapper = False, force_blend = False) -> Union[List,None]: - if len(units) == 0 : + +def process_images_units( + model, + units: List[FaceSwapUnitSettings], + images: List[Tuple[Optional[Image.Image], Optional[str]]], + upscaled_swapper=False, + force_blend=False, +) -> Union[List, None]: + if len(units) == 0: logger.info("Finished processing image, return %s images", len(images)) return None - + logger.debug("%s more units", len(units)) processed_images = [] - for i,(image, info) in enumerate(images) : + for i, (image, info) in enumerate(images): logger.debug("Processing image %s", i) - swapped = 
process_image_unit(model,units[0],image, info, upscaled_swapper, force_blend) + swapped = process_image_unit( + model, units[0], image, info, upscaled_swapper, force_blend + ) logger.debug("Image %s -> %s images", i, len(swapped)) - nexts = process_images_units(model,units[1:],swapped, upscaled_swapper,force_blend) - if nexts : + nexts = process_images_units( + model, units[1:], swapped, upscaled_swapper, force_blend + ) + if nexts: processed_images.extend(nexts) - else : + else: processed_images.extend(swapped) - - return processed_images \ No newline at end of file + + return processed_images diff --git a/scripts/faceswaplab_swapping/upscaled_inswapper.py b/scripts/faceswaplab_swapping/upscaled_inswapper.py index d112a6d..fc17ce0 100644 --- a/scripts/faceswaplab_swapping/upscaled_inswapper.py +++ b/scripts/faceswaplab_swapping/upscaled_inswapper.py @@ -1,4 +1,3 @@ - import cv2 import numpy as np import onnx @@ -14,18 +13,22 @@ from PIL import Image from scripts.faceswaplab_utils.faceswaplab_logging import logger from scripts.faceswaplab_postprocessing import upscaling -from scripts.faceswaplab_postprocessing.postprocessing_options import \ - PostProcessingOptions +from scripts.faceswaplab_postprocessing.postprocessing_options import ( + PostProcessingOptions, +) from scripts.faceswaplab_swapping.facemask import generate_face_mask from scripts.faceswaplab_utils.imgutils import cv2_to_pil, pil_to_cv2 def get_upscaler() -> UpscalerData: for upscaler in shared.sd_upscalers: - if upscaler.name == opts.data.get("faceswaplab_upscaled_swapper_upscaler", "LDSR"): + if upscaler.name == opts.data.get( + "faceswaplab_upscaled_swapper_upscaler", "LDSR" + ): return upscaler return None + def merge_images_with_mask(image1, image2, mask): if image1.shape != image2.shape or image1.shape[:2] != mask.shape: raise ValueError("Img should have the same shape") @@ -36,153 +39,202 @@ def merge_images_with_mask(image1, image2, mask): merged_image = cv2.add(empty_region, masked_region) return merged_image + def erode_mask(mask, kernel_size=3, iterations=1): kernel = np.ones((kernel_size, kernel_size), np.uint8) eroded_mask = cv2.erode(mask, kernel, iterations=iterations) return eroded_mask + def apply_gaussian_blur(mask, kernel_size=(5, 5), sigma_x=0): blurred_mask = cv2.GaussianBlur(mask, kernel_size, sigma_x) return blurred_mask + def dilate_mask(mask, kernel_size=5, iterations=1): kernel = np.ones((kernel_size, kernel_size), np.uint8) dilated_mask = cv2.dilate(mask, kernel, iterations=iterations) return dilated_mask -def get_face_mask(aimg,bgr_fake): - mask1 = generate_face_mask(aimg, device = shared.device) - mask2 = generate_face_mask(bgr_fake, device = shared.device) - mask = dilate_mask(cv2.bitwise_or(mask1,mask2)) + +def get_face_mask(aimg, bgr_fake): + mask1 = generate_face_mask(aimg, device=shared.device) + mask2 = generate_face_mask(bgr_fake, device=shared.device) + mask = dilate_mask(cv2.bitwise_or(mask1, mask2)) return mask -class UpscaledINSwapper(): - def __init__(self, inswapper : INSwapper): +class UpscaledINSwapper: + def __init__(self, inswapper: INSwapper): self.__dict__.update(inswapper.__dict__) def forward(self, img, latent): img = (img - self.input_mean) / self.input_std - pred = self.session.run(self.output_names, {self.input_names[0]: img, self.input_names[1]: latent})[0] + pred = self.session.run( + self.output_names, {self.input_names[0]: img, self.input_names[1]: latent} + )[0] return pred - def super_resolution(self,img, k = 2) : + def super_resolution(self, img, k=2): pil_img = 
cv2_to_pil(img) options = PostProcessingOptions( - upscaler_name=opts.data.get('faceswaplab_upscaled_swapper_upscaler', 'LDSR'), + upscaler_name=opts.data.get( + "faceswaplab_upscaled_swapper_upscaler", "LDSR" + ), upscale_visibility=1, scale=k, - face_restorer_name=opts.data.get('faceswaplab_upscaled_swapper_face_restorer', ""), - codeformer_weight= opts.data.get('faceswaplab_upscaled_swapper_face_restorer_weight', 1), - restorer_visibility=opts.data.get('faceswaplab_upscaled_swapper_face_restorer_visibility', 1)) + face_restorer_name=opts.data.get( + "faceswaplab_upscaled_swapper_face_restorer", "" + ), + codeformer_weight=opts.data.get( + "faceswaplab_upscaled_swapper_face_restorer_weight", 1 + ), + restorer_visibility=opts.data.get( + "faceswaplab_upscaled_swapper_face_restorer_visibility", 1 + ), + ) upscaled = upscaling.upscale_img(pil_img, options) upscaled = upscaling.restore_face(upscaled, options) return pil_to_cv2(upscaled) - def get(self, img, target_face, source_face, paste_back=True, upscale = True): + def get(self, img, target_face, source_face, paste_back=True, upscale=True): aimg, M = face_align.norm_crop2(img, target_face.kps, self.input_size[0]) - blob = cv2.dnn.blobFromImage(aimg, 1.0 / self.input_std, self.input_size, - (self.input_mean, self.input_mean, self.input_mean), swapRB=True) - latent = source_face.normed_embedding.reshape((1,-1)) + blob = cv2.dnn.blobFromImage( + aimg, + 1.0 / self.input_std, + self.input_size, + (self.input_mean, self.input_mean, self.input_mean), + swapRB=True, + ) + latent = source_face.normed_embedding.reshape((1, -1)) latent = np.dot(latent, self.emap) latent /= np.linalg.norm(latent) - pred = self.session.run(self.output_names, {self.input_names[0]: blob, self.input_names[1]: latent})[0] - #print(latent.shape, latent.dtype, pred.shape) - img_fake = pred.transpose((0,2,3,1))[0] - bgr_fake = np.clip(255 * img_fake, 0, 255).astype(np.uint8)[:,:,::-1] - - try : + pred = self.session.run( + self.output_names, {self.input_names[0]: blob, self.input_names[1]: latent} + )[0] + # print(latent.shape, latent.dtype, pred.shape) + img_fake = pred.transpose((0, 2, 3, 1))[0] + bgr_fake = np.clip(255 * img_fake, 0, 255).astype(np.uint8)[:, :, ::-1] + + try: if not paste_back: return bgr_fake, M else: target_img = img - def compute_diff(bgr_fake,aimg) : + def compute_diff(bgr_fake, aimg): fake_diff = bgr_fake.astype(np.float32) - aimg.astype(np.float32) fake_diff = np.abs(fake_diff).mean(axis=2) - fake_diff[:2,:] = 0 - fake_diff[-2:,:] = 0 - fake_diff[:,:2] = 0 - fake_diff[:,-2:] = 0 + fake_diff[:2, :] = 0 + fake_diff[-2:, :] = 0 + fake_diff[:, :2] = 0 + fake_diff[:, -2:] = 0 return fake_diff - if upscale : + if upscale: + print("*" * 80) + print( + f"Upscaled inswapper using {opts.data.get('faceswaplab_upscaled_swapper_upscaler', 'LDSR')}" + ) + print("*" * 80) - print("*"*80) - print(f"Upscaled inswapper using {opts.data.get('faceswaplab_upscaled_swapper_upscaler', 'LDSR')}") - print("*"*80) - k = 4 - aimg, M = face_align.norm_crop2(img, target_face.kps, self.input_size[0]*k) - + aimg, M = face_align.norm_crop2( + img, target_face.kps, self.input_size[0] * k + ) + # upscale and restore face : bgr_fake = self.super_resolution(bgr_fake, k) - - if opts.data.get("faceswaplab_upscaled_improved_mask", True) : - mask = get_face_mask(aimg,bgr_fake) - bgr_fake = merge_images_with_mask(aimg, bgr_fake,mask) + + if opts.data.get("faceswaplab_upscaled_improved_mask", True): + mask = get_face_mask(aimg, bgr_fake) + bgr_fake = merge_images_with_mask(aimg, bgr_fake, 
mask) # compute fake_diff before sharpen and color correction (better result) fake_diff = compute_diff(bgr_fake, aimg) - if opts.data.get("faceswaplab_upscaled_swapper_sharpen", True) : + if opts.data.get("faceswaplab_upscaled_swapper_sharpen", True): print("sharpen") # Add sharpness blurred = cv2.GaussianBlur(bgr_fake, (0, 0), 3) bgr_fake = cv2.addWeighted(bgr_fake, 1.5, blurred, -0.5, 0) # Apply color corrections - if opts.data.get("faceswaplab_upscaled_swapper_fixcolor", True) : + if opts.data.get("faceswaplab_upscaled_swapper_fixcolor", True): print("color correction") correction = processing.setup_color_correction(cv2_to_pil(aimg)) - bgr_fake_pil = processing.apply_color_correction(correction, cv2_to_pil(bgr_fake)) + bgr_fake_pil = processing.apply_color_correction( + correction, cv2_to_pil(bgr_fake) + ) bgr_fake = pil_to_cv2(bgr_fake_pil) - - else : + else: fake_diff = compute_diff(bgr_fake, aimg) IM = cv2.invertAffineTransform(M) - img_white = np.full((aimg.shape[0],aimg.shape[1]), 255, dtype=np.float32) - bgr_fake = cv2.warpAffine(bgr_fake, IM, (target_img.shape[1], target_img.shape[0]), borderValue=0.0) - img_white = cv2.warpAffine(img_white, IM, (target_img.shape[1], target_img.shape[0]), borderValue=0.0) - fake_diff = cv2.warpAffine(fake_diff, IM, (target_img.shape[1], target_img.shape[0]), borderValue=0.0) - img_white[img_white>20] = 255 - fthresh = opts.data.get('faceswaplab_upscaled_swapper_fthresh', 10) + img_white = np.full( + (aimg.shape[0], aimg.shape[1]), 255, dtype=np.float32 + ) + bgr_fake = cv2.warpAffine( + bgr_fake, + IM, + (target_img.shape[1], target_img.shape[0]), + borderValue=0.0, + ) + img_white = cv2.warpAffine( + img_white, + IM, + (target_img.shape[1], target_img.shape[0]), + borderValue=0.0, + ) + fake_diff = cv2.warpAffine( + fake_diff, + IM, + (target_img.shape[1], target_img.shape[0]), + borderValue=0.0, + ) + img_white[img_white > 20] = 255 + fthresh = opts.data.get("faceswaplab_upscaled_swapper_fthresh", 10) print("fthresh", fthresh) - fake_diff[fake_diff<fthresh] = 0 - fake_diff[fake_diff>=fthresh] = 255 + fake_diff[fake_diff < fthresh] = 0 + fake_diff[fake_diff >= fthresh] = 255 img_mask = img_white - mask_h_inds, mask_w_inds = np.where(img_mask==255) + mask_h_inds, mask_w_inds = np.where(img_mask == 255) mask_h = np.max(mask_h_inds) - np.min(mask_h_inds) mask_w = np.max(mask_w_inds) - np.min(mask_w_inds) - mask_size = int(np.sqrt(mask_h*mask_w)) - erosion_factor = opts.data.get('faceswaplab_upscaled_swapper_erosion', 1) - k = max(int(mask_size//10*erosion_factor), int(10*erosion_factor)) - - kernel = np.ones((k,k),np.uint8) - img_mask = cv2.erode(img_mask,kernel,iterations = 1) - kernel = np.ones((2,2),np.uint8) - fake_diff = cv2.dilate(fake_diff,kernel,iterations = 1) - k = max(int(mask_size//20*erosion_factor), int(5*erosion_factor)) - + mask_size = int(np.sqrt(mask_h * mask_w)) + erosion_factor = opts.data.get( + "faceswaplab_upscaled_swapper_erosion", 1 + ) + k = max(int(mask_size // 10 * erosion_factor), int(10 * erosion_factor)) + + kernel = np.ones((k, k), np.uint8) + img_mask = cv2.erode(img_mask, kernel, iterations=1) + kernel = np.ones((2, 2), np.uint8) + fake_diff = cv2.dilate(fake_diff, kernel, iterations=1) + k = max(int(mask_size // 20 * erosion_factor), int(5 * erosion_factor)) kernel_size = (k, k) - blur_size = tuple(2*i+1 for i in kernel_size) + blur_size = tuple(2 * i + 1 for i in kernel_size) img_mask = cv2.GaussianBlur(img_mask, blur_size, 0) - k = int(5*erosion_factor) + k = int(5 * erosion_factor) kernel_size = (k, k) - blur_size = tuple(2*i+1 for i in 
kernel_size) + blur_size = tuple(2 * i + 1 for i in kernel_size) fake_diff = cv2.GaussianBlur(fake_diff, blur_size, 0) img_mask /= 255 fake_diff /= 255 - img_mask = np.reshape(img_mask, [img_mask.shape[0],img_mask.shape[1],1]) - fake_merged = img_mask * bgr_fake + (1-img_mask) * target_img.astype(np.float32) + img_mask = np.reshape( + img_mask, [img_mask.shape[0], img_mask.shape[1], 1] + ) + fake_merged = img_mask * bgr_fake + (1 - img_mask) * target_img.astype( + np.float32 + ) fake_merged = fake_merged.astype(np.uint8) return fake_merged - except Exception as e : + except Exception as e: import traceback + traceback.print_exc() raise e diff --git a/scripts/faceswaplab_ui/faceswaplab_tab.py b/scripts/faceswaplab_ui/faceswaplab_tab.py index 86e799b..d46c18e 100644 --- a/scripts/faceswaplab_ui/faceswaplab_tab.py +++ b/scripts/faceswaplab_ui/faceswaplab_tab.py @@ -13,20 +13,23 @@ from scripts.faceswaplab_ui.faceswaplab_upscaler_ui import upscaler_ui from insightface.app.common import Face from modules import script_callbacks, scripts from PIL import Image -from modules.shared import opts +from modules.shared import opts from scripts.faceswaplab_utils import imgutils from scripts.faceswaplab_utils.imgutils import pil_to_cv2 from scripts.faceswaplab_utils.models_utils import get_models from scripts.faceswaplab_utils.faceswaplab_logging import logger import scripts.faceswaplab_swapping.swapper as swapper -from scripts.faceswaplab_postprocessing.postprocessing_options import PostProcessingOptions +from scripts.faceswaplab_postprocessing.postprocessing_options import ( + PostProcessingOptions, +) from scripts.faceswaplab_postprocessing.postprocessing import enhance_image from dataclasses import fields from typing import List from scripts.faceswaplab_ui.faceswaplab_unit_settings import FaceSwapUnitSettings from scripts.faceswaplab_utils.models_utils import get_current_model + def compare(img1, img2): if img1 is not None and img2 is not None: return swapper.compare_faces(img1, img2) @@ -34,13 +37,27 @@ def compare(img1, img2): return "You need 2 images to compare" - -def extract_faces(files, extract_path, face_restorer_name, face_restorer_visibility, codeformer_weight,upscaler_name,upscaler_scale, upscaler_visibility,inpainting_denoising_strengh, inpainting_prompt, inpainting_negative_prompt, inpainting_steps, inpainting_sampler,inpainting_when): - if not extract_path : +def extract_faces( + files, + extract_path, + face_restorer_name, + face_restorer_visibility, + codeformer_weight, + upscaler_name, + upscaler_scale, + upscaler_visibility, + inpainting_denoising_strengh, + inpainting_prompt, + inpainting_negative_prompt, + inpainting_steps, + inpainting_sampler, + inpainting_when, +): + if not extract_path: tempfile.mkdtemp() if files is not None: images = [] - for file in files : + for file in files: img = Image.open(file.name).convert("RGB") faces = swapper.get_faces(pil_to_cv2(img)) if faces: @@ -50,40 +67,49 @@ def extract_faces(files, extract_path, face_restorer_name, face_restorer_visibi x_min, y_min, x_max, y_max = bbox face_image = img.crop((x_min, y_min, x_max, y_max)) if face_restorer_name or face_restorer_visibility: - scale = 1 if face_image.width > 512 else 512//face_image.width - face_image = enhance_image(face_image, PostProcessingOptions(face_restorer_name=face_restorer_name, - restorer_visibility=face_restorer_visibility, - codeformer_weight= codeformer_weight, - upscaler_name=upscaler_name, - upscale_visibility=upscaler_visibility, - scale=scale, - 
inpainting_denoising_strengh=inpainting_denoising_strengh, - inpainting_prompt=inpainting_prompt, - inpainting_steps=inpainting_steps, - inpainting_negative_prompt=inpainting_negative_prompt, - inpainting_when=inpainting_when, - inpainting_sampler=inpainting_sampler)) - path = tempfile.NamedTemporaryFile(delete=False,suffix=".png",dir=extract_path).name + scale = 1 if face_image.width > 512 else 512 // face_image.width + face_image = enhance_image( + face_image, + PostProcessingOptions( + face_restorer_name=face_restorer_name, + restorer_visibility=face_restorer_visibility, + codeformer_weight=codeformer_weight, + upscaler_name=upscaler_name, + upscale_visibility=upscaler_visibility, + scale=scale, + inpainting_denoising_strengh=inpainting_denoising_strengh, + inpainting_prompt=inpainting_prompt, + inpainting_steps=inpainting_steps, + inpainting_negative_prompt=inpainting_negative_prompt, + inpainting_when=inpainting_when, + inpainting_sampler=inpainting_sampler, + ), + ) + path = tempfile.NamedTemporaryFile( + delete=False, suffix=".png", dir=extract_path + ).name face_image.save(path) face_images.append(path) - images+= face_images + images += face_images return images return None -def analyse_faces(image, det_threshold = 0.5) : - try : + +def analyse_faces(image, det_threshold=0.5): + try: faces = swapper.get_faces(imgutils.pil_to_cv2(image), det_thresh=det_threshold) result = "" - for i,face in enumerate(faces) : - result+= f"\nFace {i} \n" + "="*40 +"\n" - result+= pformat(face) + "\n" - result+= "="*40 + for i, face in enumerate(faces): + result += f"\nFace {i} \n" + "=" * 40 + "\n" + result += pformat(face) + "\n" + result += "=" * 40 return result - except Exception as e : + except Exception as e: logger.error("Analysis Failed : %s", e) return "Analysis Failed" - + + def build_face_checkpoint_and_save(batch_files, name): """ Builds a face checkpoint, swaps faces, and saves the result to a file. 
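[Editor's note — not part of the patch] The hunk below reformats the checkpoint-saving code in build_face_checkpoint_and_save. For reference, the .pkl it writes is just a dict with embedding, gender and age keys, read back through insightface's Face wrapper (a dict subclass). A minimal round-trip sketch; the file name and the 512-dim embedding are assumptions (512 matches the buffalo_l models used by the analyser):

```python
import pickle

import numpy as np
from insightface.app.common import Face

# Same payload keys as the pickle.dump call in the hunk below
payload = {
    "embedding": np.zeros(512, dtype=np.float32),  # assumed 512-dim embedding
    "gender": 1,
    "age": 30,
}
with open("face_checkpoint.pkl", "wb") as f:  # illustrative path
    pickle.dump(payload, f)

with open("face_checkpoint.pkl", "rb") as f:
    face = Face(pickle.load(f))  # same loading call as the patch uses
print(face.embedding.shape, face.gender, face.age)
```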
@@ -102,7 +128,7 @@ def build_face_checkpoint_and_save(batch_files, name): preview_path = os.path.join( scripts.basedir(), "extensions", "sd-webui-faceswaplab", "references" ) - faces_path = os.path.join(scripts.basedir(), "models", "faceswaplab","faces") + faces_path = os.path.join(scripts.basedir(), "models", "faceswaplab", "faces") if not os.path.exists(faces_path): os.makedirs(faces_path) @@ -116,22 +142,36 @@ def build_face_checkpoint_and_save(batch_files, name): if name == "": name = "default_name" pprint(blended_face) - result = swapper.swap_face(blended_face, blended_face, target_img, get_models()[0]) - result_image = enhance_image(result.image, PostProcessingOptions(face_restorer_name="CodeFormer", restorer_visibility=1)) - + result = swapper.swap_face( + blended_face, blended_face, target_img, get_models()[0] + ) + result_image = enhance_image( + result.image, + PostProcessingOptions( + face_restorer_name="CodeFormer", restorer_visibility=1 + ), + ) + file_path = os.path.join(faces_path, f"{name}.pkl") file_number = 1 while os.path.exists(file_path): file_path = os.path.join(faces_path, f"{name}_{file_number}.pkl") file_number += 1 - result_image.save(file_path+".png") + result_image.save(file_path + ".png") with open(file_path, "wb") as file: - pickle.dump({"embedding" :blended_face.embedding, "gender" :blended_face.gender, "age" :blended_face.age},file) - try : + pickle.dump( + { + "embedding": blended_face.embedding, + "gender": blended_face.gender, + "age": blended_face.age, + }, + file, + ) + try: with open(file_path, "rb") as file: data = Face(pickle.load(file)) print(data) - except Exception as e : + except Exception as e: print(e) return result_image @@ -139,48 +179,52 @@ def build_face_checkpoint_and_save(batch_files, name): return target_img + def explore_onnx_faceswap_model(model_path): data = { - 'Node Name': [], - 'Op Type': [], - 'Inputs': [], - 'Outputs': [], - 'Attributes': [] + "Node Name": [], + "Op Type": [], + "Inputs": [], + "Outputs": [], + "Attributes": [], } if model_path: model = onnx.load(model_path) for node in model.graph.node: - data['Node Name'].append(pformat(node.name)) - data['Op Type'].append(pformat(node.op_type)) - data['Inputs'].append(pformat(node.input)) - data['Outputs'].append(pformat(node.output)) + data["Node Name"].append(pformat(node.name)) + data["Op Type"].append(pformat(node.op_type)) + data["Inputs"].append(pformat(node.input)) + data["Outputs"].append(pformat(node.output)) attributes = [] for attr in node.attribute: attr_name = attr.name attr_value = attr.t - attributes.append("{} = {}".format(pformat(attr_name), pformat(attr_value))) - data['Attributes'].append(attributes) + attributes.append( + "{} = {}".format(pformat(attr_name), pformat(attr_value)) + ) + data["Attributes"].append(attributes) df = pd.DataFrame(data) return df -def batch_process(files, save_path, *components): - try : + +def batch_process(files, save_path, *components): + try: if save_path is not None: os.makedirs(save_path, exist_ok=True) units_count = opts.data.get("faceswaplab_units_count", 3) units: List[FaceSwapUnitSettings] = [] - #Parse and convert units flat components into FaceSwapUnitSettings + # Parse and convert units flat components into FaceSwapUnitSettings for i in range(0, units_count): - units += [FaceSwapUnitSettings.get_unit_configuration(i, components)] + units += [FaceSwapUnitSettings.get_unit_configuration(i, components)] for i, u in enumerate(units): logger.debug("%s, %s", pformat(i), pformat(u)) - #Parse the postprocessing options - #We 
must first find where to start from (after face swapping units) + # Parse the postprocessing options + # We must first find where to start from (after face swapping units) len_conf: int = len(fields(FaceSwapUnitSettings)) shift: int = units_count * len_conf postprocess_options = PostProcessingOptions( @@ -191,26 +235,36 @@ def batch_process(files, save_path, *components): units = [u for u in units if u.enable] if files is not None: images = [] - for file in files : + for file in files: current_images = [] src_image = Image.open(file.name).convert("RGB") - swapped_images = swapper.process_images_units(get_current_model(), images=[(src_image,None)], units=units, upscaled_swapper=opts.data.get("faceswaplab_upscaled_swapper", False)) + swapped_images = swapper.process_images_units( + get_current_model(), + images=[(src_image, None)], + units=units, + upscaled_swapper=opts.data.get( + "faceswaplab_upscaled_swapper", False + ), + ) if len(swapped_images) > 0: - current_images+= [img for img,info in swapped_images] + current_images += [img for img, info in swapped_images] logger.info("%s images generated", len(current_images)) - for i, img in enumerate(current_images) : - current_images[i] = enhance_image(img,postprocess_options) + for i, img in enumerate(current_images): + current_images[i] = enhance_image(img, postprocess_options) - for img in current_images : - path = tempfile.NamedTemporaryFile(delete=False,suffix=".png",dir=save_path).name + for img in current_images: + path = tempfile.NamedTemporaryFile( + delete=False, suffix=".png", dir=save_path + ).name img.save(path) images += current_images return images except Exception as e: - logger.error("Batch Process error : %s",e) + logger.error("Batch Process error : %s", e) import traceback + traceback.print_exc() return None @@ -220,107 +274,164 @@ def tools_ui(): with gr.Tab("Tools"): with gr.Tab("Build"): gr.Markdown( - """Build a face based on a batch list of images. Will blend the resulting face and store the checkpoint in the faceswaplab/faces directory.""") + """Build a face based on a batch list of images. 
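If the chosen name is already taken, a numeric suffix is appended rather than overwriting.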
Will blend the resulting face and store the checkpoint in the faceswaplab/faces directory.""" + ) with gr.Row(): batch_files = gr.components.File( type="file", file_count="multiple", label="Batch Sources Images", optional=True, - elem_id="faceswaplab_build_batch_files" + elem_id="faceswaplab_build_batch_files", + ) + preview = gr.components.Image( + type="pil", + label="Preview", + interactive=False, + elem_id="faceswaplab_build_preview_face", ) - preview = gr.components.Image(type="pil", label="Preview", interactive=False, elem_id="faceswaplab_build_preview_face") name = gr.Textbox( value="Face", placeholder="Name of the character", label="Name of the character", - elem_id="faceswaplab_build_character_name" + elem_id="faceswaplab_build_character_name", + ) + generate_checkpoint_btn = gr.Button( + "Save", elem_id="faceswaplab_build_save_btn" ) - generate_checkpoint_btn = gr.Button("Save",elem_id="faceswaplab_build_save_btn") with gr.Tab("Compare"): gr.Markdown( - """Give a similarity score between two images (only first face is compared).""") - + """Give a similarity score between two images (only first face is compared).""" + ) + with gr.Row(): - img1 = gr.components.Image(type="pil", - label="Face 1", - elem_id="faceswaplab_compare_face1" + img1 = gr.components.Image( + type="pil", label="Face 1", elem_id="faceswaplab_compare_face1" ) - img2 = gr.components.Image(type="pil", - label="Face 2", - elem_id="faceswaplab_compare_face2" + img2 = gr.components.Image( + type="pil", label="Face 2", elem_id="faceswaplab_compare_face2" ) - compare_btn = gr.Button("Compare",elem_id="faceswaplab_compare_btn") + compare_btn = gr.Button("Compare", elem_id="faceswaplab_compare_btn") compare_result_text = gr.Textbox( - interactive=False, label="Similarity", value="0", elem_id="faceswaplab_compare_result" + interactive=False, + label="Similarity", + value="0", + elem_id="faceswaplab_compare_result", ) with gr.Tab("Extract"): gr.Markdown( - """Extract all faces from a batch of images. Will apply enhancement in the tools enhancement tab.""") + """Extract all faces from a batch of images. 
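Extracted faces are saved to the destination directory below.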
Will apply enhancement in the tools enhancement tab.""" + ) with gr.Row(): extracted_source_files = gr.components.File( type="file", file_count="multiple", label="Batch Sources Images", optional=True, - elem_id="faceswaplab_extract_batch_images" + elem_id="faceswaplab_extract_batch_images", ) - extracted_faces = gr.Gallery( - label="Extracted faces", show_label=False, - elem_id="faceswaplab_extract_results" - ).style(columns=[2], rows=[2]) - extract_save_path = gr.Textbox(label="Destination Directory", value="", elem_id="faceswaplab_extract_destination") + extracted_faces = gr.Gallery( + label="Extracted faces", + show_label=False, + elem_id="faceswaplab_extract_results", + ).style(columns=[2], rows=[2]) + extract_save_path = gr.Textbox( + label="Destination Directory", + value="", + elem_id="faceswaplab_extract_destination", + ) extract_btn = gr.Button("Extract", elem_id="faceswaplab_extract_btn") with gr.Tab("Explore Model"): model = gr.Dropdown( choices=models, label="Model not found, please download one and reload automatic 1111", - elem_id="faceswaplab_explore_model" - ) + elem_id="faceswaplab_explore_model", + ) explore_btn = gr.Button("Explore", elem_id="faceswaplab_explore_btn") explore_result_text = gr.Dataframe( - interactive=False, label="Explored", - elem_id="faceswaplab_explore_result" + interactive=False, + label="Explored", + elem_id="faceswaplab_explore_result", ) with gr.Tab("Analyse Face"): - img_to_analyse = gr.components.Image(type="pil", label="Face", elem_id="faceswaplab_analyse_face") - analyse_det_threshold = gr.Slider(0.1, 1, 0.5, step=0.01, label="Detection threshold", elem_id="faceswaplab_analyse_det_threshold") + img_to_analyse = gr.components.Image( + type="pil", label="Face", elem_id="faceswaplab_analyse_face" + ) + analyse_det_threshold = gr.Slider( + 0.1, + 1, + 0.5, + step=0.01, + label="Detection threshold", + elem_id="faceswaplab_analyse_det_threshold", + ) analyse_btn = gr.Button("Analyse", elem_id="faceswaplab_analyse_btn") - analyse_results = gr.Textbox(label="Results", interactive=False, value="", elem_id="faceswaplab_analyse_results") + analyse_results = gr.Textbox( + label="Results", + interactive=False, + value="", + elem_id="faceswaplab_analyse_results", + ) with gr.Tab("Batch Process"): with gr.Tab("Source Images"): gr.Markdown( - """Batch process images. Will apply enhancement in the tools enhancement tab.""") + """Batch process images. 
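Results are saved to the destination directory (outputs/faceswap/ by default).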
Will apply enhancement in the tools enhancement tab.""" + ) with gr.Row(): batch_source_files = gr.components.File( type="file", file_count="multiple", label="Batch Sources Images", optional=True, - elem_id="faceswaplab_batch_images" + elem_id="faceswaplab_batch_images", ) - batch_results = gr.Gallery( - label="Batch result", show_label=False, - elem_id="faceswaplab_batch_results" - ).style(columns=[2], rows=[2]) - batch_save_path = gr.Textbox(label="Destination Directory", value="outputs/faceswap/", elem_id="faceswaplab_batch_destination") - batch_save_btn= gr.Button("Process & Save", elem_id="faceswaplab_extract_btn") + batch_results = gr.Gallery( + label="Batch result", + show_label=False, + elem_id="faceswaplab_batch_results", + ).style(columns=[2], rows=[2]) + batch_save_path = gr.Textbox( + label="Destination Directory", + value="outputs/faceswap/", + elem_id="faceswaplab_batch_destination", + ) + batch_save_btn = gr.Button( + "Process & Save", elem_id="faceswaplab_extract_btn" + ) unit_components = [] - for i in range(1,opts.data.get("faceswaplab_units_count", 3)+1): + for i in range(1, opts.data.get("faceswaplab_units_count", 3) + 1): unit_components += faceswap_unit_ui(False, i, id_prefix="faceswaplab_tab") upscale_options = upscaler_ui() - explore_btn.click(explore_onnx_faceswap_model, inputs=[model], outputs=[explore_result_text]) + explore_btn.click( + explore_onnx_faceswap_model, inputs=[model], outputs=[explore_result_text] + ) compare_btn.click(compare, inputs=[img1, img2], outputs=[compare_result_text]) - generate_checkpoint_btn.click(build_face_checkpoint_and_save, inputs=[batch_files, name], outputs=[preview]) - extract_btn.click(extract_faces, inputs=[extracted_source_files, extract_save_path]+upscale_options, outputs=[extracted_faces]) - analyse_btn.click(analyse_faces, inputs=[img_to_analyse,analyse_det_threshold], outputs=[analyse_results]) - batch_save_btn.click(batch_process, inputs=[batch_source_files, batch_save_path]+unit_components+upscale_options, outputs=[batch_results]) + generate_checkpoint_btn.click( + build_face_checkpoint_and_save, inputs=[batch_files, name], outputs=[preview] + ) + extract_btn.click( + extract_faces, + inputs=[extracted_source_files, extract_save_path] + upscale_options, + outputs=[extracted_faces], + ) + analyse_btn.click( + analyse_faces, + inputs=[img_to_analyse, analyse_det_threshold], + outputs=[analyse_results], + ) + batch_save_btn.click( + batch_process, + inputs=[batch_source_files, batch_save_path] + + unit_components + + upscale_options, + outputs=[batch_results], + ) -def on_ui_tabs() : + +def on_ui_tabs(): with gr.Blocks(analytics_enabled=False) as ui_faceswap: tools_ui() - return [(ui_faceswap, "FaceSwapLab", "faceswaplab_tab")] - + return [(ui_faceswap, "FaceSwapLab", "faceswaplab_tab")] diff --git a/scripts/faceswaplab_ui/faceswaplab_unit_settings.py b/scripts/faceswaplab_ui/faceswaplab_unit_settings.py index fffd91f..51769cb 100644 --- a/scripts/faceswaplab_ui/faceswaplab_unit_settings.py +++ b/scripts/faceswaplab_ui/faceswaplab_unit_settings.py @@ -8,20 +8,20 @@ import dill as pickle import gradio as gr from insightface.app.common import Face from PIL import Image -from scripts.faceswaplab_utils.imgutils import (pil_to_cv2,check_against_nsfw) +from scripts.faceswaplab_utils.imgutils import pil_to_cv2, check_against_nsfw from scripts.faceswaplab_utils.faceswaplab_logging import logger + @dataclass class FaceSwapUnitSettings: - # ORDER of parameters is IMPORTANT. 
It should match the result of faceswap_unit_ui # The image given in reference source_img: Union[Image.Image, str] # The checkpoint file - source_face : str + source_face: str # The batch source images - _batch_files: Union[gr.components.File,List[Image.Image]] + _batch_files: Union[gr.components.File, List[Image.Image]] # Will blend faces if True blend_faces: bool # Enable this unit @@ -29,11 +29,11 @@ class FaceSwapUnitSettings: # Use same gender filtering same_gender: bool # Sort faces by their size (from larger to smaller) - sort_by_size : bool + sort_by_size: bool # If True, discard images with low similarity - check_similarity : bool + check_similarity: bool # if True will compute similarity and add it to the image info - _compute_similarity :bool + _compute_similarity: bool # Minimum similarity against the used face (reference, batch or checkpoint) min_sim: float @@ -42,7 +42,7 @@ class FaceSwapUnitSettings: # The face index to use for swapping _faces_index: str # The face index to get image from source - reference_face_index : int + reference_face_index: int # Swap in the source image in img2img (before processing) swap_in_source: bool @@ -59,7 +59,7 @@ class FaceSwapUnitSettings: @property def faces_index(self): """ - Convert _faces_index from str to int + Convert _faces_index from str to int """ faces_index = { int(x) for x in self._faces_index.strip(",").split(",") if x.isnumeric() @@ -72,7 +72,7 @@ class FaceSwapUnitSettings: return faces_index @property - def compute_similarity(self) : + def compute_similarity(self): return self._compute_similarity or self.check_similarity @property @@ -81,59 +81,67 @@ class FaceSwapUnitSettings: Return empty array instead of None for batch files """ return self._batch_files or [] - + @property - def reference_face(self) : + def reference_face(self): """ Extract reference face (only once and store it for the rest of processing). Reference face is the checkpoint or the source image or the first image in the batch in that order. 
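If none of these sources yields a usable face, an error is logged.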
""" - if not hasattr(self,"_reference_face") : - if self.source_face and self.source_face != "None" : + if not hasattr(self, "_reference_face"): + if self.source_face and self.source_face != "None": with open(self.source_face, "rb") as file: - try : + try: logger.info(f"loading pickle {file.name}") face = Face(pickle.load(file)) self._reference_face = face - except Exception as e : + except Exception as e: logger.error("Failed to load checkpoint : %s", e) - elif self.source_img is not None : + elif self.source_img is not None: if isinstance(self.source_img, str): # source_img is a base64 string - if 'base64,' in self.source_img: # check if the base64 string has a data URL scheme - base64_data = self.source_img.split('base64,')[-1] + if ( + "base64," in self.source_img + ): # check if the base64 string has a data URL scheme + base64_data = self.source_img.split("base64,")[-1] img_bytes = base64.b64decode(base64_data) else: # if no data URL scheme, just decode img_bytes = base64.b64decode(self.source_img) self.source_img = Image.open(io.BytesIO(img_bytes)) source_img = pil_to_cv2(self.source_img) - self._reference_face = swapper.get_or_default(swapper.get_faces(source_img), self.reference_face_index, None) - if self._reference_face is None : - logger.error("Face not found in reference image") - else : + self._reference_face = swapper.get_or_default( + swapper.get_faces(source_img), self.reference_face_index, None + ) + if self._reference_face is None: + logger.error("Face not found in reference image") + else: self._reference_face = None - if self._reference_face is None : + if self._reference_face is None: logger.error("You need at least one reference face") return self._reference_face - + @property - def faces(self) : + def faces(self): """_summary_ Extract all faces (including reference face) to provide an array of faces Only processed once. 
""" - if self.batch_files is not None and not hasattr(self,"_faces") : - self._faces = [self.reference_face] if self.reference_face is not None else [] - for file in self.batch_files : - if isinstance(file, Image.Image) : + if self.batch_files is not None and not hasattr(self, "_faces"): + self._faces = ( + [self.reference_face] if self.reference_face is not None else [] + ) + for file in self.batch_files: + if isinstance(file, Image.Image): img = file - else : + else: img = Image.open(file.name) - face = swapper.get_or_default(swapper.get_faces(pil_to_cv2(img)), 0, None) - if face is not None : + face = swapper.get_or_default( + swapper.get_faces(pil_to_cv2(img)), 0, None + ) + if face is not None: self._faces.append(face) return self._faces @@ -142,11 +150,26 @@ class FaceSwapUnitSettings: """ Blend the faces using the mean of all embeddings """ - if not hasattr(self,"_blended_faces") : + if not hasattr(self, "_blended_faces"): self._blended_faces = swapper.blend_faces(self.faces) - assert(all([not np.array_equal(self._blended_faces.embedding, face.embedding) for face in self.faces]) if len(self.faces) > 1 else True), "Blended faces cannot be the same as one of the face if len(face)>0" - assert(not np.array_equal(self._blended_faces.embedding,self.reference_face.embedding) if len(self.faces) > 1 else True), "Blended faces cannot be the same as reference face if len(face)>0" + assert ( + all( + [ + not np.array_equal( + self._blended_faces.embedding, face.embedding + ) + for face in self.faces + ] + ) + if len(self.faces) > 1 + else True + ), "Blended faces cannot be the same as one of the face if len(face)>0" + assert ( + not np.array_equal( + self._blended_faces.embedding, self.reference_face.embedding + ) + if len(self.faces) > 1 + else True + ), "Blended faces cannot be the same as reference face if len(face)>0" return self._blended_faces - - diff --git a/scripts/faceswaplab_ui/faceswaplab_unit_ui.py b/scripts/faceswaplab_ui/faceswaplab_unit_ui.py index 62c0ecd..4791803 100644 --- a/scripts/faceswaplab_ui/faceswaplab_unit_ui.py +++ b/scripts/faceswaplab_ui/faceswaplab_unit_ui.py @@ -1,94 +1,143 @@ from scripts.faceswaplab_utils.models_utils import get_face_checkpoints import gradio as gr + def faceswap_unit_ui(is_img2img, unit_num=1, id_prefix="faceswaplab"): with gr.Tab(f"Face {unit_num}"): with gr.Column(): gr.Markdown( - """Reference is an image. First face will be extracted. - First face of batches sources will be extracted and used as input (or blended if blend is activated).""") + """Reference is an image. First face will be extracted. + First face of batches sources will be extracted and used as input (or blended if blend is activated).""" + ) with gr.Row(): - img = gr.components.Image(type="pil", label="Reference", elem_id=f"{id_prefix}_face{unit_num}_reference_image") + img = gr.components.Image( + type="pil", + label="Reference", + elem_id=f"{id_prefix}_face{unit_num}_reference_image", + ) batch_files = gr.components.File( type="file", file_count="multiple", label="Batch Sources Images", optional=True, - elem_id=f"{id_prefix}_face{unit_num}_batch_source_face_files" + elem_id=f"{id_prefix}_face{unit_num}_batch_source_face_files", ) gr.Markdown( - """Face checkpoint built with the checkpoint builder in tools. Will overwrite reference image.""") - with gr.Row() : - + """Face checkpoint built with the checkpoint builder in tools. 
Will overwrite reference image.""" + ) + with gr.Row(): face = gr.Dropdown( choices=get_face_checkpoints(), label="Face Checkpoint (precedence over reference face)", - elem_id=f"{id_prefix}_face{unit_num}_face_checkpoint" + elem_id=f"{id_prefix}_face{unit_num}_face_checkpoint", ) - refresh = gr.Button(value='↻', variant='tool', elem_id=f"{id_prefix}_face{unit_num}_refresh_checkpoints") + refresh = gr.Button( + value="↻", + variant="tool", + elem_id=f"{id_prefix}_face{unit_num}_refresh_checkpoints", + ) + def refresh_fn(selected): - return gr.Dropdown.update(value=selected, choices=get_face_checkpoints()) - refresh.click(fn=refresh_fn,inputs=face, outputs=face) + return gr.Dropdown.update( + value=selected, choices=get_face_checkpoints() + ) + + refresh.click(fn=refresh_fn, inputs=face, outputs=face) with gr.Row(): - enable = gr.Checkbox(False, placeholder="enable", label="Enable", elem_id=f"{id_prefix}_face{unit_num}_enable") + enable = gr.Checkbox( + False, + placeholder="enable", + label="Enable", + elem_id=f"{id_prefix}_face{unit_num}_enable", + ) blend_faces = gr.Checkbox( - True, placeholder="Blend Faces", label="Blend Faces ((Source|Checkpoint)+References = 1)", + True, + placeholder="Blend Faces", + label="Blend Faces ((Source|Checkpoint)+References = 1)", elem_id=f"{id_prefix}_face{unit_num}_blend_faces", - interactive=True + interactive=True, ) gr.Markdown("""Discard images with low similarity or no faces :""") with gr.Row(): - check_similarity = gr.Checkbox(False, placeholder="discard", label="Check similarity", - elem_id=f"{id_prefix}_face{unit_num}_check_similarity") - compute_similarity = gr.Checkbox(False, label="Compute similarity", - elem_id=f"{id_prefix}_face{unit_num}_compute_similarity") - min_sim = gr.Slider(0, 1, 0, step=0.01, label="Min similarity", - elem_id=f"{id_prefix}_face{unit_num}_min_similarity") + check_similarity = gr.Checkbox( + False, + placeholder="discard", + label="Check similarity", + elem_id=f"{id_prefix}_face{unit_num}_check_similarity", + ) + compute_similarity = gr.Checkbox( + False, + label="Compute similarity", + elem_id=f"{id_prefix}_face{unit_num}_compute_similarity", + ) + min_sim = gr.Slider( + 0, + 1, + 0, + step=0.01, + label="Min similarity", + elem_id=f"{id_prefix}_face{unit_num}_min_similarity", + ) min_ref_sim = gr.Slider( - 0, 1, 0, step=0.01, label="Min reference similarity", - elem_id=f"{id_prefix}_face{unit_num}_min_ref_similarity" + 0, + 1, + 0, + step=0.01, + label="Min reference similarity", + elem_id=f"{id_prefix}_face{unit_num}_min_ref_similarity", ) - gr.Markdown("""Select the face to be swapped, you can sort by size or use the same gender as the desired face:""") + gr.Markdown( + """Select the face to be swapped, you can sort by size or use the same gender as the desired face:""" + ) with gr.Row(): same_gender = gr.Checkbox( - False, placeholder="Same Gender", label="Same Gender", - elem_id=f"{id_prefix}_face{unit_num}_same_gender" + False, + placeholder="Same Gender", + label="Same Gender", + elem_id=f"{id_prefix}_face{unit_num}_same_gender", ) sort_by_size = gr.Checkbox( - False, placeholder="Sort by size", label="Sort by size (larger>smaller)", - elem_id=f"{id_prefix}_face{unit_num}_sort_by_size" + False, + placeholder="Sort by size", + label="Sort by size (larger>smaller)", + elem_id=f"{id_prefix}_face{unit_num}_sort_by_size", ) target_faces_index = gr.Textbox( value="0", placeholder="Which face to swap (comma separated), start from 0 (by gender if same_gender is enabled)", label="Target face : Comma separated face 
number(s)", - elem_id=f"{id_prefix}_face{unit_num}_target_faces_index" + elem_id=f"{id_prefix}_face{unit_num}_target_faces_index", + ) + gr.Markdown( + """The following will only affect reference face image (and is not affected by sort by size) :""" ) - gr.Markdown("""The following will only affect reference face image (and is not affected by sort by size) :""") reference_faces_index = gr.Number( value=0, precision=0, minimum=0, placeholder="Which face to get from reference image start from 0", label="Reference source face : start from 0", - elem_id=f"{id_prefix}_face{unit_num}_reference_face_index" + elem_id=f"{id_prefix}_face{unit_num}_reference_face_index", + ) + gr.Markdown( + """Configure swapping. Swapping can occure before img2img, after or both :""", + visible=is_img2img, ) - gr.Markdown("""Configure swapping. Swapping can occure before img2img, after or both :""", visible=is_img2img) swap_in_source = gr.Checkbox( False, placeholder="Swap face in source image", label="Swap in source image (blended face)", visible=is_img2img, - elem_id=f"{id_prefix}_face{unit_num}_swap_in_source" + elem_id=f"{id_prefix}_face{unit_num}_swap_in_source", ) swap_in_generated = gr.Checkbox( True, placeholder="Swap face in generated image", label="Swap in generated image", visible=is_img2img, - elem_id=f"{id_prefix}_face{unit_num}_swap_in_generated" + elem_id=f"{id_prefix}_face{unit_num}_swap_in_generated", ) # If changed, you need to change FaceSwapUnitSettings accordingly # ORDER of parameters is IMPORTANT. It should match the result of FaceSwapUnitSettings @@ -108,4 +157,4 @@ def faceswap_unit_ui(is_img2img, unit_num=1, id_prefix="faceswaplab"): reference_faces_index, swap_in_source, swap_in_generated, - ] \ No newline at end of file + ] diff --git a/scripts/faceswaplab_ui/faceswaplab_upscaler_ui.py b/scripts/faceswaplab_ui/faceswaplab_upscaler_ui.py index cfa68a8..c767086 100644 --- a/scripts/faceswaplab_ui/faceswaplab_upscaler_ui.py +++ b/scripts/faceswaplab_ui/faceswaplab_upscaler_ui.py @@ -6,63 +6,122 @@ from modules.shared import cmd_opts, opts, state import scripts.faceswaplab_postprocessing.upscaling as upscaling from scripts.faceswaplab_utils.faceswaplab_logging import logger + def upscaler_ui(): with gr.Tab(f"Post-Processing"): gr.Markdown( - """Upscaling is performed on the whole image. Upscaling happens before face restoration.""") + """Upscaling is performed on the whole image. 
Upscaling happens before face restoration.""" + ) with gr.Row(): face_restorer_name = gr.Radio( label="Restore Face", choices=["None"] + [x.name() for x in shared.face_restorers], - value=lambda : opts.data.get("faceswaplab_pp_default_face_restorer", shared.face_restorers[0].name()), + value=lambda: opts.data.get( + "faceswaplab_pp_default_face_restorer", + shared.face_restorers[0].name(), + ), type="value", - elem_id="faceswaplab_pp_face_restorer" + elem_id="faceswaplab_pp_face_restorer", ) with gr.Column(): face_restorer_visibility = gr.Slider( - 0, 1, value=lambda:opts.data.get("faceswaplab_pp_default_face_restorer_visibility", 1), step=0.001, label="Restore visibility", - elem_id="faceswaplab_pp_face_restorer_visibility" + 0, + 1, + value=lambda: opts.data.get( + "faceswaplab_pp_default_face_restorer_visibility", 1 + ), + step=0.001, + label="Restore visibility", + elem_id="faceswaplab_pp_face_restorer_visibility", ) codeformer_weight = gr.Slider( - 0, 1, value=lambda:opts.data.get("faceswaplab_pp_default_face_restorer_weight", 1), step=0.001, label="codeformer weight", - elem_id="faceswaplab_pp_face_restorer_weight" - ) + 0, + 1, + value=lambda: opts.data.get( + "faceswaplab_pp_default_face_restorer_weight", 1 + ), + step=0.001, + label="codeformer weight", + elem_id="faceswaplab_pp_face_restorer_weight", + ) upscaler_name = gr.Dropdown( choices=[upscaler.name for upscaler in shared.sd_upscalers], - value= lambda:opts.data.get("faceswaplab_pp_default_upscaler","None"), + value=lambda: opts.data.get("faceswaplab_pp_default_upscaler", "None"), label="Upscaler", - elem_id="faceswaplab_pp_upscaler" + elem_id="faceswaplab_pp_upscaler", + ) + upscaler_scale = gr.Slider( + 1, + 8, + 1, + step=0.1, + label="Upscaler scale", + elem_id="faceswaplab_pp_upscaler_scale", ) - upscaler_scale = gr.Slider(1, 8, 1, step=0.1, label="Upscaler scale", elem_id="faceswaplab_pp_upscaler_scale") upscaler_visibility = gr.Slider( - 0, 1, value=lambda:opts.data.get("faceswaplab_pp_default_upscaler_visibility", 1), step=0.1, label="Upscaler visibility (if scale = 1)", - elem_id="faceswaplab_pp_upscaler_visibility" + 0, + 1, + value=lambda: opts.data.get( + "faceswaplab_pp_default_upscaler_visibility", 1 + ), + step=0.1, + label="Upscaler visibility (if scale = 1)", + elem_id="faceswaplab_pp_upscaler_visibility", ) with gr.Accordion(f"Post Inpainting", open=True): gr.Markdown( - """Inpainting sends image to inpainting with a mask on face (once for each faces).""") + """Inpainting sends the image to inpainting with a mask on the face (once for each face).""" + ) inpainting_when = gr.Dropdown( - elem_id="faceswaplab_pp_inpainting_when", choices = [e.value for e in upscaling.InpaintingWhen.__members__.values()],value=[upscaling.InpaintingWhen.BEFORE_RESTORE_FACE.value], label="Enable/When") + elem_id="faceswaplab_pp_inpainting_when", + choices=[ + e.value for e in upscaling.InpaintingWhen.__members__.values() + ], + value=[upscaling.InpaintingWhen.BEFORE_RESTORE_FACE.value], + label="Enable/When", + ) inpainting_denoising_strength = gr.Slider( - 0, 1, 0, step=0.01, elem_id="faceswaplab_pp_inpainting_denoising_strength", label="Denoising strenght (will send face to img2img after processing)" + 0, + 1, + 0, + step=0.01, + elem_id="faceswaplab_pp_inpainting_denoising_strength", + label="Denoising strength (will send face to img2img after processing)", ) - inpainting_denoising_prompt = gr.Textbox("Portrait of a [gender]",elem_id="faceswaplab_pp_inpainting_denoising_prompt", label="Inpainting prompt use [gender] instead of men 
or woman") - inpainting_denoising_negative_prompt = gr.Textbox("", elem_id="faceswaplab_pp_inpainting_denoising_neg_prompt", label="Inpainting negative prompt use [gender] instead of men or woman") + inpainting_denoising_prompt = gr.Textbox( + "Portrait of a [gender]", + elem_id="faceswaplab_pp_inpainting_denoising_prompt", + label="Inpainting prompt use [gender] instead of men or woman", + ) + inpainting_denoising_negative_prompt = gr.Textbox( + "", + elem_id="faceswaplab_pp_inpainting_denoising_neg_prompt", + label="Inpainting negative prompt use [gender] instead of men or woman", + ) with gr.Row(): samplers_names = [s.name for s in modules.sd_samplers.all_samplers] inpainting_sampler = gr.Dropdown( - choices=samplers_names, - value=[samplers_names[0]], - label="Inpainting Sampler", - elem_id="faceswaplab_pp_inpainting_sampler" - ) - inpainting_denoising_steps = gr.Slider( - 1, 150, 20, step=1, label="Inpainting steps", - elem_id="faceswaplab_pp_inpainting_steps" + choices=samplers_names, + value=[samplers_names[0]], + label="Inpainting Sampler", + elem_id="faceswaplab_pp_inpainting_sampler", + ) + inpainting_denoising_steps = gr.Slider( + 1, + 150, + 20, + step=1, + label="Inpainting steps", + elem_id="faceswaplab_pp_inpainting_steps", ) - - inpaiting_model = gr.Dropdown(choices=["Current"]+sd_models.checkpoint_tiles(), default="Current", label="sd model (experimental)", elem_id="faceswaplab_pp_inpainting_sd_model") + + inpaiting_model = gr.Dropdown( + choices=["Current"] + sd_models.checkpoint_tiles(), + default="Current", + label="sd model (experimental)", + elem_id="faceswaplab_pp_inpainting_sd_model", + ) return [ face_restorer_name, face_restorer_visibility, @@ -76,5 +135,5 @@ def upscaler_ui(): inpainting_denoising_steps, inpainting_sampler, inpainting_when, - inpaiting_model - ] \ No newline at end of file + inpaiting_model, + ] diff --git a/scripts/faceswaplab_utils/faceswaplab_logging.py b/scripts/faceswaplab_utils/faceswaplab_logging.py index a22fcb9..8390e08 100644 --- a/scripts/faceswaplab_utils/faceswaplab_logging.py +++ b/scripts/faceswaplab_utils/faceswaplab_logging.py @@ -4,6 +4,7 @@ import sys from modules import shared from PIL import Image + class ColoredFormatter(logging.Formatter): COLORS = { "DEBUG": "\033[0;36m", # CYAN @@ -40,14 +41,18 @@ loglevel = getattr(logging, loglevel_string.upper(), "INFO") logger.setLevel(loglevel) import tempfile -if logger.getEffectiveLevel() <= logging.DEBUG : + +if logger.getEffectiveLevel() <= logging.DEBUG: DEBUG_DIR = tempfile.mkdtemp() + def save_img_debug(img: Image.Image, message: str, *opts): if logger.getEffectiveLevel() <= logging.DEBUG: - with tempfile.NamedTemporaryFile(dir=DEBUG_DIR, delete=False, suffix=".png") as temp_file: + with tempfile.NamedTemporaryFile( + dir=DEBUG_DIR, delete=False, suffix=".png" + ) as temp_file: img_path = temp_file.name img.save(img_path) message_with_link = f"{message}\nImage: file://{img_path}" - logger.debug(message_with_link, *opts) \ No newline at end of file + logger.debug(message_with_link, *opts) diff --git a/scripts/faceswaplab_utils/imgutils.py b/scripts/faceswaplab_utils/imgutils.py index 0bb6772..23e0f05 100644 --- a/scripts/faceswaplab_utils/imgutils.py +++ b/scripts/faceswaplab_utils/imgutils.py @@ -1,6 +1,6 @@ import io from typing import Optional -from PIL import Image, ImageChops, ImageOps,ImageFilter +from PIL import Image, ImageChops, ImageOps, ImageFilter import cv2 import numpy as np from math import isqrt, ceil @@ -10,6 +10,7 @@ from scripts.faceswaplab_globals import 
NSFW_SCORE from modules import processing import base64 + def check_against_nsfw(img): shapes = [] chunks = detect(img) @@ -17,6 +18,7 @@ def check_against_nsfw(img): shapes.append(chunk["score"] > NSFW_SCORE) return any(shapes) + def pil_to_cv2(pil_img): return cv2.cvtColor(np.array(pil_img), cv2.COLOR_RGB2BGR) @@ -24,6 +26,7 @@ def pil_to_cv2(pil_img): def cv2_to_pil(cv2_img): return Image.fromarray(cv2.cvtColor(cv2_img, cv2.COLOR_BGR2RGB)) + def torch_to_pil(images): """ Convert a numpy image or a batch of images to a PIL image. @@ -49,35 +52,38 @@ def pil_to_torch(pil_images): torch_image = torch.from_numpy(numpy_image).permute(2, 0, 1) return torch_image + from collections import Counter + + def create_square_image(image_list): """ Creates a square image by combining multiple images in a grid pattern. - + Args: image_list (list): List of PIL Image objects to be combined. - + Returns: PIL Image object: The resulting square image. None: If the image_list is empty or contains only one image. """ - + # Count the occurrences of each image size in the image_list size_counter = Counter(image.size for image in image_list) - + # Get the most common image size (size with the highest count) common_size = size_counter.most_common(1)[0][0] - + # Filter the image_list to include only images with the common size image_list = [image for image in image_list if image.size == common_size] - + # Get the dimensions (width and height) of the common size size = common_size - + # If there are more than one image in the image_list if len(image_list) > 1: num_images = len(image_list) - + # Calculate the number of rows and columns for the grid rows = isqrt(num_images) cols = ceil(num_images / rows) @@ -97,10 +103,11 @@ def create_square_image(image_list): # Return the resulting square image return square_image - + # Return None if there are no images or only one image in the image_list return None + def create_mask(image, box_coords): width, height = image.size mask = Image.new("L", (width, height), 255) @@ -113,43 +120,47 @@ def create_mask(image, box_coords): mask.putpixel((x, y), 0) return mask -def apply_mask(img : Image.Image,p : processing.StableDiffusionProcessing, batch_index : int) -> Image.Image : + +def apply_mask( + img: Image.Image, p: processing.StableDiffusionProcessing, batch_index: int +) -> Image.Image: """ Apply mask overlay and color correction to an image if enabled - + Args: img: PIL Image objects. 
p : The processing object batch_index : the batch index - + Returns: PIL Image object """ - if isinstance(p, processing.StableDiffusionProcessingImg2Img) : - if p.inpaint_full_res : + if isinstance(p, processing.StableDiffusionProcessingImg2Img): + if p.inpaint_full_res: overlays = p.overlay_images if overlays is None or batch_index >= len(overlays): return img - overlay : Image.Image = overlays[batch_index] - overlay = overlay.resize((img.size), resample= Image.Resampling.LANCZOS) + overlay: Image.Image = overlays[batch_index] + overlay = overlay.resize((img.size), resample=Image.Resampling.LANCZOS) img = img.copy() img.paste(overlay, (0, 0), overlay) - return img - + return img + img = processing.apply_overlay(img, p.paste_to, batch_index, p.overlay_images) if p.color_corrections is not None and batch_index < len(p.color_corrections): - img = processing.apply_color_correction(p.color_corrections[batch_index], img) + img = processing.apply_color_correction( + p.color_corrections[batch_index], img + ) return img - def prepare_mask( mask: Image.Image, p: processing.StableDiffusionProcessing ) -> Image.Image: """ Prepare an image mask for the inpainting process. (This comes from controlnet) - This function takes as input a PIL Image object and an instance of the + This function takes as input a PIL Image object and an instance of the StableDiffusionProcessing class, and performs the following steps to prepare the mask: 1. Convert the mask to grayscale (mode "L"). @@ -160,26 +171,26 @@ def prepare_mask( Args: mask (Image.Image): The input mask as a PIL Image object. - p (processing.StableDiffusionProcessing): An instance of the StableDiffusionProcessing class + p (processing.StableDiffusionProcessing): An instance of the StableDiffusionProcessing class containing the processing parameters. Returns: mask (Image.Image): The prepared mask as a PIL Image object. """ mask = mask.convert("L") - #FIXME : Properly fix blur + # FIXME : Properly fix blur # if getattr(p, "mask_blur", 0) > 0: # mask = mask.filter(ImageFilter.GaussianBlur(p.mask_blur)) return mask -def base64_to_pil(base64str : Optional[str]) -> Optional[Image.Image] : - if base64str is None : + +def base64_to_pil(base64str: Optional[str]) -> Optional[Image.Image]: + if base64str is None: return None - if 'base64,' in base64str: # check if the base64 string has a data URL scheme - base64_data = base64str.split('base64,')[-1] + if "base64," in base64str: # check if the base64 string has a data URL scheme + base64_data = base64str.split("base64,")[-1] img_bytes = base64.b64decode(base64_data) else: # if no data URL scheme, just decode img_bytes = base64.b64decode(base64str) return Image.open(io.BytesIO(img_bytes)) - diff --git a/scripts/faceswaplab_utils/models_utils.py b/scripts/faceswaplab_utils/models_utils.py index a803870..dba19a4 100644 --- a/scripts/faceswaplab_utils/models_utils.py +++ b/scripts/faceswaplab_utils/models_utils.py @@ -1,4 +1,3 @@ - import glob import os import modules.scripts as scripts @@ -7,6 +6,7 @@ from scripts.faceswaplab_globals import EXTENSION_PATH from modules.shared import opts from scripts.faceswaplab_utils.faceswaplab_logging import logger + def get_models(): """ Retrieve a list of swap model files. 
@@ -29,17 +29,21 @@ def get_models(): return models -def get_current_model() -> str : + +def get_current_model() -> str: model = opts.data.get("faceswaplab_model", None) - if model is None : + if model is None: models = get_models() model = models[0] if len(models) else None logger.info("Try to use model : %s", model) if not os.path.isfile(model): logger.error("The model %s cannot be found or loaded", model) - raise FileNotFoundError("No faceswap model found. Please add it to the faceswaplab directory.") + raise FileNotFoundError( + "No faceswap model found. Please add it to the faceswaplab directory." + ) return model + def get_face_checkpoints(): """ Retrieve a list of face checkpoint paths. @@ -50,6 +54,8 @@ def get_face_checkpoints(): Returns: list: A list of face paths, including the string "None" as the first element. """ - faces_path = os.path.join(scripts.basedir(), "models", "faceswaplab", "faces", "*.pkl") + faces_path = os.path.join( + scripts.basedir(), "models", "faceswaplab", "faces", "*.pkl" + ) faces = glob.glob(faces_path) return ["None"] + faces