improve tests

main
Tran Xen 2 years ago
parent 15e9366eb6
commit 750c9be713

@ -1,3 +1,5 @@
# Keep a copy of this file here; it is used by the server-side API
from typing import List, Tuple
from PIL import Image
from pydantic import BaseModel, Field
@ -156,6 +158,19 @@ class FaceSwapResponse(BaseModel):
return [base64_to_pil(img) for img in self.images]
class FaceSwapCompareRequest(BaseModel):
image1: str = Field(
description="base64 reference image",
examples=["data:image/jpeg;base64,/9j/4AAQSkZJRgABAQECWAJYAAD...."],
default=None,
)
image2: str = Field(
description="base64 reference image",
examples=["data:image/jpeg;base64,/9j/4AAQSkZJRgABAQECWAJYAAD...."],
default=None,
)
def pil_to_base64(img: Image.Image) -> np.array: # type:ignore
if isinstance(img, str):
img = Image.open(img)

@ -1,11 +1,12 @@
import requests
from client_utils import (
from api_utils import (
FaceSwapRequest,
FaceSwapUnit,
PostProcessingOptions,
FaceSwapResponse,
pil_to_base64,
InpaintingWhen,
FaceSwapCompareRequest,
)
address = "http://127.0.0.1:7860"
@ -48,6 +49,19 @@ result = requests.post(
)
response = FaceSwapResponse.parse_obj(result.json())
print(response.json())
for img in response.pil_images:
img.show()
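# Compare the original reference face with the first swapped result.
# pil_to_base64 accepts either a file path or a PIL image (see api_utils above).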
request = FaceSwapCompareRequest(
image1=pil_to_base64("../references/man.png"),
image2=pil_to_base64(response.pil_images[0]),
)
result = requests.post(
url=f"{address}/faceswaplab/compare",
data=request.json(),
headers={"Content-Type": "application/json; charset=utf-8"},
)
print("similarity", result.text)

@ -2,7 +2,7 @@ from PIL import Image
import numpy as np
from fastapi import FastAPI
from modules.api import api
from scripts.faceswaplab_api.faceswaplab_api_types import (
from client_api.api_utils import (
FaceSwapResponse,
)
from scripts.faceswaplab_globals import VERSION_FLAG
@ -16,7 +16,7 @@ from scripts.faceswaplab_utils.imgutils import (
from scripts.faceswaplab_postprocessing.postprocessing_options import (
PostProcessingOptions,
)
from scripts.faceswaplab_api import faceswaplab_api_types
from client_api import api_utils
from scripts.faceswaplab_postprocessing.postprocessing_options import InpaintingWhen
@ -59,7 +59,7 @@ def encode_np_to_base64(image: np.ndarray) -> str: # type: ignore
def get_postprocessing_options(
options: faceswaplab_api_types.PostProcessingOptions,
options: api_utils.PostProcessingOptions,
) -> PostProcessingOptions:
pp_options = PostProcessingOptions(
face_restorer_name=options.face_restorer_name,
@ -73,7 +73,9 @@ def get_postprocessing_options(
inpainting_negative_prompt=options.inpainting_negative_prompt,
inpainting_steps=options.inpainting_steps,
inpainting_sampler=options.inpainting_sampler,
inpainting_when=options.inpainting_when,
# Hacky way to avoid a separate file for InpaintingWhen (two classes exist),
# therefore a conversion is required from the API InpaintingWhen to the server-side one
inpainting_when=InpaintingWhen(options.inpainting_when.value),
inpainting_model=options.inpainting_model,
)
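The conversion above relies on the two InpaintingWhen enums sharing the same member values, so the server-side class can be rebuilt from the API-side value. A minimal sketch of that pattern, using two hypothetical enums in place of the real classes:

from enum import Enum

class ApiInpaintingWhen(Enum):
    # hypothetical stand-in for the enum bundled with client_api.api_utils
    NEVER = "never"
    BEFORE_UPSCALING = "before_upscaling"
    AFTER_ALL = "after_all"

class ServerInpaintingWhen(Enum):
    # hypothetical stand-in for the server-side enum; values must stay in sync
    NEVER = "never"
    BEFORE_UPSCALING = "before_upscaling"
    AFTER_ALL = "after_all"

# Enum(value) looks a member up by value, so identical value sets convert cleanly
api_choice = ApiInpaintingWhen.BEFORE_UPSCALING
server_choice = ServerInpaintingWhen(api_choice.value)
assert server_choice is ServerInpaintingWhen.BEFORE_UPSCALING

Keeping the values identical on both sides is what makes the duplicated api_utils module workable without a shared dependency.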
@ -85,7 +87,7 @@ def get_postprocessing_options(
def get_faceswap_units_settings(
api_units: List[faceswaplab_api_types.FaceSwapUnit],
api_units: List[api_utils.FaceSwapUnit],
) -> List[FaceSwapUnitSettings]:
units = []
for u in api_units:
@ -127,8 +129,8 @@ def faceswaplab_api(_: gr.Blocks, app: FastAPI) -> None:
description="Swap a face in an image using units",
)
async def swap_face(
request: faceswaplab_api_types.FaceSwapRequest,
) -> faceswaplab_api_types.FaceSwapResponse:
request: api_utils.FaceSwapRequest,
) -> api_utils.FaceSwapResponse:
units: List[FaceSwapUnitSettings] = []
src_image: Optional[Image.Image] = base64_to_pil(request.image)
response = FaceSwapResponse(images=[], infos=[])
@ -147,3 +149,15 @@ def faceswaplab_api(_: gr.Blocks, app: FastAPI) -> None:
response.infos = [] # Not used atm
return response
@app.post(
"/faceswaplab/compare",
tags=["faceswaplab"],
description="Compare first face of each images",
)
async def compare(
request: api_utils.FaceSwapCompareRequest,
) -> float:
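# Returns the similarity between the first face found in each image as a plain float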
return swapper.compare_faces(
base64_to_pil(request.image1), base64_to_pil(request.image2)
)

@ -1,144 +0,0 @@
from typing import List, Optional, Tuple
from PIL import Image
from scripts.faceswaplab_utils.imgutils import (
base64_to_pil,
)
from pydantic import BaseModel, Field
from scripts.faceswaplab_postprocessing.postprocessing_options import InpaintingWhen
class FaceSwapUnit(BaseModel):
# The image given in reference
source_img: str = Field(
description="base64 reference image",
examples=["data:image/jpeg;base64,/9j/4AAQSkZJRgABAQECWAJYAAD...."],
default=None,
)
# The checkpoint file
source_face: str = Field(
description="face checkpoint (from models/faceswaplab/faces)",
examples=["my_face.pkl"],
default=None,
)
# base64 batch source images
batch_images: Tuple[str] = Field(
description="list of base64 batch source images",
examples=[
"data:image/jpeg;base64,/9j/4AAQSkZJRgABAQECWAJYAAD....",
"data:image/jpeg;base64,/9j/4AAQSkZJRgABAQECWAJYAAD....",
],
default=None,
)
# Will blend faces if True
blend_faces: bool = Field(description="Will blend faces if True", default=True)
# Use same gender filtering
same_gender: bool = Field(description="Use same gender filtering", default=False)
# Sort faces by size
sort_by_size: bool = Field(description="Sort Faces by size", default=False)
# If True, discard images with low similarity
check_similarity: bool = Field(
description="If True, discard images with low similarity", default=False
)
# if True will compute similarity and add it to the image info
compute_similarity: bool = Field(
description="If True will compute similarity and add it to the image info",
default=False,
)
# Minimum similarity against the used face (reference, batch or checkpoint)
min_sim: float = Field(
description="Minimum similarity against the used face (reference, batch or checkpoint)",
default=0.0,
)
# Minimum similarity against the reference (reference or checkpoint if checkpoint is given)
min_ref_sim: float = Field(
description="Minimum similarity against the reference (reference or checkpoint if checkpoint is given)",
default=0.0,
)
# The face index to use for swapping
faces_index: Tuple[int] = Field(
description="The face index to use for swapping, list of face numbers starting from 0",
default=(0,),
)
reference_face_index: int = Field(
description="The face index to use to extract face from reference",
default=0,
)
def get_batch_images(self) -> List[Image.Image]:
images = []
if self.batch_images:
for img in self.batch_images:
images.append(base64_to_pil(img))
return images
class PostProcessingOptions(BaseModel):
face_restorer_name: str = Field(description="face restorer name", default=None)
restorer_visibility: float = Field(
description="face restorer visibility", default=1, le=1, ge=0
)
codeformer_weight: float = Field(
description="face restorer codeformer weight", default=1, le=1, ge=0
)
upscaler_name: str = Field(description="upscaler name", default=None)
scale: float = Field(description="upscaling scale", default=1, le=10, ge=0)
upscaler_visibility: float = Field(
description="upscaler visibility", default=1, le=1, ge=0
)
inpainting_denoising_strengh: float = Field(
description="Inpainting denoising strenght", default=0, lt=1, ge=0
)
inpainting_prompt: str = Field(
description="Inpainting denoising strenght",
examples=["Portrait of a [gender]"],
default="Portrait of a [gender]",
)
inpainting_negative_prompt: str = Field(
description="Inpainting denoising strenght",
examples=[
"Deformed, blurry, bad anatomy, disfigured, poorly drawn face, mutation"
],
default="",
)
inpainting_steps: int = Field(
description="Inpainting steps",
examples=["Portrait of a [gender]"],
ge=1,
le=150,
default=20,
)
inpainting_sampler: str = Field(
description="Inpainting sampler", examples=["Euler"], default="Euler"
)
inpainting_when: InpaintingWhen = Field(
description="When inpainting happens",
examples=[e.value for e in InpaintingWhen.__members__.values()],
default=InpaintingWhen.NEVER,
)
inpainting_model: str = Field(
description="Inpainting model", examples=["Current"], default="Current"
)
class FaceSwapRequest(BaseModel):
image: str = Field(
description="base64 reference image",
examples=["data:image/jpeg;base64,/9j/4AAQSkZJRgABAQECWAJYAAD...."],
default=None,
)
units: List[FaceSwapUnit]
postprocessing: Optional[PostProcessingOptions]
class FaceSwapResponse(BaseModel):
images: List[str] = Field(description="base64 swapped image", default=None)
infos: List[str]

@ -27,6 +27,7 @@ from scripts.faceswaplab_postprocessing.postprocessing_options import (
PostProcessingOptions,
)
from scripts.faceswaplab_utils.models_utils import get_current_model
import gradio as gr
providers = ["CPUExecutionProvider"]
@ -91,6 +92,21 @@ def batch_process(
units: List[FaceSwapUnitSettings],
postprocess_options: PostProcessingOptions,
) -> Optional[List[Image.Image]]:
"""
Process a batch of images, apply face swapping according to the given settings, and optionally save the resulting images to a specified path.
Args:
src_images (List[Image.Image]): List of source PIL Images to process.
save_path (Optional[str]): Destination path where the processed images will be saved. If None, no images are saved.
units (List[FaceSwapUnitSettings]): List of FaceSwapUnitSettings to apply to the images.
postprocess_options (PostProcessingOptions): Post-processing settings to be applied to the images.
Returns:
Optional[List[Image.Image]]: List of processed images, or None in case of an exception.
Raises:
Any exceptions raised by the underlying process will be logged and the function will return None.
"""
try:
if save_path:
os.makedirs(save_path, exist_ok=True)
@ -281,9 +297,6 @@ def get_or_default(l: List[Any], index: int, default: Any) -> Any:
return l[index] if index < len(l) else default
import gradio as gr
def get_faces_from_img_files(files: List[gr.File]) -> List[Optional[np.ndarray]]: # type: ignore
"""
Extracts faces from a list of image files.
@ -536,6 +549,25 @@ def process_images_units(
upscaled_swapper: bool = False,
force_blend: bool = False,
) -> Optional[List[Tuple[Image.Image, str]]]:
"""
Process a list of images using a specified model and unit settings for face swapping.
Args:
model (str): The name of the model to use for processing.
units (List[FaceSwapUnitSettings]): A list of settings for face swap units to apply on each image.
images (List[Tuple[Optional[Image.Image], Optional[str]]]): A list of tuples, each containing
an image and its associated info string. If an image or info string is not available,
its value can be None.
upscaled_swapper (bool, optional): If True, uses an upscaled version of the face swapper.
Defaults to False.
force_blend (bool, optional): If True, forces the blending of the swapped face on the original
image. Defaults to False.
Returns:
Optional[List[Tuple[Image.Image, str]]]: A list of tuples, each containing a processed image
and its associated info string. If no units are provided for processing, returns None.
"""
if len(units) == 0:
logger.info("Finished processing image, return %s images", len(images))
return None

@ -5,7 +5,7 @@ import sys
sys.path.append(".")
from client_api.client_utils import (
from client_api.api_utils import (
FaceSwapUnit,
FaceSwapResponse,
PostProcessingOptions,
@ -13,6 +13,7 @@ from client_api.client_utils import (
base64_to_pil,
pil_to_base64,
InpaintingWhen,
FaceSwapCompareRequest,
)
from PIL import Image
@ -62,6 +63,22 @@ def test_version() -> None:
assert "version" in response.json()
def test_compare() -> None:
request = FaceSwapCompareRequest(
image1=pil_to_base64("references/man.png"),
image2=pil_to_base64("references/man.png"),
)
response = requests.post(
url=f"{base_url}/faceswaplab/compare",
data=request.json(),
headers={"Content-Type": "application/json; charset=utf-8"},
)
assert response.status_code == 200
similarity = float(response.text)
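# Comparing an image with itself should score close to 1.0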
assert similarity > 0.90
def test_faceswap(face_swap_request: FaceSwapRequest) -> None:
response = requests.post(
f"{base_url}/faceswaplab/swap_face",
@ -81,3 +98,19 @@ def test_faceswap(face_swap_request: FaceSwapRequest) -> None:
orig_image = base64_to_pil(face_swap_request.image)
assert image.width == orig_image.width * face_swap_request.postprocessing.scale
assert image.height == orig_image.height * face_swap_request.postprocessing.scale
# Compare the result with the reference and check similarity for the man (first face)
request = FaceSwapCompareRequest(
image1=pil_to_base64("references/man.png"),
image2=res.images[0],
)
response = requests.post(
url=f"{base_url}/faceswaplab/compare",
data=request.json(),
headers={"Content-Type": "application/json; charset=utf-8"},
)
assert response.status_code == 200
similarity = float(response.text)
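# The swapped face was taken from this reference, so a reasonably high similarity is expected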
assert similarity > 0.50
