add option for gpu in settings

Branch: main
Author: Tran Xen, 2 years ago
parent 76dbd57ad5
commit 0db1f452fd

@@ -172,6 +172,8 @@ You don't have to use the api_utils.py file and pydantic types, but it can save
## Experimental GPU support
+You need a sufficiently recent version of your SD environment. Using the GPU comes with a number of small caveats to understand, but the performance gain is substantial.
In Version 1.2.1, the ability to use the GPU has been added, a setting that can be configured in SD at startup. Currently, this feature is only supported on Windows and Linux, as the necessary dependencies for Mac have not been included.
The `--faceswaplab_gpu` option in SD can be added to the args in webui-user.sh or webui-user.bat.
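Putting the two mechanisms together, the extension only tries the GPU when the `--faceswaplab_gpu` flag or the `faceswaplab_use_gpu` setting is enabled and the platform is not macOS. Below is a minimal sketch of that decision, assuming the webui's `modules.shared` is importable; the helper name `gpu_requested` is illustrative, the real logic lives in install.py and swapper.py as shown in the diff further down.

```python
# Illustrative sketch only: mirrors the GPU decision wired up by this commit.
import sys

from modules import shared  # webui module exposing cmd_opts and opts


def gpu_requested() -> bool:  # hypothetical helper name
    flag = getattr(shared.cmd_opts, "faceswaplab_gpu", False)
    setting = shared.opts.data.get("faceswaplab_use_gpu", False)
    # The GPU is never attempted on macOS, as the dependencies are not included.
    return (flag or setting) and sys.platform != "darwin"
```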
@@ -184,6 +186,16 @@ The `auto_det_size` option emulates the old behavior. It has no difference on CP
If you have enabled the GPU and you are sure you have a CUDA-compatible card but the model keeps using the CPU provider, please check that you have onnxruntime-gpu installed.
+### SD.NEXT and GPU
+Please read carefully.
+Using the GPU requires the onnxruntime-gpu>=1.15.0 dependency. For the moment, this conflicts with older SD.Next dependencies (tensorflow, which uses numpy, and potentially rembg). You will need to check that numpy>=1.24.2 and tensorflow>=2.13.0 are installed.
+You should therefore be comfortable doing a little debugging before activating the option. If you don't feel up to it, it's best not to use it.
+The first time the swap is used, the program will keep using the CPU but will offer to install the GPU dependencies. You will then need to restart. This is due to the optimizations SD.Next applies to the installation scripts.
## Settings
You can change the program's default behavior in your webui's global settings (FaceSwapLab section in settings). This is particularly useful if you want to have default options for inpainting or for post-processing, for example.
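To sanity-check the environment before enabling the option, a small script along these lines (a sketch, not shipped with the extension) lists the onnxruntime execution providers and compares numpy/tensorflow against the versions mentioned above:

```python
# Environment sanity check (illustrative sketch): if "CUDAExecutionProvider" is
# missing, onnxruntime-gpu is not installed or not visible to this Python env.
import onnxruntime
import pkg_resources
from packaging.version import parse

print("onnxruntime providers:", onnxruntime.get_available_providers())

for package, minimum in [("numpy", "1.24.2"), ("tensorflow", "2.13.0")]:
    try:
        installed = parse(pkg_resources.get_distribution(package).version)
        status = "OK" if installed >= parse(minimum) else f"too old (< {minimum})"
        print(f"{package} {installed}: {status}")
    except pkg_resources.DistributionNotFound:
        print(f"{package}: not installed")
```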

@@ -20,6 +20,12 @@ Before beginning the installation process, if you are using Windows, you need to
3. OR if you don't want to install either the full Visual Studio suite or the VS C++ Build Tools: Follow the instructions provided in section VIII of the documentation.
+## SD.Next / Vladmantic
+SD.Next's loading optimizations around extension installation scripts can sometimes cause problems. This is particularly the case if you copy the extension manually instead of installing it via the interface.
+If you get an error after startup, try restarting the server.
## Manual Install
To install the extension, follow the steps below:

@@ -5,7 +5,11 @@ import pkg_resources
from modules import shared
from packaging.version import parse
-use_gpu = getattr(shared.cmd_opts, "faceswaplab_gpu", False)
+def check_install() -> None:
+    use_gpu = getattr(
+        shared.cmd_opts, "faceswaplab_gpu", False
+    ) or shared.opts.data.get("faceswaplab_use_gpu", False)
    if use_gpu and sys.platform != "darwin":
        req_file = os.path.join(
@@ -16,11 +20,12 @@ else:
            os.path.dirname(os.path.realpath(__file__)), "requirements.txt"
        )
    def is_installed(package: str) -> bool:
        package_name = package.split("==")[0].split(">=")[0].strip()
        try:
-            installed_version = parse(pkg_resources.get_distribution(package_name).version)
+            installed_version = parse(
+                pkg_resources.get_distribution(package_name).version
+            )
        except pkg_resources.DistributionNotFound:
            return False
@@ -33,7 +38,6 @@ def is_installed(package: str) -> bool:
        else:
            return True
    print("Checking faceswaplab requirements")
    with open(req_file) as file:
        for package in file:
@@ -43,10 +47,16 @@ with open(req_file) as file:
                if not is_installed(package):
                    print(f"Install {package}")
                    launch.run_pip(
-                        f"install {package}", f"sd-webui-faceswaplab requirement: {package}"
+                        f"install {package}",
+                        f"sd-webui-faceswaplab requirement: {package}",
                    )
            except Exception as e:
                print(e)
-                print(f"Warning: Failed to install {package}, faceswaplab will not work.")
+                print(
+                    f"Warning: Failed to install {package}, faceswaplab will not work."
+                )
                raise e
+check_install()

@@ -1,24 +1,57 @@
import os
from tqdm import tqdm
+import traceback
import urllib.request
from scripts.faceswaplab_utils.faceswaplab_logging import logger
-from scripts.faceswaplab_swapping.swapper import is_sha1_matching
-from scripts.faceswaplab_utils.models_utils import get_models
from scripts.faceswaplab_globals import *
from packaging import version
import pkg_resources
+import hashlib
ALREADY_DONE = False
+def check_install() -> None:
+    # Very ugly hack :( due to sdnext optimization not calling install.py every time if git log has not changed
+    import importlib.util
+    import sys
+    import os
+    current_dir = os.path.dirname(os.path.realpath(__file__))
+    check_install_path = os.path.join(current_dir, "..", "install.py")
+    spec = importlib.util.spec_from_file_location("check_install", check_install_path)
+    check_install = importlib.util.module_from_spec(spec)
+    sys.modules["check_install"] = check_install
+    spec.loader.exec_module(check_install)
+    check_install.check_install()  # type: ignore
+    #### End of ugly hack :( !
+def is_sha1_matching(file_path: str, expected_sha1: str) -> bool:
+    sha1_hash = hashlib.sha1(usedforsecurity=False)
+    try:
+        with open(file_path, "rb") as file:
+            for byte_block in iter(lambda: file.read(4096), b""):
+                sha1_hash.update(byte_block)
+            if sha1_hash.hexdigest() == expected_sha1:
+                return True
+            else:
+                return False
+    except Exception as e:
+        logger.error(
+            "Failed to check model hash, check the model is valid or has been downloaded adequately : %e",
+            e,
+        )
+        traceback.print_exc()
+        return False
def check_configuration() -> None:
    global ALREADY_DONE
    if ALREADY_DONE:
        return
+    logger.info(f"FaceSwapLab {VERSION_FLAG} Config :")
    # This has been moved here due to pb with sdnext in install.py not doing what a1111 is doing.
    models_dir = MODELS_DIR
    faces_dir = FACES_DIR
@@ -48,6 +81,9 @@ def check_configuration() -> None:
    os.makedirs(models_dir, exist_ok=True)
    os.makedirs(faces_dir, exist_ok=True)
+    if not os.path.exists(model_path):
+        download(model_url, model_path)
    if not is_sha1_matching(model_path, EXPECTED_INSWAPPER_SHA1):
        logger.error(
            "Suspicious sha1 for model %s, check the model is valid or has been downloaded adequately. Should be %s",
@@ -63,17 +99,4 @@ def check_configuration() -> None:
            gradio_version,
        )
-    if not os.path.exists(model_path):
-        download(model_url, model_path)
-    def print_infos() -> None:
-        logger.info("FaceSwapLab config :")
-        logger.info("+ MODEL DIR : %s", models_dir)
-        models = get_models()
-        logger.info("+ MODELS: %s", models)
-        logger.info("+ FACES DIR : %s", faces_dir)
-        logger.info("+ ANALYZER DIR : %s", ANALYZER_DIR)
-    print_infos()
    ALREADY_DONE = True

@@ -1,8 +1,11 @@
+from scripts.configure import check_configuration
+check_configuration()
import importlib
import traceback
from scripts import faceswaplab_globals
-from scripts.configure import check_configuration
from scripts.faceswaplab_api import faceswaplab_api
from scripts.faceswaplab_postprocessing import upscaling
from scripts.faceswaplab_settings import faceswaplab_settings
@@ -12,9 +15,13 @@ from scripts.faceswaplab_utils import faceswaplab_logging, imgutils, models_util
from scripts.faceswaplab_utils.models_utils import get_current_model
from scripts.faceswaplab_utils.typing import *
from scripts.faceswaplab_utils.ui_utils import dataclasses_from_flat_list
+from scripts.faceswaplab_utils.faceswaplab_logging import logger, save_img_debug
# Reload all the modules when using "apply and restart"
# This is mainly done for development purposes
+import logging
+if logger.getEffectiveLevel() <= logging.DEBUG:
    importlib.reload(swapper)
    importlib.reload(faceswaplab_logging)
    importlib.reload(faceswaplab_globals)
@@ -46,7 +53,6 @@ from scripts.faceswaplab_postprocessing.postprocessing_options import (
    PostProcessingOptions,
)
from scripts.faceswaplab_ui.faceswaplab_unit_settings import FaceSwapUnitSettings
-from scripts.faceswaplab_utils.faceswaplab_logging import logger, save_img_debug
EXTENSION_PATH = os.path.join("extensions", "sd-webui-faceswaplab")
@@ -67,7 +73,6 @@ except:
class FaceSwapScript(scripts.Script):
    def __init__(self) -> None:
        super().__init__()
-        check_configuration()
    @property
    def units_count(self) -> int:

@@ -16,6 +16,16 @@ def on_ui_settings() -> None:
            section=section,
        ),
    )
+    shared.opts.add_option(
+        "faceswaplab_use_gpu",
+        shared.OptionInfo(
+            False,
+            "Use GPU, only for CUDA on Windows/Linux - experimental and risky, can mess up dependencies (requires restart)",
+            gr.Checkbox,
+            {"interactive": True},
+            section=section,
+        ),
+    )
    shared.opts.add_option(
        "faceswaplab_keep_original",
        shared.OptionInfo(
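For context, a sketch of how the new option is consumed: elsewhere in this commit (install.py and swapper.py) the saved value is read back through `shared.opts.data` and combined with the `--faceswaplab_gpu` command-line flag; a restart is required for a change to take effect.

```python
# Illustrative sketch: reading the option registered above, as install.py
# and swapper.py do in this commit (default False if never saved).
from modules import shared

wants_gpu = getattr(shared.cmd_opts, "faceswaplab_gpu", False) or shared.opts.data.get(
    "faceswaplab_use_gpu", False
)
```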

@@ -9,7 +9,6 @@ from tqdm import tqdm
import sys
from io import StringIO
from contextlib import contextmanager
-import hashlib
import cv2
import insightface
@@ -40,12 +39,30 @@ from scripts.faceswaplab_inpainting.i2i_pp import img2img_diffusion
from modules import shared
import onnxruntime
-USE_GPU = (
-    getattr(shared.cmd_opts, "faceswaplab_gpu", False) and sys.platform != "darwin"
-)
+def use_gpu() -> bool:
+    return (
+        getattr(shared.cmd_opts, "faceswaplab_gpu", False)
+        or opts.data.get("faceswaplab_use_gpu", False)
+    ) and sys.platform != "darwin"
+@lru_cache
+def force_install_gpu_providers() -> None:
+    # Ugly Ugly hack due to SDNEXT :
+    from scripts.configure import check_install
+    logger.warning("Try to reinstall gpu dependencies")
+    check_install()
+    logger.warning("IF onnxruntime-gpu has been installed successfully, RESTART")
+    logger.warning(
+        "On SD.NEXT/vladmantic you will also need to check numpy>=1.24.2 and tensorflow>=2.13.0"
+    )
+def get_providers() -> List[str]:
    providers = ["CPUExecutionProvider"]
-if USE_GPU and sys.platform != "darwin":
+    if use_gpu():
        if "CUDAExecutionProvider" in onnxruntime.get_available_providers():
            providers = ["CUDAExecutionProvider"]
        else:
@@ -53,7 +70,13 @@ if USE_GPU and sys.platform != "darwin":
                "CUDAExecutionProvider not found in onnxruntime.available_providers : %s, use CPU instead. Check onnxruntime-gpu is installed.",
                onnxruntime.get_available_providers(),
            )
-        USE_GPU = False
+            force_install_gpu_providers()
+    return providers
+def is_cpu_provider() -> bool:
+    return get_providers() == ["CPUExecutionProvider"]
def cosine_similarity_face(face1: Face, face2: Face) -> float:
@@ -281,20 +304,6 @@ def capture_stdout() -> Generator[StringIO, None, None]:
    sys.stdout = original_stdout  # Type: ignore
-# On GPU we can keep a non prepared model in ram and deepcopy it every time det_size change (old behaviour)
-@lru_cache(maxsize=1)
-def get_cpu_analysis() -> insightface.app.FaceAnalysis:
-    return insightface.app.FaceAnalysis(
-        name="buffalo_l",
-        providers=providers,
-        root=faceswaplab_globals.ANALYZER_DIR,
-    )
-# FIXME : This function is way more complicated than it could be.
-# It is done that way to preserve the original behavior with CPU.
-# Most users don't reed the doc, so we need to keep the features as close as possible
-# to original behavior.
@lru_cache(maxsize=3)
def getAnalysisModel(
    det_size: Tuple[int, int] = (640, 640), det_thresh: float = 0.5
@@ -309,16 +318,18 @@ def getAnalysisModel(
        if not os.path.exists(faceswaplab_globals.ANALYZER_DIR):
            os.makedirs(faceswaplab_globals.ANALYZER_DIR)
+        providers = get_providers()
        logger.info(
-            f"Load analysis model det_size={det_size}, det_thresh={det_thresh}, gpu={USE_GPU}, providers = {providers}, will take some time. (> 30s)"
+            f"Load analysis model det_size={det_size}, det_thresh={det_thresh}, providers = {providers}, will take some time. (> 30s)"
        )
        # Initialize the analysis model with the specified name and providers
        with tqdm(
-            total=1, desc="Loading analysis model (first time is slow)", unit="model"
+            total=1,
+            desc=f"Loading {det_size} analysis model (first time is slow)",
+            unit="model",
        ) as pbar:
            with capture_stdout() as captured:
-                if USE_GPU:
                model = insightface.app.FaceAnalysis(
                    name="buffalo_l",
                    providers=providers,
@@ -327,11 +338,6 @@
                # Prepare the analysis model for face detection with the specified detection size
                model.prepare(ctx_id=0, det_thresh=det_thresh, det_size=det_size)
-                else:
-                    # This is a hacky way to speed up loading for gpu only
-                    model = copy.deepcopy(get_cpu_analysis())
-                    model.prepare(ctx_id=0, det_thresh=det_thresh, det_size=det_size)
            pbar.update(1)
        logger.info("%s", pformat(captured.getvalue()))
@@ -343,25 +349,6 @@
        raise FaceModelException("Loading of analysis model failed")
-def is_sha1_matching(file_path: str, expected_sha1: str) -> bool:
-    sha1_hash = hashlib.sha1(usedforsecurity=False)
-    try:
-        with open(file_path, "rb") as file:
-            for byte_block in iter(lambda: file.read(4096), b""):
-                sha1_hash.update(byte_block)
-            if sha1_hash.hexdigest() == expected_sha1:
-                return True
-            else:
-                return False
-    except Exception as e:
-        logger.error(
-            "Failed to check model hash, check the model is valid or has been downloaded adequately : %e",
-            e,
-        )
-        traceback.print_exc()
-        return False
@lru_cache(maxsize=1)
def getFaceSwapModel(model_path: str) -> upscaled_inswapper.UpscaledINSwapper:
    """
@@ -374,14 +361,7 @@ def getFaceSwapModel(model_path: str) -> upscaled_inswapper.UpscaledINSwapper:
        insightface.model_zoo.FaceModel: The face swap model.
    """
    try:
-        expected_sha1 = "17a64851eaefd55ea597ee41e5c18409754244c5"
+        providers = get_providers()
-        if not is_sha1_matching(model_path, expected_sha1):
-            logger.error(
-                "Suspicious sha1 for model %s, check the model is valid or has been downloaded adequately. Should be %s",
-                model_path,
-                expected_sha1,
-            )
        with tqdm(total=1, desc="Loading swap model", unit="model") as pbar:
            with capture_stdout() as captured:
                model = upscaled_inswapper.UpscaledINSwapper(
