first commit · main · Tran Xen · 2 years ago · commit 056f7a72a2

@@ -0,0 +1,34 @@
---
name: Bug report
about: Create a report to help us improve
title: ''
labels: ''
assignees: ''
---
**Please remember that the bug report section is not a forum. Before submitting a bug, ensure you have read the FAQ thoroughly. This helps us maintain an efficient bug tracking process. Thank you for your understanding and cooperation. Use the discussion section for anything else.**
**Describe the bug**
A clear and concise description of what the bug is.
**To Reproduce**
Steps to reproduce the behavior:
1. Go to '...'
2. Click on '....'
3. Scroll down to '....'
4. See error
**Expected behavior**
A clear and concise description of what you expected to happen.
**Screenshots**
If applicable, add screenshots to help explain your problem.
**Desktop (please complete the following information):**
- OS: [e.g. iOS]
- Browser [e.g. chrome, safari]
- Version [e.g. 22]
**Additional context**
Add any other context about the problem here.

@@ -0,0 +1,22 @@
---
name: Feature request
about: Suggest an idea for this project
title: ''
labels: ''
assignees: ''
---
I have limited time to develop, so before asking for a feature, please read the FAQ section. Some features will not be implemented by choice. Use the discussion section for anything else.
**Is your feature request related to a problem? Please describe.**
A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
**Describe the solution you'd like**
A clear and concise description of what you want to happen.
**Describe alternatives you've considered**
A clear and concise description of any alternative solutions or features you've considered.
**Additional context**
Add any other context or screenshots about the feature request here.

.gitignore

@@ -0,0 +1,204 @@
# Created by https://www.toptal.com/developers/gitignore/api/visualstudiocode,python
# Edit at https://www.toptal.com/developers/gitignore?templates=visualstudiocode,python
### Python ###
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
models/*.onnx
.vscode
.mypy_cache
models/*
_site/*
# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
# PyBuilder
.pybuilder/
target/
# Jupyter Notebook
.ipynb_checkpoints
# IPython
profile_default/
ipython_config.py
# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
# .python-version
# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock
# poetry
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
#poetry.lock
# pdm
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
#pdm.lock
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
# in version control.
# https://pdm.fming.dev/#use-with-ide
.pdm.toml
# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
__pypackages__/
# Celery stuff
celerybeat-schedule
celerybeat.pid
# SageMath parsed files
*.sage.py
# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
# Spyder project settings
.spyderproject
.spyproject
# Rope project settings
.ropeproject
# mkdocs documentation
/site
# mypy
.mypy_cache/
.dmypy.json
dmypy.json
# Pyre type checker
.pyre/
# pytype static type analyzer
.pytype/
# Cython debug symbols
cython_debug/
# PyCharm
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/
### Python Patch ###
# Poetry local configuration file - https://python-poetry.org/docs/configuration/#local-configuration
poetry.toml
# ruff
.ruff_cache/
# LSP config files
pyrightconfig.json
### VisualStudioCode ###
.vscode/*
!.vscode/settings.json
!.vscode/tasks.json
!.vscode/launch.json
!.vscode/extensions.json
!.vscode/*.code-snippets
# Local History for Visual Studio Code
.history/
# Built Visual Studio Code Extensions
*.vsix
### VisualStudioCode Patch ###
# Ignore all local history of files
.history
.ionide
# End of https://www.toptal.com/developers/gitignore/api/visualstudiocode,python

@@ -0,0 +1,28 @@
## 1.1.0
All changes are also listed in the features section.
+ add inpainting model selection => allows selecting a different model for face inpainting
+ add source face selection => allows selecting the reference face if multiple faces are present in the reference image
+ add select by size => sorts faces by size, from largest to smallest
+ add batch option => allows processing images without using the txt2img or img2img tabs
+ add segmentation mask for the upscaled inpainter (based on the CodeFormer implementation): avoids the square mask and prevents degradation of non-face parts of the image.
## 0.1.0
### Major
+ add multiple face support
+ add face blending support (blends source faces)
+ add face similarity evaluation (compares a face to a reference)
+ add filters to discard images that are not rated similar enough to the reference image and source images
+ add face tools tab
+ face extraction tool
+ face builder tool: builds a face model that can be reused
+ add face models
### Minor
Improved performance by not reprocessing the source face each time.
### Breaking changes
base64 and the API are no longer supported (they will be reintroduced in the future).

@@ -0,0 +1,661 @@
GNU AFFERO GENERAL PUBLIC LICENSE
Version 3, 19 November 2007
Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
Preamble
The GNU Affero General Public License is a free, copyleft license for
software and other kinds of works, specifically designed to ensure
cooperation with the community in the case of network server software.
The licenses for most software and other practical works are designed
to take away your freedom to share and change the works. By contrast,
our General Public Licenses are intended to guarantee your freedom to
share and change all versions of a program--to make sure it remains free
software for all its users.
When we speak of free software, we are referring to freedom, not
price. Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
them if you wish), that you receive source code or can get it if you
want it, that you can change the software or use pieces of it in new
free programs, and that you know you can do these things.
Developers that use our General Public Licenses protect your rights
with two steps: (1) assert copyright on the software, and (2) offer
you this License which gives you legal permission to copy, distribute
and/or modify the software.
A secondary benefit of defending all users' freedom is that
improvements made in alternate versions of the program, if they
receive widespread use, become available for other developers to
incorporate. Many developers of free software are heartened and
encouraged by the resulting cooperation. However, in the case of
software used on network servers, this result may fail to come about.
The GNU General Public License permits making a modified version and
letting the public access it on a server without ever releasing its
source code to the public.
The GNU Affero General Public License is designed specifically to
ensure that, in such cases, the modified source code becomes available
to the community. It requires the operator of a network server to
provide the source code of the modified version running there to the
users of that server. Therefore, public use of a modified version, on
a publicly accessible server, gives the public access to the source
code of the modified version.
An older license, called the Affero General Public License and
published by Affero, was designed to accomplish similar goals. This is
a different license, not a version of the Affero GPL, but Affero has
released a new version of the Affero GPL which permits relicensing under
this license.
The precise terms and conditions for copying, distribution and
modification follow.
TERMS AND CONDITIONS
0. Definitions.
"This License" refers to version 3 of the GNU Affero General Public License.
"Copyright" also means copyright-like laws that apply to other kinds of
works, such as semiconductor masks.
"The Program" refers to any copyrightable work licensed under this
License. Each licensee is addressed as "you". "Licensees" and
"recipients" may be individuals or organizations.
To "modify" a work means to copy from or adapt all or part of the work
in a fashion requiring copyright permission, other than the making of an
exact copy. The resulting work is called a "modified version" of the
earlier work or a work "based on" the earlier work.
A "covered work" means either the unmodified Program or a work based
on the Program.
To "propagate" a work means to do anything with it that, without
permission, would make you directly or secondarily liable for
infringement under applicable copyright law, except executing it on a
computer or modifying a private copy. Propagation includes copying,
distribution (with or without modification), making available to the
public, and in some countries other activities as well.
To "convey" a work means any kind of propagation that enables other
parties to make or receive copies. Mere interaction with a user through
a computer network, with no transfer of a copy, is not conveying.
An interactive user interface displays "Appropriate Legal Notices"
to the extent that it includes a convenient and prominently visible
feature that (1) displays an appropriate copyright notice, and (2)
tells the user that there is no warranty for the work (except to the
extent that warranties are provided), that licensees may convey the
work under this License, and how to view a copy of this License. If
the interface presents a list of user commands or options, such as a
menu, a prominent item in the list meets this criterion.
1. Source Code.
The "source code" for a work means the preferred form of the work
for making modifications to it. "Object code" means any non-source
form of a work.
A "Standard Interface" means an interface that either is an official
standard defined by a recognized standards body, or, in the case of
interfaces specified for a particular programming language, one that
is widely used among developers working in that language.
The "System Libraries" of an executable work include anything, other
than the work as a whole, that (a) is included in the normal form of
packaging a Major Component, but which is not part of that Major
Component, and (b) serves only to enable use of the work with that
Major Component, or to implement a Standard Interface for which an
implementation is available to the public in source code form. A
"Major Component", in this context, means a major essential component
(kernel, window system, and so on) of the specific operating system
(if any) on which the executable work runs, or a compiler used to
produce the work, or an object code interpreter used to run it.
The "Corresponding Source" for a work in object code form means all
the source code needed to generate, install, and (for an executable
work) run the object code and to modify the work, including scripts to
control those activities. However, it does not include the work's
System Libraries, or general-purpose tools or generally available free
programs which are used unmodified in performing those activities but
which are not part of the work. For example, Corresponding Source
includes interface definition files associated with source files for
the work, and the source code for shared libraries and dynamically
linked subprograms that the work is specifically designed to require,
such as by intimate data communication or control flow between those
subprograms and other parts of the work.
The Corresponding Source need not include anything that users
can regenerate automatically from other parts of the Corresponding
Source.
The Corresponding Source for a work in source code form is that
same work.
2. Basic Permissions.
All rights granted under this License are granted for the term of
copyright on the Program, and are irrevocable provided the stated
conditions are met. This License explicitly affirms your unlimited
permission to run the unmodified Program. The output from running a
covered work is covered by this License only if the output, given its
content, constitutes a covered work. This License acknowledges your
rights of fair use or other equivalent, as provided by copyright law.
You may make, run and propagate covered works that you do not
convey, without conditions so long as your license otherwise remains
in force. You may convey covered works to others for the sole purpose
of having them make modifications exclusively for you, or provide you
with facilities for running those works, provided that you comply with
the terms of this License in conveying all material for which you do
not control copyright. Those thus making or running the covered works
for you must do so exclusively on your behalf, under your direction
and control, on terms that prohibit them from making any copies of
your copyrighted material outside their relationship with you.
Conveying under any other circumstances is permitted solely under
the conditions stated below. Sublicensing is not allowed; section 10
makes it unnecessary.
3. Protecting Users' Legal Rights From Anti-Circumvention Law.
No covered work shall be deemed part of an effective technological
measure under any applicable law fulfilling obligations under article
11 of the WIPO copyright treaty adopted on 20 December 1996, or
similar laws prohibiting or restricting circumvention of such
measures.
When you convey a covered work, you waive any legal power to forbid
circumvention of technological measures to the extent such circumvention
is effected by exercising rights under this License with respect to
the covered work, and you disclaim any intention to limit operation or
modification of the work as a means of enforcing, against the work's
users, your or third parties' legal rights to forbid circumvention of
technological measures.
4. Conveying Verbatim Copies.
You may convey verbatim copies of the Program's source code as you
receive it, in any medium, provided that you conspicuously and
appropriately publish on each copy an appropriate copyright notice;
keep intact all notices stating that this License and any
non-permissive terms added in accord with section 7 apply to the code;
keep intact all notices of the absence of any warranty; and give all
recipients a copy of this License along with the Program.
You may charge any price or no price for each copy that you convey,
and you may offer support or warranty protection for a fee.
5. Conveying Modified Source Versions.
You may convey a work based on the Program, or the modifications to
produce it from the Program, in the form of source code under the
terms of section 4, provided that you also meet all of these conditions:
a) The work must carry prominent notices stating that you modified
it, and giving a relevant date.
b) The work must carry prominent notices stating that it is
released under this License and any conditions added under section
7. This requirement modifies the requirement in section 4 to
"keep intact all notices".
c) You must license the entire work, as a whole, under this
License to anyone who comes into possession of a copy. This
License will therefore apply, along with any applicable section 7
additional terms, to the whole of the work, and all its parts,
regardless of how they are packaged. This License gives no
permission to license the work in any other way, but it does not
invalidate such permission if you have separately received it.
d) If the work has interactive user interfaces, each must display
Appropriate Legal Notices; however, if the Program has interactive
interfaces that do not display Appropriate Legal Notices, your
work need not make them do so.
A compilation of a covered work with other separate and independent
works, which are not by their nature extensions of the covered work,
and which are not combined with it such as to form a larger program,
in or on a volume of a storage or distribution medium, is called an
"aggregate" if the compilation and its resulting copyright are not
used to limit the access or legal rights of the compilation's users
beyond what the individual works permit. Inclusion of a covered work
in an aggregate does not cause this License to apply to the other
parts of the aggregate.
6. Conveying Non-Source Forms.
You may convey a covered work in object code form under the terms
of sections 4 and 5, provided that you also convey the
machine-readable Corresponding Source under the terms of this License,
in one of these ways:
a) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by the
Corresponding Source fixed on a durable physical medium
customarily used for software interchange.
b) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by a
written offer, valid for at least three years and valid for as
long as you offer spare parts or customer support for that product
model, to give anyone who possesses the object code either (1) a
copy of the Corresponding Source for all the software in the
product that is covered by this License, on a durable physical
medium customarily used for software interchange, for a price no
more than your reasonable cost of physically performing this
conveying of source, or (2) access to copy the
Corresponding Source from a network server at no charge.
c) Convey individual copies of the object code with a copy of the
written offer to provide the Corresponding Source. This
alternative is allowed only occasionally and noncommercially, and
only if you received the object code with such an offer, in accord
with subsection 6b.
d) Convey the object code by offering access from a designated
place (gratis or for a charge), and offer equivalent access to the
Corresponding Source in the same way through the same place at no
further charge. You need not require recipients to copy the
Corresponding Source along with the object code. If the place to
copy the object code is a network server, the Corresponding Source
may be on a different server (operated by you or a third party)
that supports equivalent copying facilities, provided you maintain
clear directions next to the object code saying where to find the
Corresponding Source. Regardless of what server hosts the
Corresponding Source, you remain obligated to ensure that it is
available for as long as needed to satisfy these requirements.
e) Convey the object code using peer-to-peer transmission, provided
you inform other peers where the object code and Corresponding
Source of the work are being offered to the general public at no
charge under subsection 6d.
A separable portion of the object code, whose source code is excluded
from the Corresponding Source as a System Library, need not be
included in conveying the object code work.
A "User Product" is either (1) a "consumer product", which means any
tangible personal property which is normally used for personal, family,
or household purposes, or (2) anything designed or sold for incorporation
into a dwelling. In determining whether a product is a consumer product,
doubtful cases shall be resolved in favor of coverage. For a particular
product received by a particular user, "normally used" refers to a
typical or common use of that class of product, regardless of the status
of the particular user or of the way in which the particular user
actually uses, or expects or is expected to use, the product. A product
is a consumer product regardless of whether the product has substantial
commercial, industrial or non-consumer uses, unless such uses represent
the only significant mode of use of the product.
"Installation Information" for a User Product means any methods,
procedures, authorization keys, or other information required to install
and execute modified versions of a covered work in that User Product from
a modified version of its Corresponding Source. The information must
suffice to ensure that the continued functioning of the modified object
code is in no case prevented or interfered with solely because
modification has been made.
If you convey an object code work under this section in, or with, or
specifically for use in, a User Product, and the conveying occurs as
part of a transaction in which the right of possession and use of the
User Product is transferred to the recipient in perpetuity or for a
fixed term (regardless of how the transaction is characterized), the
Corresponding Source conveyed under this section must be accompanied
by the Installation Information. But this requirement does not apply
if neither you nor any third party retains the ability to install
modified object code on the User Product (for example, the work has
been installed in ROM).
The requirement to provide Installation Information does not include a
requirement to continue to provide support service, warranty, or updates
for a work that has been modified or installed by the recipient, or for
the User Product in which it has been modified or installed. Access to a
network may be denied when the modification itself materially and
adversely affects the operation of the network or violates the rules and
protocols for communication across the network.
Corresponding Source conveyed, and Installation Information provided,
in accord with this section must be in a format that is publicly
documented (and with an implementation available to the public in
source code form), and must require no special password or key for
unpacking, reading or copying.
7. Additional Terms.
"Additional permissions" are terms that supplement the terms of this
License by making exceptions from one or more of its conditions.
Additional permissions that are applicable to the entire Program shall
be treated as though they were included in this License, to the extent
that they are valid under applicable law. If additional permissions
apply only to part of the Program, that part may be used separately
under those permissions, but the entire Program remains governed by
this License without regard to the additional permissions.
When you convey a copy of a covered work, you may at your option
remove any additional permissions from that copy, or from any part of
it. (Additional permissions may be written to require their own
removal in certain cases when you modify the work.) You may place
additional permissions on material, added by you to a covered work,
for which you have or can give appropriate copyright permission.
Notwithstanding any other provision of this License, for material you
add to a covered work, you may (if authorized by the copyright holders of
that material) supplement the terms of this License with terms:
a) Disclaiming warranty or limiting liability differently from the
terms of sections 15 and 16 of this License; or
b) Requiring preservation of specified reasonable legal notices or
author attributions in that material or in the Appropriate Legal
Notices displayed by works containing it; or
c) Prohibiting misrepresentation of the origin of that material, or
requiring that modified versions of such material be marked in
reasonable ways as different from the original version; or
d) Limiting the use for publicity purposes of names of licensors or
authors of the material; or
e) Declining to grant rights under trademark law for use of some
trade names, trademarks, or service marks; or
f) Requiring indemnification of licensors and authors of that
material by anyone who conveys the material (or modified versions of
it) with contractual assumptions of liability to the recipient, for
any liability that these contractual assumptions directly impose on
those licensors and authors.
All other non-permissive additional terms are considered "further
restrictions" within the meaning of section 10. If the Program as you
received it, or any part of it, contains a notice stating that it is
governed by this License along with a term that is a further
restriction, you may remove that term. If a license document contains
a further restriction but permits relicensing or conveying under this
License, you may add to a covered work material governed by the terms
of that license document, provided that the further restriction does
not survive such relicensing or conveying.
If you add terms to a covered work in accord with this section, you
must place, in the relevant source files, a statement of the
additional terms that apply to those files, or a notice indicating
where to find the applicable terms.
Additional terms, permissive or non-permissive, may be stated in the
form of a separately written license, or stated as exceptions;
the above requirements apply either way.
8. Termination.
You may not propagate or modify a covered work except as expressly
provided under this License. Any attempt otherwise to propagate or
modify it is void, and will automatically terminate your rights under
this License (including any patent licenses granted under the third
paragraph of section 11).
However, if you cease all violation of this License, then your
license from a particular copyright holder is reinstated (a)
provisionally, unless and until the copyright holder explicitly and
finally terminates your license, and (b) permanently, if the copyright
holder fails to notify you of the violation by some reasonable means
prior to 60 days after the cessation.
Moreover, your license from a particular copyright holder is
reinstated permanently if the copyright holder notifies you of the
violation by some reasonable means, this is the first time you have
received notice of violation of this License (for any work) from that
copyright holder, and you cure the violation prior to 30 days after
your receipt of the notice.
Termination of your rights under this section does not terminate the
licenses of parties who have received copies or rights from you under
this License. If your rights have been terminated and not permanently
reinstated, you do not qualify to receive new licenses for the same
material under section 10.
9. Acceptance Not Required for Having Copies.
You are not required to accept this License in order to receive or
run a copy of the Program. Ancillary propagation of a covered work
occurring solely as a consequence of using peer-to-peer transmission
to receive a copy likewise does not require acceptance. However,
nothing other than this License grants you permission to propagate or
modify any covered work. These actions infringe copyright if you do
not accept this License. Therefore, by modifying or propagating a
covered work, you indicate your acceptance of this License to do so.
10. Automatic Licensing of Downstream Recipients.
Each time you convey a covered work, the recipient automatically
receives a license from the original licensors, to run, modify and
propagate that work, subject to this License. You are not responsible
for enforcing compliance by third parties with this License.
An "entity transaction" is a transaction transferring control of an
organization, or substantially all assets of one, or subdividing an
organization, or merging organizations. If propagation of a covered
work results from an entity transaction, each party to that
transaction who receives a copy of the work also receives whatever
licenses to the work the party's predecessor in interest had or could
give under the previous paragraph, plus a right to possession of the
Corresponding Source of the work from the predecessor in interest, if
the predecessor has it or can get it with reasonable efforts.
You may not impose any further restrictions on the exercise of the
rights granted or affirmed under this License. For example, you may
not impose a license fee, royalty, or other charge for exercise of
rights granted under this License, and you may not initiate litigation
(including a cross-claim or counterclaim in a lawsuit) alleging that
any patent claim is infringed by making, using, selling, offering for
sale, or importing the Program or any portion of it.
11. Patents.
A "contributor" is a copyright holder who authorizes use under this
License of the Program or a work on which the Program is based. The
work thus licensed is called the contributor's "contributor version".
A contributor's "essential patent claims" are all patent claims
owned or controlled by the contributor, whether already acquired or
hereafter acquired, that would be infringed by some manner, permitted
by this License, of making, using, or selling its contributor version,
but do not include claims that would be infringed only as a
consequence of further modification of the contributor version. For
purposes of this definition, "control" includes the right to grant
patent sublicenses in a manner consistent with the requirements of
this License.
Each contributor grants you a non-exclusive, worldwide, royalty-free
patent license under the contributor's essential patent claims, to
make, use, sell, offer for sale, import and otherwise run, modify and
propagate the contents of its contributor version.
In the following three paragraphs, a "patent license" is any express
agreement or commitment, however denominated, not to enforce a patent
(such as an express permission to practice a patent or covenant not to
sue for patent infringement). To "grant" such a patent license to a
party means to make such an agreement or commitment not to enforce a
patent against the party.
If you convey a covered work, knowingly relying on a patent license,
and the Corresponding Source of the work is not available for anyone
to copy, free of charge and under the terms of this License, through a
publicly available network server or other readily accessible means,
then you must either (1) cause the Corresponding Source to be so
available, or (2) arrange to deprive yourself of the benefit of the
patent license for this particular work, or (3) arrange, in a manner
consistent with the requirements of this License, to extend the patent
license to downstream recipients. "Knowingly relying" means you have
actual knowledge that, but for the patent license, your conveying the
covered work in a country, or your recipient's use of the covered work
in a country, would infringe one or more identifiable patents in that
country that you have reason to believe are valid.
If, pursuant to or in connection with a single transaction or
arrangement, you convey, or propagate by procuring conveyance of, a
covered work, and grant a patent license to some of the parties
receiving the covered work authorizing them to use, propagate, modify
or convey a specific copy of the covered work, then the patent license
you grant is automatically extended to all recipients of the covered
work and works based on it.
A patent license is "discriminatory" if it does not include within
the scope of its coverage, prohibits the exercise of, or is
conditioned on the non-exercise of one or more of the rights that are
specifically granted under this License. You may not convey a covered
work if you are a party to an arrangement with a third party that is
in the business of distributing software, under which you make payment
to the third party based on the extent of your activity of conveying
the work, and under which the third party grants, to any of the
parties who would receive the covered work from you, a discriminatory
patent license (a) in connection with copies of the covered work
conveyed by you (or copies made from those copies), or (b) primarily
for and in connection with specific products or compilations that
contain the covered work, unless you entered into that arrangement,
or that patent license was granted, prior to 28 March 2007.
Nothing in this License shall be construed as excluding or limiting
any implied license or other defenses to infringement that may
otherwise be available to you under applicable patent law.
12. No Surrender of Others' Freedom.
If conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License. If you cannot convey a
covered work so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you may
not convey it at all. For example, if you agree to terms that obligate you
to collect a royalty for further conveying from those to whom you convey
the Program, the only way you could satisfy both those terms and this
License would be to refrain entirely from conveying the Program.
13. Remote Network Interaction; Use with the GNU General Public License.
Notwithstanding any other provision of this License, if you modify the
Program, your modified version must prominently offer all users
interacting with it remotely through a computer network (if your version
supports such interaction) an opportunity to receive the Corresponding
Source of your version by providing access to the Corresponding Source
from a network server at no charge, through some standard or customary
means of facilitating copying of software. This Corresponding Source
shall include the Corresponding Source for any work covered by version 3
of the GNU General Public License that is incorporated pursuant to the
following paragraph.
Notwithstanding any other provision of this License, you have
permission to link or combine any covered work with a work licensed
under version 3 of the GNU General Public License into a single
combined work, and to convey the resulting work. The terms of this
License will continue to apply to the part which is the covered work,
but the work with which it is combined will remain governed by version
3 of the GNU General Public License.
14. Revised Versions of this License.
The Free Software Foundation may publish revised and/or new versions of
the GNU Affero General Public License from time to time. Such new versions
will be similar in spirit to the present version, but may differ in detail to
address new problems or concerns.
Each version is given a distinguishing version number. If the
Program specifies that a certain numbered version of the GNU Affero General
Public License "or any later version" applies to it, you have the
option of following the terms and conditions either of that numbered
version or of any later version published by the Free Software
Foundation. If the Program does not specify a version number of the
GNU Affero General Public License, you may choose any version ever published
by the Free Software Foundation.
If the Program specifies that a proxy can decide which future
versions of the GNU Affero General Public License can be used, that proxy's
public statement of acceptance of a version permanently authorizes you
to choose that version for the Program.
Later license versions may give you additional or different
permissions. However, no additional obligations are imposed on any
author or copyright holder as a result of your choosing to follow a
later version.
15. Disclaimer of Warranty.
THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
16. Limitation of Liability.
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
SUCH DAMAGES.
17. Interpretation of Sections 15 and 16.
If the disclaimer of warranty and limitation of liability provided
above cannot be given local legal effect according to their terms,
reviewing courts shall apply local law that most closely approximates
an absolute waiver of all civil liability in connection with the
Program, unless a warranty or assumption of liability accompanies a
copy of the Program in return for a fee.
END OF TERMS AND CONDITIONS
How to Apply These Terms to Your New Programs
If you develop a new program, and you want it to be of the greatest
possible use to the public, the best way to achieve this is to make it
free software which everyone can redistribute and change under these terms.
To do so, attach the following notices to the program. It is safest
to attach them to the start of each source file to most effectively
state the exclusion of warranty; and each file should have at least
the "copyright" line and a pointer to where the full notice is found.
<one line to give the program's name and a brief idea of what it does.>
Copyright (C) <year> <name of author>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published
by the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
Also add information on how to contact you by electronic and paper mail.
If your software can interact with users remotely through a computer
network, you should also make sure that it provides a way for users to
get its source. For example, if your program is a web application, its
interface could display a "Source" link that leads users to an archive
of the code. There are many ways you could offer source, and different
solutions will be better for different programs; see section 13 for the
specific requirements.
You should also get your employer (if you work as a programmer) or school,
if any, to sign a "copyright disclaimer" for the program, if necessary.
For more information on this, and how to apply and follow the GNU AGPL, see
<https://www.gnu.org/licenses/>.

@@ -0,0 +1,310 @@
# FaceSwapLab 1.1 for a1111/Vlad
Please read the documentation here: http://glucause.github.io/sd-webui-faceswaplab/
FaceSwapLab is an extension for Stable Diffusion that simplifies face-swapping. It evolved from sd-webui-faceswap and parts of sd-webui-roop. However, a substantial amount of the code has been rewritten to improve performance and to better manage masks.
Some key features include the ability to reuse faces via checkpoints, multiple face units, batch image processing, face sorting by size or gender, and support for Vladmantic. It also provides a face inpainting feature.
![](docs/main_interface.png)
While FaceSwapLab is still under development, it has reached a good level of stability, making it a reliable tool for those interested in face-swapping within the Stable Diffusion environment. As with all projects of this type, it's expected to improve and evolve over time.
### Disclaimer
In summary: **This extension should not be forked to provide a public easy way to circumvent NSFW filtering.**
This extension is not intended to enable the creation of not safe for work (NSFW) or deepfake content. While the code for this extension is licensed under the AGPL in order to be in compliance with models and other source materials, it's important to stress that we strongly discourage any attempts to fork this project to create an uncensored version. Any modifications to the code to enable the production of such content would be contrary to the ethical guidelines we advocate for.
We will comply with European regulations regarding this type of software. As required by law, the code may include both visible and invisible watermarks. If your local laws prohibit the use of this extension, you should not use it.
From an ethical perspective, the main goal of this extension is to generate consistent images by swapping faces. It's important to note that we've done our best to integrate censorship features. However, when users can access the source code, they might bypass these censorship measures. That's why we urge users to use this extension responsibly and avoid any malicious use. We emphasize the importance of respecting people's privacy and consent when swapping faces in images. We discourage any activities that could harm others, invade their privacy, or negatively affect their well-being.
Additionally, we believe it's important to make the public aware of these tools and the ease with which deepfakes can be created. As technology improves, we need to be more critical and skeptical when we encounter media content. By promoting media literacy, we can reduce the negative impact of misusing these tools and encourage responsible use in the digital world.
We accept no liability for this code repository if a user violates their country's legal or ethical rules.
## Model License
This software utilizes the pre-trained models `buffalo_l` and `inswapper_128.onnx`, which are provided by InsightFace. These models are included under the following conditions:
_InsightFace's pre-trained models are available for non-commercial research purposes only. This includes both auto-downloading models and manually downloaded models._ (from the [InsightFace license](https://github.com/deepinsight/insightface/tree/master/python-package))
Users of this software must strictly adhere to these conditions of use. The developers and maintainers of this software are not responsible for any misuse of InsightFace's pre-trained models.
Please note that if you intend to use this software for any commercial purposes, you will need to train your own models or find models that can be used commercially.
### Features
+ **Face Unit Concept**: Similar to ControlNet, the extension introduces the concept of a face unit. You can configure up to 10 units (3 by default) in the SD settings.
![](docs/face_units.png)
+ **Vladmantic and a1111 Support**
+ **Batch Processing**
+ **Inpainting Fixes**: supports “only masked” and mask inpainting.
+ **Performance Improvements**: The overall performance of the software has been enhanced.
+ **FaceSwapLab Tab**: provides various tools (build, compare, extract, batch).
![](docs/tab.png)
+ **FaceSwapLab Settings**: FaceSwapLab settings are now part of the sd settings. To access them, navigate to the sd settings section.
![](docs/settings.png)
+ **Face Reuse Via Checkpoints**: The FaceTools tab now allows creating checkpoints, which facilitate face reuse. When a checkpoint is used, it takes precedence over the reference image, and the reference source image is discarded.
![](docs/checkpoints.png)
![](docs/checkpoints_use.png)
+ **Gender Detection**: The program can now detect gender based on faces.
![](docs/gender.png)
+ **Face Combination (Blending)**: Multiple versions of a face can be combined to enhance the swapping result. This blending happens during checkpoint creation.
![](docs/blend_face.png)
![](docs/testein.png)
+ **Preserve Original Images**: You can opt to keep original images before the swapping process.
![](docs/keep_orig.png)
+ **Multiple Face Versions for Replacement**: The program allows the use of multiple versions of the same face for replacement.
![](docs/multiple_face_src.png)
+ **Face Similarity and Filtering**: You can compare faces against the reference and/or source images.
![](docs/similarity.png)
+ **Face Comparison**: a dedicated face comparison feature.
![](docs/compare.png)
+ **Face Extraction**: face extraction with or without upscaling.
![](docs/extract.png)
+ **Improved Post-Processing**: CodeFormer, GFPGAN, upscaling.
![](docs/post-processing.png)
+ **Post Inpainting**: This feature allows the application of image-to-image inpainting specifically to faces.
![](docs/postinpainting.png)
![](docs/postinpainting_result.png)
+ **Upscaled Inswapper**: The program now includes an upscaled inswapper option, which improves results by incorporating upsampling, sharpness adjustment, and color correction before the face is merged into the original image.
![](docs/upscalled_swapper.png)
+ **API with typing support**:
```python
import requests

from client_utils import FaceSwapRequest, FaceSwapUnit, PostProcessingOptions, FaceSwapResponse, pil_to_base64

address = "http://127.0.0.1:7860"

# First face unit:
unit1 = FaceSwapUnit(
    source_img=pil_to_base64("../../references/man.png"),  # The face you want to use
    faces_index=(0,),  # Replace the first face
)

# Second face unit:
unit2 = FaceSwapUnit(
    source_img=pil_to_base64("../../references/woman.png"),  # The face you want to use
    same_gender=True,
    faces_index=(0,),  # Replace the first woman, since same_gender is on
)

# Post-processing config:
pp = PostProcessingOptions(
    face_restorer_name="CodeFormer",
    codeformer_weight=0.5,
    restorer_visibility=1,
)

# Prepare the request
request = FaceSwapRequest(
    image=pil_to_base64("test_image.png"),
    units=[unit1, unit2],
    postprocessing=pp,
)

result = requests.post(
    url=f"{address}/faceswaplab/swap_face",
    data=request.json(),
    headers={"Content-Type": "application/json; charset=utf-8"},
)
response = FaceSwapResponse.parse_obj(result.json())

for img, info in zip(response.pil_images, response.infos):
    img.show(title=info)
```
## Installation
Before beginning the installation process, if you are using Windows, you need to download and install [Visual Studio](https://visualstudio.microsoft.com/downloads/). During the installation process, ensure that the Python and C++ packages are included.
To install the extension, follow the steps below:
1. Open the `web-ui` application and navigate to the "Extensions" tab.
2. Use the URL `https://github.com/glucauze/sd-webui-faceswaplab` in the "install from URL" section.
3. Close the `web-ui` application and reopen it.
If you encounter the error `'NoneType' object has no attribute 'get'`, take the following steps:
1. Download the [inswapper_128.onnx](https://huggingface.co/henryruhs/faceswaplab/resolve/main/inswapper_128.onnx) model.
2. Place the downloaded model inside the `<webui_dir>/models/faceswaplab/` directory.
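If the manual download fails or you prefer to script it, here is a minimal sketch (the destination path assumes the standard `<webui_dir>/models/faceswaplab/` layout mentioned above; run it from your webui directory):
```python
import os

import requests

# Same model URL as in step 1 above.
MODEL_URL = "https://huggingface.co/henryruhs/faceswaplab/resolve/main/inswapper_128.onnx"

dest_dir = os.path.join("models", "faceswaplab")
os.makedirs(dest_dir, exist_ok=True)
dest_path = os.path.join(dest_dir, "inswapper_128.onnx")

with requests.get(MODEL_URL, stream=True, timeout=60) as resp:
    resp.raise_for_status()
    with open(dest_path, "wb") as f:
        for chunk in resp.iter_content(chunk_size=1 << 20):
            f.write(chunk)
print(f"Saved model to {dest_path}")
```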
## Usage
To use this extension, follow the steps below:
1. Navigate to the "faceswaplab" drop-down menu and import an image that contains a face.
2. Enable the extension by checking the "Enable" checkbox.
3. After performing the steps above, the generated result will have the face you selected.
### FAQ
Our issue tracker often contains requests that may originate from a misunderstanding of the software's functionality. We aim to address these queries; however, due to time constraints, we may not be able to respond to each request individually. This FAQ section serves as a preliminary source of information for commonly raised concerns. We recommend reviewing these before submitting an issue.
#### Improving Quality of Results
To get better quality results:
1. Ensure that the "Restore Face" option is enabled.
2. Consider using the "Upscaler" option. For finer control, you can use an upscaler from the "Extras" tab.
3. Use img2img with the denoise parameter set to `0.1`. Gradually increase this parameter until you achieve a balance of quality and resemblance.
You can also use the upscaled inswapper. I mainly use it with the following options:
![](docs/inswapper_options.png)
#### Replacing Specific Faces
If an image contains multiple faces and you want to swap specific ones:
1. Use the "Comma separated face number(s)" option to select the face numbers you wish to swap; the API equivalent is sketched below.
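For reference, the same selection can be expressed through the API shown earlier via the `faces_index` parameter of `FaceSwapUnit` (indices are zero-based):
```python
from client_utils import FaceSwapUnit, pil_to_base64

# Swap only the second and third detected faces (indices 1 and 2).
unit = FaceSwapUnit(
    source_img=pil_to_base64("../../references/man.png"),
    faces_index=(1, 2),
)
```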
#### Issues with Face Swapping
If a face did not get swapped, please check the following:
1. Ensure that the "Enable" option has been checked.
2. If you've ensured the above and your console doesn't show any errors, it means that FaceSwapLab was unable to detect a face in your image, or the image was detected as NSFW (Not Safe For Work).
#### Controversy Surrounding NSFW Content Filtering
We understand that some users might wish to have the option to disable content filtering, particularly for Not Safe for Work (NSFW) content. However, it's important to clarify our stance on this matter. We are not categorically against NSFW content. The concern arises specifically when the software is used to superimpose the face of a real person onto NSFW content.
If it were reliably possible to ensure that the faces being swapped were synthetic and not tied to real individuals, the inclusion of NSFW content would pose less of an ethical dilemma. However, in the absence of such a guarantee, making this feature easily accessible could potentially lead to misuse, which is an ethically risky scenario.
It is not our intention to impose our moral perspectives. Our goal is to comply with the requirements of the models used in the software and establish a balanced boundary that respects individual privacy and prevents potential misuse.
Requests to provide an option to disable the content filter will not be considered.
#### What is the role of the segmentation mask for the upscaled swapper?
The segmentation mask for the upscaled swapper is designed to avoid the square mask and prevent degradation of the non-face parts of the image. It is based on the CodeFormer implementation. If "Use improved segmented mask (use pastenet to mask only the face)" and "upscaled inswapper" are checked in the settings, the mask will only cover the face and will not be square. However, depending on the image, this may introduce other problems, such as artifacts at the border of the face.
#### How can I increase the speed of the upscaled inswapper?
If CodeFormer is enabled in the upscaled inswapper, you can choose LANCZOS as the upscaler for speed. The result is generally satisfactory.
#### Sharpening and color correction in the upscaled swapper
Sharpening can provide more natural results, but it may also add artifacts. The same goes for color correction. By default, these options are set to False.
#### I don't see any extension after restart
If you do not see any extensions after restarting, it is likely due to missing requirements, particularly if you're using Windows. Follow the instructions below:
1. Verify that there are no error messages in the terminal.
2. Double-check the Installation section of this document to ensure all the steps have been followed.
If you are running a specific configuration (for example, Python 3.11), please test the extension with a clean installation of the stable version of the Stable Diffusion web UI before reporting an issue. This can help isolate whether the problem is related to your specific configuration or is a broader issue with the extension.
#### Understanding Quality of Results
The model used in this extension initially reduces the resolution of the target face before generating a 128x128 image. This means that regardless of the original image's size, the resolution of the processed faces will not exceed 128x128. Consequently, this lower resolution might lead to quality limitations in the results.
The output of this process might not meet high expectations, but the use of the face restorer and upscaler can help improve these results to some extent.
The quality of results is inherently tied to the capabilities of the model and cannot be enhanced beyond its design. FaceSwapLab merely provides an interface for the underlying model. Therefore, unless the InsightFace model is retrained and the necessary alterations are made in the library (see below), the resulting quality may not meet high expectations.
Consider this extension as a low-cost alternative to more sophisticated tools like Lora, or as an addition to such tools. It's important to **maintain realistic expectations of the results** provided by this extension.
#### Issue: Incorrect Gender Detection
The gender detection functionality is handled by the underlying analysis model. As such, there might be instances where the detected gender may not be accurate. This is a limitation of the model and we currently do not have a way to improve this accuracy from our end.
#### Why isn't GPU support included?
While implementing GPU support may seem straightforward (it only requires a modification to the onnxruntime implementation and a change of providers in the swapper), there are reasons we haven't included it as a standard option.
The primary consideration is the substantial VRAM usage of the SD models. Running the model on the GPU doesn't result in significant performance gains with the current state of the software. Moreover, GPU support only becomes truly beneficial when processing large numbers of frames or videos, and our experience indicates that this tends to cause more issues than it resolves.
Consequently, requests for GPU support as a standard feature will not be considered.
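For context, the provider change alluded to above is a one-line difference in how the onnxruntime session is created; a hypothetical sketch (the extension does not expose this option):
```python
import onnxruntime

MODEL_PATH = "models/faceswaplab/inswapper_128.onnx"

# CPU-only session, as used by the extension:
session = onnxruntime.InferenceSession(
    MODEL_PATH, providers=["CPUExecutionProvider"]
)

# Hypothetical GPU variant (requires onnxruntime-gpu and spare VRAM):
# session = onnxruntime.InferenceSession(
#     MODEL_PATH, providers=["CUDAExecutionProvider", "CPUExecutionProvider"]
# )
```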
#### What is the 'Upscaled Inswapper' Option in SD FaceSwapLab?
The 'Upscaled Inswapper' is an option in SD FaceSwapLab that upscales each face with an upscaler prior to its integration into the image. This is achieved by modifying a small segment of the InsightFace code.
The purpose of this feature is to enhance the quality of the face in the final image. While this process might slightly increase the processing time, it can deliver improved results. In certain cases, this could even eliminate the need for additional tools such as Codeformer or GFPGAN in postprocessing.
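As a rough, hypothetical illustration of the idea (not the extension's actual code path), upscaling and lightly sharpening a face crop before it is merged back might look like this:
```python
from PIL import Image, ImageFilter

def upscale_face_crop(face: Image.Image, factor: int = 4) -> Image.Image:
    """Upscale a face crop and lightly sharpen it before merging it back."""
    upscaled = face.resize(
        (face.width * factor, face.height * factor), Image.LANCZOS
    )
    # Mild unsharp mask; heavy sharpening adds artifacts (see the FAQ entry above).
    return upscaled.filter(ImageFilter.UnsharpMask(radius=2, percent=50))
```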
#### What is Face Blending?
InsightFace works by creating an embedding for each face. An embedding is essentially a condensed representation of the facial characteristics.
The face blending process allows for the averaging of multiple face embeddings to generate a blended or composite face.
The benefits of face blending include:
+ Generation of a high-quality embedding based on multiple faces, thereby improving the face's representative accuracy.
+ Creation of a composite face that includes features from multiple individuals, which can be useful for diverse face recognition scenarios.
To create a composite face, two methods are available:
1. Use the Checkpoint Builder: This tool allows you to save a set of face embeddings that can be loaded later to create a blended face.
2. Use Image Batch Sources: By dropping several images into this tool, you can generate a blended face based on the faces in the provided images.
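Conceptually, the blending step is just an average over embeddings. A minimal sketch, assuming the embeddings have already been extracted as NumPy vectors:
```python
import numpy as np

def blend_embeddings(embeddings: list[np.ndarray]) -> np.ndarray:
    """Average several face embeddings into one composite embedding."""
    blended = np.mean(np.stack(embeddings), axis=0)
    # Re-normalize so the composite has unit length like the inputs.
    return blended / np.linalg.norm(blended)
```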
#### What is a face checkpoint?
A face checkpoint is a saved embedding of a face, generated from multiple images. This is accomplished via the build tool located in the `sd` tab. The build tool blends all images dropped into the tab and saves the resulting embedding to a file.
The primary advantage of face checkpoints is their size. An embedding is only around 2KB, meaning it's lightweight and can be reused later without requiring additional calculations.
Face checkpoints are saved as `.pkl` files. Please be aware that exchanging `.pkl` files carries potential security risks. These files are not secure by design and can execute malicious code when loaded. Therefore, exercise extreme caution when sharing or receiving this type of file.
#### How is similarity determined?
The similarity between faces is established by comparing their embeddings. In this context, a score of 1 signifies that the two faces are identical, while a score of 0 indicates that the faces are different.
You can remove images from the results if the generated image does not match the reference. This is done by adjusting the sliders in the "Faces" tab.
#### Which model is used?
The model employed here is based on InsightFace's "InSwapper". For more specific information, you can refer [here](https://github.com/deepinsight/insightface/blob/fc622003d5410a64c96024563d7a093b2a55487c/python-package/insightface/model_zoo/inswapper.py#L12).
This model was temporarily made public by the InsightFace team for research purposes. They have not provided any details about the training methodology.
The model generates faces with a resolution of 128x128, which is relatively low. For better results, the generated faces need to be upscaled. The InsightFace code is not designed for higher resolutions (see the [Router](https://github.com/deepinsight/insightface/blob/fc622003d5410a64c96024563d7a093b2a55487c/python-package/insightface/model_zoo/model_zoo.py#L35) class for more information).
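As a simple illustration of the upscaling step, a naive sketch with PIL (not the upscaled inswapper itself; file names are placeholders):

```python
# Hypothetical sketch: naively enlarging a 128x128 swapped face with PIL.
from PIL import Image

face = Image.open("swapped_face_128.png")  # placeholder for the model's output
upscaled = face.resize((512, 512), Image.LANCZOS)  # 4x enlargement
upscaled.save("swapped_face_512.png")
```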
#### Why not use SimSwap?
SimSwap models are based on older InsightFace architectures, and SimSwap has not been released as a Python package. Its incorporation would complicate the process, and it does not guarantee any substantial gain.
If you manage to implement SimSwap successfully, feel free to submit a pull request.

5
docs/.gitignore vendored

@ -0,0 +1,5 @@
_site
.sass-cache
.jekyll-cache
.jekyll-metadata
vendor

@ -0,0 +1,25 @@
---
permalink: /404.html
layout: default
---
<style type="text/css" media="screen">
.container {
margin: 10px auto;
max-width: 600px;
text-align: center;
}
h1 {
margin: 30px 0;
font-size: 4em;
line-height: 1;
letter-spacing: -1px;
}
</style>
<div class="container">
<h1>404</h1>
<p><strong>Page not found :(</strong></p>
<p>The requested page could not be found.</p>
</div>

@ -0,0 +1,33 @@
source "https://rubygems.org"
# Hello! This is where you manage which Jekyll version is used to run.
# When you want to use a different version, change it below, save the
# file and run `bundle install`. Run Jekyll with `bundle exec`, like so:
#
# bundle exec jekyll serve
#
# This will help ensure the proper Jekyll version is running.
# Happy Jekylling!
gem "jekyll", "~> 3.9.3"
# This is the default theme for new Jekyll sites. You may change this to anything you like.
gem "minima", "~> 2.5.1"
# If you want to use GitHub Pages, remove the "gem "jekyll"" above and
# uncomment the line below. To upgrade, run `bundle update github-pages`.
gem "github-pages", "~> 228", group: :jekyll_plugins
group :jekyll_plugins do
gem "webrick"
end
# Windows and JRuby do not include zoneinfo files, so bundle the tzinfo-data gem
# and associated library.
platforms :mingw, :x64_mingw, :mswin, :jruby do
gem "tzinfo", ">= 1", "< 3"
gem "tzinfo-data"
end
# Performance-booster for watching directories on Windows
gem "wdm", "~> 0.1.1", :platforms => [:mingw, :x64_mingw, :mswin]
# Lock `http_parser.rb` gem to `v0.6.x` on JRuby builds since newer versions of the gem
# do not have a Java counterpart.
gem "http_parser.rb", "~> 0.6.0", :platforms => [:jruby]

@ -0,0 +1,266 @@
GEM
remote: https://rubygems.org/
specs:
activesupport (7.0.6)
concurrent-ruby (~> 1.0, >= 1.0.2)
i18n (>= 1.6, < 2)
minitest (>= 5.1)
tzinfo (~> 2.0)
addressable (2.8.4)
public_suffix (>= 2.0.2, < 6.0)
coffee-script (2.4.1)
coffee-script-source
execjs
coffee-script-source (1.11.1)
colorator (1.1.0)
commonmarker (0.23.9)
concurrent-ruby (1.2.2)
dnsruby (1.70.0)
simpleidn (~> 0.2.1)
em-websocket (0.5.3)
eventmachine (>= 0.12.9)
http_parser.rb (~> 0)
ethon (0.16.0)
ffi (>= 1.15.0)
eventmachine (1.2.7)
execjs (2.8.1)
faraday (2.7.10)
faraday-net_http (>= 2.0, < 3.1)
ruby2_keywords (>= 0.0.4)
faraday-net_http (3.0.2)
ffi (1.15.5)
forwardable-extended (2.6.0)
gemoji (3.0.1)
github-pages (228)
github-pages-health-check (= 1.17.9)
jekyll (= 3.9.3)
jekyll-avatar (= 0.7.0)
jekyll-coffeescript (= 1.1.1)
jekyll-commonmark-ghpages (= 0.4.0)
jekyll-default-layout (= 0.1.4)
jekyll-feed (= 0.15.1)
jekyll-gist (= 1.5.0)
jekyll-github-metadata (= 2.13.0)
jekyll-include-cache (= 0.2.1)
jekyll-mentions (= 1.6.0)
jekyll-optional-front-matter (= 0.3.2)
jekyll-paginate (= 1.1.0)
jekyll-readme-index (= 0.3.0)
jekyll-redirect-from (= 0.16.0)
jekyll-relative-links (= 0.6.1)
jekyll-remote-theme (= 0.4.3)
jekyll-sass-converter (= 1.5.2)
jekyll-seo-tag (= 2.8.0)
jekyll-sitemap (= 1.4.0)
jekyll-swiss (= 1.0.0)
jekyll-theme-architect (= 0.2.0)
jekyll-theme-cayman (= 0.2.0)
jekyll-theme-dinky (= 0.2.0)
jekyll-theme-hacker (= 0.2.0)
jekyll-theme-leap-day (= 0.2.0)
jekyll-theme-merlot (= 0.2.0)
jekyll-theme-midnight (= 0.2.0)
jekyll-theme-minimal (= 0.2.0)
jekyll-theme-modernist (= 0.2.0)
jekyll-theme-primer (= 0.6.0)
jekyll-theme-slate (= 0.2.0)
jekyll-theme-tactile (= 0.2.0)
jekyll-theme-time-machine (= 0.2.0)
jekyll-titles-from-headings (= 0.5.3)
jemoji (= 0.12.0)
kramdown (= 2.3.2)
kramdown-parser-gfm (= 1.1.0)
liquid (= 4.0.4)
mercenary (~> 0.3)
minima (= 2.5.1)
nokogiri (>= 1.13.6, < 2.0)
rouge (= 3.26.0)
terminal-table (~> 1.4)
github-pages-health-check (1.17.9)
addressable (~> 2.3)
dnsruby (~> 1.60)
octokit (~> 4.0)
public_suffix (>= 3.0, < 5.0)
typhoeus (~> 1.3)
html-pipeline (2.14.3)
activesupport (>= 2)
nokogiri (>= 1.4)
http_parser.rb (0.8.0)
i18n (1.14.1)
concurrent-ruby (~> 1.0)
jekyll (3.9.3)
addressable (~> 2.4)
colorator (~> 1.0)
em-websocket (~> 0.5)
i18n (>= 0.7, < 2)
jekyll-sass-converter (~> 1.0)
jekyll-watch (~> 2.0)
kramdown (>= 1.17, < 3)
liquid (~> 4.0)
mercenary (~> 0.3.3)
pathutil (~> 0.9)
rouge (>= 1.7, < 4)
safe_yaml (~> 1.0)
jekyll-avatar (0.7.0)
jekyll (>= 3.0, < 5.0)
jekyll-coffeescript (1.1.1)
coffee-script (~> 2.2)
coffee-script-source (~> 1.11.1)
jekyll-commonmark (1.4.0)
commonmarker (~> 0.22)
jekyll-commonmark-ghpages (0.4.0)
commonmarker (~> 0.23.7)
jekyll (~> 3.9.0)
jekyll-commonmark (~> 1.4.0)
rouge (>= 2.0, < 5.0)
jekyll-default-layout (0.1.4)
jekyll (~> 3.0)
jekyll-feed (0.15.1)
jekyll (>= 3.7, < 5.0)
jekyll-gist (1.5.0)
octokit (~> 4.2)
jekyll-github-metadata (2.13.0)
jekyll (>= 3.4, < 5.0)
octokit (~> 4.0, != 4.4.0)
jekyll-include-cache (0.2.1)
jekyll (>= 3.7, < 5.0)
jekyll-mentions (1.6.0)
html-pipeline (~> 2.3)
jekyll (>= 3.7, < 5.0)
jekyll-optional-front-matter (0.3.2)
jekyll (>= 3.0, < 5.0)
jekyll-paginate (1.1.0)
jekyll-readme-index (0.3.0)
jekyll (>= 3.0, < 5.0)
jekyll-redirect-from (0.16.0)
jekyll (>= 3.3, < 5.0)
jekyll-relative-links (0.6.1)
jekyll (>= 3.3, < 5.0)
jekyll-remote-theme (0.4.3)
addressable (~> 2.0)
jekyll (>= 3.5, < 5.0)
jekyll-sass-converter (>= 1.0, <= 3.0.0, != 2.0.0)
rubyzip (>= 1.3.0, < 3.0)
jekyll-sass-converter (1.5.2)
sass (~> 3.4)
jekyll-seo-tag (2.8.0)
jekyll (>= 3.8, < 5.0)
jekyll-sitemap (1.4.0)
jekyll (>= 3.7, < 5.0)
jekyll-swiss (1.0.0)
jekyll-theme-architect (0.2.0)
jekyll (> 3.5, < 5.0)
jekyll-seo-tag (~> 2.0)
jekyll-theme-cayman (0.2.0)
jekyll (> 3.5, < 5.0)
jekyll-seo-tag (~> 2.0)
jekyll-theme-dinky (0.2.0)
jekyll (> 3.5, < 5.0)
jekyll-seo-tag (~> 2.0)
jekyll-theme-hacker (0.2.0)
jekyll (> 3.5, < 5.0)
jekyll-seo-tag (~> 2.0)
jekyll-theme-leap-day (0.2.0)
jekyll (> 3.5, < 5.0)
jekyll-seo-tag (~> 2.0)
jekyll-theme-merlot (0.2.0)
jekyll (> 3.5, < 5.0)
jekyll-seo-tag (~> 2.0)
jekyll-theme-midnight (0.2.0)
jekyll (> 3.5, < 5.0)
jekyll-seo-tag (~> 2.0)
jekyll-theme-minimal (0.2.0)
jekyll (> 3.5, < 5.0)
jekyll-seo-tag (~> 2.0)
jekyll-theme-modernist (0.2.0)
jekyll (> 3.5, < 5.0)
jekyll-seo-tag (~> 2.0)
jekyll-theme-primer (0.6.0)
jekyll (> 3.5, < 5.0)
jekyll-github-metadata (~> 2.9)
jekyll-seo-tag (~> 2.0)
jekyll-theme-slate (0.2.0)
jekyll (> 3.5, < 5.0)
jekyll-seo-tag (~> 2.0)
jekyll-theme-tactile (0.2.0)
jekyll (> 3.5, < 5.0)
jekyll-seo-tag (~> 2.0)
jekyll-theme-time-machine (0.2.0)
jekyll (> 3.5, < 5.0)
jekyll-seo-tag (~> 2.0)
jekyll-titles-from-headings (0.5.3)
jekyll (>= 3.3, < 5.0)
jekyll-watch (2.2.1)
listen (~> 3.0)
jemoji (0.12.0)
gemoji (~> 3.0)
html-pipeline (~> 2.2)
jekyll (>= 3.0, < 5.0)
kramdown (2.3.2)
rexml
kramdown-parser-gfm (1.1.0)
kramdown (~> 2.0)
liquid (4.0.4)
listen (3.8.0)
rb-fsevent (~> 0.10, >= 0.10.3)
rb-inotify (~> 0.9, >= 0.9.10)
mercenary (0.3.6)
minima (2.5.1)
jekyll (>= 3.5, < 5.0)
jekyll-feed (~> 0.9)
jekyll-seo-tag (~> 2.1)
minitest (5.18.1)
nokogiri (1.15.3-x86_64-linux)
racc (~> 1.4)
octokit (4.25.1)
faraday (>= 1, < 3)
sawyer (~> 0.9)
pathutil (0.16.2)
forwardable-extended (~> 2.6)
public_suffix (4.0.7)
racc (1.7.1)
rb-fsevent (0.11.2)
rb-inotify (0.10.1)
ffi (~> 1.0)
rexml (3.2.5)
rouge (3.26.0)
ruby2_keywords (0.0.5)
rubyzip (2.3.2)
safe_yaml (1.0.5)
sass (3.7.4)
sass-listen (~> 4.0.0)
sass-listen (4.0.0)
rb-fsevent (~> 0.9, >= 0.9.4)
rb-inotify (~> 0.9, >= 0.9.7)
sawyer (0.9.2)
addressable (>= 2.3.5)
faraday (>= 0.17.3, < 3)
simpleidn (0.2.1)
unf (~> 0.1.4)
terminal-table (1.8.0)
unicode-display_width (~> 1.1, >= 1.1.1)
typhoeus (1.4.0)
ethon (>= 0.9.0)
tzinfo (2.0.6)
concurrent-ruby (~> 1.0)
unf (0.1.4)
unf_ext
unf_ext (0.0.8.2)
unicode-display_width (1.8.0)
webrick (1.8.1)
PLATFORMS
x86_64-linux
DEPENDENCIES
github-pages (~> 228)
http_parser.rb (~> 0.6.0)
jekyll (~> 3.9.3)
minima (~> 2.5.1)
tzinfo (>= 1, < 3)
tzinfo-data
wdm (~> 0.1.1)
webrick
BUNDLED WITH
2.4.17

@ -0,0 +1,58 @@
# Welcome to Jekyll!
#
# This config file is meant for settings that affect your whole blog, values
# which you are expected to set up once and rarely edit after that. If you find
# yourself editing this file very often, consider using Jekyll's data files
# feature for the data you need to update frequently.
#
# For technical reasons, this file is *NOT* reloaded automatically when you use
# 'bundle exec jekyll serve'. If you change this file, please restart the server process.
#
# If you need help with YAML syntax, here are some quick references for you:
# https://learn-the-web.algonquindesign.ca/topics/markdown-yaml-cheat-sheet/#yaml
# https://learnxinyminutes.com/docs/yaml/
#
# Site settings
# These are used to personalize your new site. If you look in the HTML files,
# you will see them accessed via {{ site.title }}, {{ site.email }}, and so on.
# You can create any custom variable you would like, and they will be accessible
# in the templates via {{ site.myvariable }}.
title: FaceSwap Lab
description: >- # this means to ignore newlines until "baseurl:"
FaceSwapLab is an extension for Stable Diffusion that simplifies face-swapping.
Some key functions of FaceSwapLab include the ability to reuse faces via checkpoints,
batch process images, sort faces based on size or gender, and support for vladmantic.
domain: glucauze.github.io
url: https://glucauze.github.io
baseurl: /sd-webui-faceswaplab/
# Build settings
theme: minima
author:
name: Glucauze
email: ""
minima:
skin: dark
# Exclude from processing.
# The following items will not be processed, by default.
# Any item listed under the `exclude:` key here will be automatically added to
# the internal "default list".
#
# Excluded items can be processed by explicitly listing the directories or
# their entries' file path in the `include:` list.
#
# exclude:
# - .sass-cache/
# - .jekyll-cache/
# - gemfiles/
# - Gemfile
# - Gemfile.lock
# - node_modules/
# - vendor/bundle/
# - vendor/cache/
# - vendor/gems/
# - vendor/ruby/

@ -0,0 +1,59 @@
@charset "utf-8";
// Define defaults for each variable.
$base-font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, Helvetica, Arial, sans-serif, "Apple Color Emoji", "Segoe UI Emoji", "Segoe UI Symbol" !default;
$base-font-size: 16px !default;
$base-font-weight: 400 !default;
$small-font-size: $base-font-size * 0.875 !default;
$base-line-height: 1.5 !default;
$spacing-unit: 30px !default;
$text-color: #e7f6f2!default;
$background-color: #2c3333 !default;
$brand-color: #FF8C6A !default;
$grey-color: lighten($brand-color, 30%) !default;
$grey-color-light: lighten($grey-color, 40%) !default;
$grey-color-dark: darken($grey-color, 25%) !default;
$table-text-align: left !default;
// Width of the content area
$content-width: 800px !default;
$on-palm: 600px !default;
$on-laptop: 800px !default;
// Use media queries like this:
// @include media-query($on-palm) {
// .wrapper {
// padding-right: $spacing-unit / 2;
// padding-left: $spacing-unit / 2;
// }
// }
@mixin media-query($device) {
@media screen and (max-width: $device) {
@content;
}
}
@mixin relative-font-size($ratio) {
font-size: $base-font-size * $ratio;
}
// Import partials.
@import
"minima/base",
"minima/layout",
"minima/syntax-highlighting"
;
img {
  display: block;
  margin: 1em auto;
}

@ -0,0 +1,247 @@
/**
* Reset some basic elements
*/
body, h1, h2, h3, h4, h5, h6,
p, blockquote, pre, hr,
dl, dd, ol, ul, figure {
margin: 0;
padding: 0;
}
/**
* Basic styling
*/
body {
font: $base-font-weight #{$base-font-size}/#{$base-line-height} $base-font-family;
color: $text-color;
background-color: $background-color;
-webkit-text-size-adjust: 100%;
-webkit-font-feature-settings: "kern" 1;
-moz-font-feature-settings: "kern" 1;
-o-font-feature-settings: "kern" 1;
font-feature-settings: "kern" 1;
font-kerning: normal;
display: flex;
min-height: 100vh;
flex-direction: column;
}
/**
* Set `margin-bottom` to maintain vertical rhythm
*/
h1, h2, h3, h4, h5, h6,
p, blockquote, pre,
ul, ol, dl, figure,
%vertical-rhythm {
margin-bottom: $spacing-unit / 2;
}
/**
* `main` element
*/
main {
display: block; /* Default value of `display` of `main` element is 'inline' in IE 11. */
}
/**
* Images
*/
img {
max-width: 100%;
vertical-align: middle;
}
/**
* Figures
*/
figure > img {
display: block;
}
figcaption {
font-size: $small-font-size;
}
/**
* Lists
*/
ul, ol {
margin-left: $spacing-unit;
}
li {
> ul,
> ol {
margin-bottom: 0;
}
}
/**
* Headings
*/
h1, h2, h3, h4, h5, h6 {
font-weight: $base-font-weight;
}
/**
* Links
*/
a {
color: $brand-color;
text-decoration: none;
&:visited {
color: darken($brand-color, 15%);
}
&:hover {
color: $text-color;
text-decoration: underline;
}
.social-media-list &:hover {
text-decoration: none;
.username {
text-decoration: underline;
}
}
}
/**
* Blockquotes
*/
blockquote {
color: $grey-color;
border-left: 4px solid $grey-color-light;
padding-left: $spacing-unit / 2;
@include relative-font-size(1.125);
letter-spacing: -1px;
font-style: italic;
> :last-child {
margin-bottom: 0;
}
}
/**
* Code formatting
*/
pre,
code {
@include relative-font-size(0.9375);
border: 1px solid $grey-color-light;
border-radius: 3px;
}
code {
padding: 1px 5px;
}
pre {
padding: 8px 12px;
overflow-x: auto;
> code {
border: 0;
padding-right: 0;
padding-left: 0;
}
}
/**
* Wrapper
*/
.wrapper {
max-width: -webkit-calc(#{$content-width} - (#{$spacing-unit} * 2));
max-width: calc(#{$content-width} - (#{$spacing-unit} * 2));
margin-right: auto;
margin-left: auto;
padding-right: $spacing-unit;
padding-left: $spacing-unit;
@extend %clearfix;
@include media-query($on-laptop) {
max-width: -webkit-calc(#{$content-width} - (#{$spacing-unit}));
max-width: calc(#{$content-width} - (#{$spacing-unit}));
padding-right: $spacing-unit / 2;
padding-left: $spacing-unit / 2;
}
}
/**
* Clearfix
*/
%clearfix:after {
content: "";
display: table;
clear: both;
}
/**
* Icons
*/
.svg-icon {
width: 16px;
height: 16px;
display: inline-block;
fill: #{$grey-color};
padding-right: 5px;
vertical-align: text-top;
}
.social-media-list {
li + li {
padding-top: 5px;
}
}
/**
* Tables
*/
table {
margin-bottom: $spacing-unit;
width: 100%;
text-align: $table-text-align;
color: lighten($text-color, 18%);
border-collapse: collapse;
border: 1px solid $grey-color-light;
th, td {
padding: ($spacing-unit / 3) ($spacing-unit / 2);
}
th {
border: 1px solid darken($grey-color-light, 4%);
border-bottom-color: darken($grey-color-light, 12%);
}
td {
border: 1px solid $grey-color-light;
}
}

@ -0,0 +1,255 @@
/**
* Site header
*/
.site-header {
border-top: 5px solid $grey-color-dark;
border-bottom: 1px solid $grey-color-light;
min-height: $spacing-unit * 1.865;
// Positioning context for the mobile navigation icon
position: relative;
}
.site-title {
@include relative-font-size(1.625);
font-weight: 300;
line-height: $base-line-height * $base-font-size * 2.25;
letter-spacing: -1px;
margin-bottom: 0;
float: left;
&,
&:visited {
color: $grey-color-dark;
}
}
.site-nav {
float: right;
line-height: $base-line-height * $base-font-size * 2.25;
.nav-trigger {
display: none;
}
.menu-icon {
display: none;
}
.page-link {
color: $text-color;
line-height: $base-line-height;
// Gaps between nav items, but not on the last one
&:not(:last-child) {
margin-right: 20px;
}
}
@include media-query($on-palm) {
position: absolute;
top: 9px;
right: $spacing-unit / 2;
background-color: $background-color;
border: 1px solid $grey-color-light;
border-radius: 5px;
text-align: right;
label[for="nav-trigger"] {
display: block;
float: right;
width: 36px;
height: 36px;
z-index: 2;
cursor: pointer;
}
.menu-icon {
display: block;
float: right;
width: 36px;
height: 26px;
line-height: 0;
padding-top: 10px;
text-align: center;
> svg {
fill: $grey-color-dark;
}
}
input ~ .trigger {
clear: both;
display: none;
}
input:checked ~ .trigger {
display: block;
padding-bottom: 5px;
}
.page-link {
display: block;
padding: 5px 10px;
&:not(:last-child) {
margin-right: 0;
}
margin-left: 20px;
}
}
}
/**
* Site footer
*/
.site-footer {
border-top: 1px solid $grey-color-light;
padding: $spacing-unit 0;
}
.footer-heading {
@include relative-font-size(1.125);
margin-bottom: $spacing-unit / 2;
}
.contact-list,
.social-media-list {
list-style: none;
margin-left: 0;
}
.footer-col-wrapper {
@include relative-font-size(0.9375);
color: $grey-color;
margin-left: -$spacing-unit / 2;
@extend %clearfix;
}
.footer-col {
float: left;
margin-bottom: $spacing-unit / 2;
padding-left: $spacing-unit / 2;
}
.footer-col-1 {
width: -webkit-calc(35% - (#{$spacing-unit} / 2));
width: calc(35% - (#{$spacing-unit} / 2));
}
.footer-col-2 {
width: -webkit-calc(20% - (#{$spacing-unit} / 2));
width: calc(20% - (#{$spacing-unit} / 2));
}
.footer-col-3 {
width: -webkit-calc(45% - (#{$spacing-unit} / 2));
width: calc(45% - (#{$spacing-unit} / 2));
}
@include media-query($on-laptop) {
.footer-col-1,
.footer-col-2 {
width: -webkit-calc(50% - (#{$spacing-unit} / 2));
width: calc(50% - (#{$spacing-unit} / 2));
}
.footer-col-3 {
width: -webkit-calc(100% - (#{$spacing-unit} / 2));
width: calc(100% - (#{$spacing-unit} / 2));
}
}
@include media-query($on-palm) {
.footer-col {
float: none;
width: -webkit-calc(100% - (#{$spacing-unit} / 2));
width: calc(100% - (#{$spacing-unit} / 2));
}
}
/**
* Page content
*/
.page-content {
padding: $spacing-unit 0;
flex: 1;
}
.page-heading {
@include relative-font-size(2);
}
.post-list-heading {
@include relative-font-size(1.75);
}
.post-list {
margin-left: 0;
list-style: none;
> li {
margin-bottom: $spacing-unit;
}
}
.post-meta {
font-size: $small-font-size;
color: $grey-color;
}
.post-link {
display: block;
@include relative-font-size(1.5);
}
/**
* Posts
*/
.post-header {
margin-bottom: $spacing-unit;
}
.post-title {
@include relative-font-size(2.625);
letter-spacing: -1px;
line-height: 1;
@include media-query($on-laptop) {
@include relative-font-size(2.25);
}
}
.post-content {
margin-bottom: $spacing-unit;
h2 {
@include relative-font-size(2);
@include media-query($on-laptop) {
@include relative-font-size(1.75);
}
}
h3 {
@include relative-font-size(1.625);
@include media-query($on-laptop) {
@include relative-font-size(1.375);
}
}
h4 {
@include relative-font-size(1.25);
@include media-query($on-laptop) {
@include relative-font-size(1.125);
}
}
}

@ -0,0 +1,71 @@
/**
* Syntax highlighting styles
*/
.highlight {
background: #fff;
@extend %vertical-rhythm;
.highlighter-rouge & {
background: black;
}
.c { color: #998; font-style: italic } // Comment
.err { color: #a61717; background-color: #e3d2d2 } // Error
.k { font-weight: bold } // Keyword
.o { font-weight: bold } // Operator
.cm { color: #998; font-style: italic } // Comment.Multiline
.cp { color: #999; font-weight: bold } // Comment.Preproc
.c1 { color: #998; font-style: italic } // Comment.Single
.cs { color: #999; font-weight: bold; font-style: italic } // Comment.Special
.gd { color: #000; background-color: #fdd } // Generic.Deleted
.gd .x { color: #000; background-color: #faa } // Generic.Deleted.Specific
.ge { font-style: italic } // Generic.Emph
.gr { color: #a00 } // Generic.Error
.gh { color: #999 } // Generic.Heading
.gi { color: #000; background-color: #dfd } // Generic.Inserted
.gi .x { color: #000; background-color: #afa } // Generic.Inserted.Specific
.go { color: #888 } // Generic.Output
.gp { color: #555 } // Generic.Prompt
.gs { font-weight: bold } // Generic.Strong
.gu { color: #aaa } // Generic.Subheading
.gt { color: #a00 } // Generic.Traceback
.kc { font-weight: bold } // Keyword.Constant
.kd { font-weight: bold } // Keyword.Declaration
.kp { font-weight: bold } // Keyword.Pseudo
.kr { font-weight: bold } // Keyword.Reserved
.kt { color: #458; font-weight: bold } // Keyword.Type
.m { color: #099 } // Literal.Number
.s { color: #d14 } // Literal.String
.na { color: #008080 } // Name.Attribute
.nb { color: #0086B3 } // Name.Builtin
.nc { color: #458; font-weight: bold } // Name.Class
.no { color: #008080 } // Name.Constant
.ni { color: #800080 } // Name.Entity
.ne { color: #900; font-weight: bold } // Name.Exception
.nf { color: #900; font-weight: bold } // Name.Function
.nn { color: #777 } // Name.Namespace
.nt { color: #000080 } // Name.Tag
.nv { color: #008080 } // Name.Variable
.ow { font-weight: bold } // Operator.Word
.w { color: #bbb } // Text.Whitespace
.mf { color: #099 } // Literal.Number.Float
.mh { color: #099 } // Literal.Number.Hex
.mi { color: #099 } // Literal.Number.Integer
.mo { color: #099 } // Literal.Number.Oct
.sb { color: #d14 } // Literal.String.Backtick
.sc { color: #d14 } // Literal.String.Char
.sd { color: #d14 } // Literal.String.Doc
.s2 { color: #d14 } // Literal.String.Double
.se { color: #d14 } // Literal.String.Escape
.sh { color: #d14 } // Literal.String.Heredoc
.si { color: #d14 } // Literal.String.Interpol
.sx { color: #d14 } // Literal.String.Other
.sr { color: #009926 } // Literal.String.Regex
.s1 { color: #d14 } // Literal.String.Single
.ss { color: #990073 } // Literal.String.Symbol
.bp { color: #999 } // Name.Builtin.Pseudo
.vc { color: #008080 } // Name.Variable.Class
.vg { color: #008080 } // Name.Variable.Global
.vi { color: #008080 } // Name.Variable.Instance
.il { color: #099 } // Literal.Number.Integer.Long
}

[31 binary image files added; previews not shown (sizes range from 2.0 KiB to 2.1 MiB)]
@ -0,0 +1,164 @@
---
layout: page
title: Documentation
permalink: /doc/
---
# Main Interface
Here is the interface for FaceSwap Lab. It is available in the form of an accordion in both img2img and txt2img.
You can configure several units, each allowing you to replace a face. Here, 3 units are available: Face 1, Face 2, and Face 3. After the face replacement, the post-processing part is called.
![](/assets/images/doc_mi.png)
#### Face Unit
The first thing to do is to activate the unit with **'enable'** if you want to use it.
Here are the main options for configuring a unit:
+ **Reference:** This is the reference face. The face from the image will be extracted and used for face replacement. You can select which face is used with the "Reference source face" option at the bottom of the unit.
+ **Batch Source Image:** The images dropped here will be used in addition to the reference or checkpoint. If "Blend Faces" is checked, the faces will be merged into a single face by averaging the characteristics. If not, a new image will be created for each face.
+ **Face Checkpoint:** Allows you to reuse a face in the form of a checkpoint. Checkpoints are built with the build tool.
+ **Blend Faces**: InsightFace works by creating an embedding for each face. An embedding is essentially a condensed representation of the facial characteristics. The face blending process averages multiple face embeddings to generate a blended or composite face. If face blending is enabled, the batch source faces and the reference image will be merged into a single face. This is enabled by default.
**You must always have at least one reference face OR a checkpoint. If both are selected, the checkpoint will be used and the reference ignored.**
Always check for errors in the SD console. In particular, the absence of a reference face or a checkpoint can trigger errors.
+ **Comparison of faces** with the obtained swapped face: The swapped face can be compared to the original face using a similarity score between 0 and 1; the closer the score is to 1, the more similar the faces are. This calculation is performed if you activate **"Compute Similarity"** or **"Check Similarity"**. If you check the latter, you will have the opportunity to filter the output images (a minimal sketch of this filtering appears after this list) with:
+ **Min similarity:** compares the obtained face to the calculated (blended) face. If this value is not exceeded, then the face is not kept.
+ **Min reference similarity:** compares the face to the reference face only. If this value is not exceeded, then the face is not kept.
+ **Selection of the face to be replaced in the image:** You can choose the face(s) to be replaced in the image by indicating their index. Faces are numbered from top-left to bottom-right, starting from 0. Two additional options refine this selection:
+ **Same gender:** the gender of the source face will be determined and only faces of the same gender will be considered.
+ **Sort by size:** faces will be sorted from largest to smallest.
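As referenced above, here is a minimal sketch of how similarity-based filtering could work; FaceSwapLab's actual implementation may differ, and the threshold values are purely illustrative:

```python
# Hypothetical sketch of similarity filtering, not the extension's actual code.
import numpy as np

def cosine_similarity(a: np.ndarray, b: np.ndarray) -> float:
    return float(np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b)))

def keep_image(swapped: np.ndarray, blended: np.ndarray, reference: np.ndarray,
               min_similarity: float = 0.5, min_ref_similarity: float = 0.5) -> bool:
    """Keep a result only if the swapped face embedding is close enough to
    both the blended source face and the reference face."""
    return (cosine_similarity(swapped, blended) >= min_similarity
            and cosine_similarity(swapped, reference) >= min_ref_similarity)
```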
#### Post-processing
The post-processing window looks very much like what you might find in the extra options, except for the inpainting part. The process takes place after all units have swapped faces.
The entire image and all faces will be affected by the post-processing (unlike what you might find in the upscaled inswapper options). If you're already applying a face restorer in the upscaled inswapper, the face_restorer of the post-processing will also be applied. This is probably not what you want, and in this case it might be better to leave it as None.
In the current version, upscaling always occurs before face restoration.
![](/assets/images/doc_pp.png)
The inpainting part works in the same way as adetailer. It sends each face to img2img for transformation. This is useful for adding details to faces. The stronger the denoising, the more likely you are to lose the resemblance of the face. Some samplers seem to better preserve this resemblance than others. You can use a specific model for the replacement, different from the model used for the generation.
For inpainting to be active, denoising must be greater than 0 and the 'Inpainting When' option must be set to a value other than Never:
+ Never means that no inpainting will be done.
+ Before upscaling means that inpainting will occur before upscaling.
+ Before restore face means the operation will occur between upscaling and face restoration.
+ After all means that inpainting will be done last.
**For now, inpainting applies to all faces, including those that have not been modified.**
## Tab
The tab provides access to various tools:
![](/assets/images/doc_tab.png)
+ the use of a **batch process** with post-processing. The tool works like the main interface, except that stable diffusion is not called. Drop your images, choose a source directory (mandatory), configure the faces and click on Process&Save to get the replaced images.
+ the tool part allows you to:
+ **build** one of the face checkpoints
+ **compare** two faces, works the same way as the compute similarity
+ **extract faces** from images
+ **explore the model** (not very useful at the moment)
+ **analyze a face** : This will give the output of the insightface analysis model on the first face found.
+ **Build Checkpoints**: The FaceTools tab now allows creating checkpoints, which facilitate face reuse. When a checkpoint is used, it takes precedence over the reference image, and the reference source image is discarded.
The faces from different images will be merged and stored under the given face name (no special characters or spaces). All checkpoints are stored in `models/faceswaplab/faces`.
Once finished, the process gives a preview of the face using the images contained in the references directory (man or woman depending on the detected gender).
![](/assets/images/checkpoints.png)
The checkpoint can then be used in the main interface (use the refresh button).
![](/assets/images/checkpoints_use.png)
## Upscaled-inswapper
The 'Upscaled Inswapper' is an option in SD FaceSwapLab that upscales each face with an upscaler before integrating it into the image. This is achieved by modifying a small segment of the InsightFace code.
The purpose of this feature is to enhance the quality of the face in the final image. While this process might slightly increase the processing time, it can deliver improved results. In certain cases, this could even eliminate the need for additional tools such as Codeformer or GFPGAN in postprocessing. See the processing order section to understand when and how it is used.
![](/assets/images/upscaled_settings.png)
The upscaled inswapper is disabled by default. It can be enabled in the sd options. Understanding the various steps helps explain why results may be unsatisfactory and how to address this issue.
+ **upscaler**: LDSR is used if None is selected. The LDSR option generally gives the best results, but at the expense of a lot of computation time. You should test other models to form an opinion. The 003_realSR_BSRGAN_DFOWMFC_s64w8_SwinIR-L_x4_GAN model seems to give good results in a reasonable amount of time. It is not possible to disable upscaling, but you can choose LANCZOS for speed if Codeformer is enabled in the upscaled inswapper. The result is generally satisfactory.
+ **restorer**: The face restorer to be used if necessary. Codeformer generally gives good results.
+ **sharpening** can provide more natural results, but it may also add artifacts. The same goes for **color correction**. By default, these options are set to False.
+ **improved mask:** The segmentation mask for the upscaled swapper is designed to avoid the square mask and prevent degradation of the non-face parts of the image. It is based on the Codeformer implementation. If "Use improved segmented mask (use pastenet to mask only the face)" and "upscaled inswapper" are checked in the settings, the mask will only cover the face, and will not be squared. However, depending on the image, this might introduce different types of problems such as artifacts on the border of the face.
+ **fthresh and erosion factor:** it is possible to adjust the mask erosion parameters using the fthresh and erosion settings. The higher these settings are (particularly erosion), the more the mask is reduced.
## Processing order
The extension is activated after all other extensions have been processed. During the execution, several steps take place.
**Step 1:** The first step is the construction of the swapped face. For this, the faces are extracted and their resolution is reduced to a square of 128px. This is the native resolution of the model, which explains why the quality suffers. The generated face has the same resolution.
![](/assets/images/step1.png)
**Step 2 (Optional):** Second, and **only if the upscaled inswapper is enabled**, the low-resolution face is corrected by applying the selected transformations:
![](/assets/images/step2.png)
**Step 3a (Default):** The face is replaced in the original image. By default, InsightFace uses a square mask which it erodes to blend into the original image. This explains why clothing or other things may be affected by the replacement. When "upscaled inswapper" is used, it is possible to adjust the mask erosion parameters using the fthresh and erosion settings. The higher these settings are (particularly erosion), the more the mask is reduced.
![](/assets/images/step3a.png)
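Mask erosion in general can be illustrated with OpenCV; this is a generic sketch, not the extension's implementation, and the kernel size is arbitrary:

```python
# Hypothetical sketch: eroding a binary face mask to shrink the affected area.
import cv2
import numpy as np

mask = cv2.imread("face_mask.png", cv2.IMREAD_GRAYSCALE)  # placeholder 0/255 mask
kernel = np.ones((5, 5), np.uint8)
# More iterations shrink the mask further, limiting how much of the image changes.
eroded = cv2.erode(mask, kernel, iterations=2)
cv2.imwrite("face_mask_eroded.png", eroded)
```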
**Step 3b (Improved mask):** If the "improved mask" option is enabled and the "upscaled inswapper" is used, then a segmented mask will be calculated on both faces. The affected area will therefore be more limited.
![](/assets/images/step3b.png)
**Step 4 (Post-processing and inpainting options):** Finally, post-processing and inpainting are performed on the image.
![](/assets/images/step4.png)
## Settings
Here are the parameters that can be configured in the sd settings, with their default values.
### General Settings
Name | Description | Default Value
---|---|---
faceswaplab_model | InsightFace model to use | models[0] if len(models) > 0 else "None"
faceswaplab_keep_original | Keep the original image before swapping; if true, the original image is shown alongside the result | False
faceswaplab_units_count | How many face units to use (requires restart) | 3
faceswaplab_detection_threshold | Detection threshold used to detect faces; if set too low, non-human faces may be detected as faces | 0.5
### Default Settings
These parameters are used to configure the default settings displayed in post-processing.
Name | Description | Default Value
---|---|---
faceswaplab_pp_default_face_restorer | UI Default post processing face restorer (requires restart) | None
faceswaplab_pp_default_face_restorer_visibility | UI Default post processing face restorer visibility (requires restart) | 1
faceswaplab_pp_default_face_restorer_weight | UI Default post processing face restorer weight (requires restart) | 1
faceswaplab_pp_default_upscaler | UI Default post processing upscaler (requires restart) | None
faceswaplab_pp_default_upscaler_visibility | UI Default post processing upscaler visibility(requires restart) | 1
### Upscaled Inswapper Settings
These parameters are used to control the upscaled inswapper, see above.
Name | Description | Default Value
---|---|---
faceswaplab_upscaled_swapper | Upscaled swapper. Applied only to the swapped faces. Apply transformations before merging with the original image | False
faceswaplab_upscaled_swapper_upscaler | Upscaled swapper upscaler (Recommended : LDSR but slow) | None
faceswaplab_upscaled_swapper_sharpen | Upscaled swapper sharpen | False
faceswaplab_upscaled_swapper_fixcolor | Upscaled swapper color correction | False
faceswaplab_upscaled_improved_mask | Use improved segmented mask (use pastenet to mask only the face) | True
faceswaplab_upscaled_swapper_face_restorer | Upscaled swapper face restorer | None
faceswaplab_upscaled_swapper_face_restorer_visibility | Upscaled swapper face restorer visibility | 1
faceswaplab_upscaled_swapper_face_restorer_weight | Upscaled swapper face restorer weight (codeformer) | 1
faceswaplab_upscaled_swapper_fthresh | Upscaled swapper fthresh (diff sensitivity) 10 = default behaviour. Low impact | 10
faceswaplab_upscaled_swapper_erosion | Upscaled swapper mask erosion factor, 1 = default behaviour. The larger it is, the more blur is applied around the face. Too large and the facial change is no longer visible | 1

@ -0,0 +1,15 @@
---
layout: page
title: Examples
permalink: /examples/
---
These examples show how to use a painting as a source. No post-processing is activated, only the upscaled inswapper with LDSR, Codeformer and segmented mask.
Molière:
![](/assets/images/example1.png)
Napoleon:
![](/assets/images/example2.png)

@ -0,0 +1,135 @@
---
layout: page
title: FAQ
permalink: /faq/
---
Our issue tracker often contains requests that may originate from a misunderstanding of the software's functionality. We aim to address these queries; however, due to time constraints, we may not be able to respond to each request individually. This FAQ section serves as a preliminary source of information for commonly raised concerns. We recommend reviewing these before submitting an issue.
#### Improving Quality of Results
To get better quality results:
1. Ensure that the "Restore Face" option is enabled.
2. Consider using the "Upscaler" option. For finer control, you can use an upscaler from the "Extras" tab.
3. Use img2img with the denoise parameter set to `0.1`. Gradually increase this parameter until you achieve a balance of quality and resemblance.
You can also use the upscaled inswapper. I mainly use it with the following options:
![](/assets/images/inswapper_options.png)
#### Replacing Specific Faces
If an image contains multiple faces and you want to swap specific ones: Use the "Comma separated face number(s)" option to select the face numbers you wish to swap.
#### Issues with Face Swapping
If a face did not get swapped, please check the following:
1. Ensure that the "Enable" option has been checked.
2. If you've ensured the above and your console doesn't show any errors, it means that the FaceSwapLab was unable to detect a face in your image or the image was detected as NSFW (Not Safe For Work).
#### Controversy Surrounding NSFW Content Filtering
We understand that some users might wish to have the option to disable content filtering, particularly for Not Safe for Work (NSFW) content. However, it's important to clarify our stance on this matter. We are not categorically against NSFW content. The concern arises specifically when the software is used to superimpose the face of a real person onto NSFW content.
If it were reliably possible to ensure that the faces being swapped were synthetic and not tied to real individuals, the inclusion of NSFW content would pose less of an ethical dilemma. However, in the absence of such a guarantee, making this feature easily accessible could potentially lead to misuse, which is an ethically risky scenario.
It is not our intention to impose our moral perspectives. Our goal is to comply with the requirements of the models used in the software and to establish a balanced boundary that respects individual privacy and prevents potential misuse.
Requests to provide an option to disable the content filter will not be considered.
#### What is the role of the segmentation mask for the upscaled swapper?
The segmentation mask for the upscaled swapper is designed to avoid the square mask and prevent degradation of the non-face parts of the image. It is based on the Codeformer implementation. If "Use improved segmented mask (use pastenet to mask only the face)" and "upscaled inswapper" are checked in the settings, the mask will only cover the face, and will not be squared. However, depending on the image, this might introduce different types of problems such as artifacts on the border of the face.
#### How to increase speed of upscaled inswapper?
It is possible to choose LANCZOS for speed if Codeformer is enabled in the upscaled inswapper. The result is generally satisfactory.
#### Sharpening and color correction in the upscaled swapper
Sharpening can provide more natural results, but it may also add artifacts. The same goes for color correction. By default, these options are set to False.
#### I don't see any extension after restart
If you do not see any extensions after restarting, it is likely due to missing requirements, particularly if you're using Windows. Follow the instructions below:
1. Verify that there are no error messages in the terminal.
2. Double-check the Installation section of this document to ensure all the steps have been followed.
If you are running a specific configuration (for example, Python 3.11), please test the extension with a clean installation of the stable version of the Stable Diffusion web UI before reporting an issue. This can help isolate whether the problem is related to your specific configuration or is a broader issue with the extension.
#### Understanding Quality of Results
The model used in this extension initially reduces the resolution of the target face before generating a 128x128 image. This means that regardless of the original image's size, the resolution of the processed faces will not exceed 128x128. Consequently, this lower resolution might lead to quality limitations in the results.
The output of this process might not meet high expectations, but the use of the face restorer and upscaler can help improve these results to some extent.
The quality of results is inherently tied to the capabilities of the model and cannot be enhanced beyond its design. FaceSwapLab merely provides an interface to the underlying model. Unless the InsightFace model is retrained and the necessary alterations are made in the library (see below), the resulting quality is capped by the model itself.
Consider this extension as a low-cost alternative to more sophisticated tools such as LoRA, or as a complement to them. It is important to **maintain realistic expectations of the results** this extension can provide.
#### Issue: Incorrect Gender Detection
The gender detection functionality is handled by the underlying analysis model. As such, there might be instances where the detected gender may not be accurate. This is a limitation of the model and we currently do not have a way to improve this accuracy from our end.
#### Why isn't GPU support included?
Implementing GPU support may seem straightforward, requiring only a modification to the onnxruntime implementation and a change of providers in the swapper. However, there are reasons we haven't included it as a standard option.
The primary consideration is the substantial VRAM usage of the SD models. Running the face-swap model on the GPU does not yield significant performance gains in the current state of the software. Moreover, GPU support only becomes truly beneficial when processing large numbers of frames or video, and our experience indicates that this tends to cause more issues than it resolves.
Consequently, requests for GPU support as a standard feature will not be considered.
#### What is the 'Upscaled Inswapper' Option in SD FaceSwapLab?
The 'Upscaled Inswapper' is an option in SD FaceSwapLab that upscales each face with an upscaler before integrating it into the image. This is achieved by modifying a small segment of the InsightFace code.
The purpose of this feature is to enhance the quality of the face in the final image. While this process might slightly increase the processing time, it can deliver improved results. In certain cases, this could even eliminate the need for additional tools such as Codeformer or GFPGAN in postprocessing.
#### What is Face Blending?
InsightFace works by creating an embedding for each face. An embedding is essentially a condensed representation of the facial characteristics.
The face blending process allows for the averaging of multiple face embeddings to generate a blended or composite face.
The benefits of face blending include:
+ Generation of a higher-quality embedding based on multiple faces, improving how faithfully the face is represented.
+ Creation of a composite face that includes features from multiple individuals, which can be useful for diverse face recognition scenarios.
To create a composite face, two methods are available:
1. Use the Checkpoint Builder: This tool allows you to save a set of face embeddings that can be loaded later to create a blended face.
2. Use Image Batch Sources: By dropping several images into this tool, you can generate a blended face based on the faces in the provided images.
#### What is a face checkpoint?
A face checkpoint is a saved embedding of a face, generated from multiple images. This is accomplished via the build tool located in the `sd` tab. The build tool blends all images dropped into the tab and saves the resulting embedding to a file.
The primary advantage of face checkpoints is their size. An embedding is only around 2KB, meaning it's lightweight and can be reused later without requiring additional calculations.
Face checkpoints are saved as `.pkl` files. Please be aware that exchanging `.pkl` files carries potential security risks. These files are not secure by design and can execute malicious code when loaded. Therefore, exercise extreme caution when sharing or receiving this type of file.
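For illustration only, here is roughly what saving and loading an embedding with pickle looks like; the dictionary layout is an assumption, not FaceSwapLab's actual checkpoint format:

```python
# Hypothetical sketch: persisting a face embedding with pickle.
# WARNING: only load .pkl files from sources you fully trust;
# unpickling untrusted data can execute arbitrary code.
import pickle
import numpy as np

embedding = np.random.rand(512).astype(np.float32)  # stand-in for a real embedding

with open("my_face.pkl", "wb") as f:
    pickle.dump({"name": "my_face", "embedding": embedding}, f)

with open("my_face.pkl", "rb") as f:
    checkpoint = pickle.load(f)  # trusted files only
print(checkpoint["name"], checkpoint["embedding"].shape)
```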
#### How is similarity determined?
The similarity between faces is established by comparing their embeddings. In this context, a score of 1 signifies that the two faces are identical, while a score of 0 indicates that the faces are different.
You can remove images from the results if the generated image does not match the reference. This is done by adjusting the sliders in the "Faces" tab.
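As an illustration, a common way to compare embeddings is cosine similarity; this is a sketch, not necessarily the exact metric used internally:

```python
# Hypothetical sketch: comparing two embeddings with cosine similarity.
import numpy as np

def similarity(a: np.ndarray, b: np.ndarray) -> float:
    """1.0 means identical embeddings (same face); lower values mean different faces."""
    return float(np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b)))

emb = np.random.rand(512)
print(similarity(emb, emb))                  # 1.0: identical faces
print(similarity(emb, np.random.rand(512)))  # lower score: different faces
```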
#### Which model is used?
The model employed here is based on InsightFace's "InSwapper". For more specific information, you can refer [here](https://github.com/deepinsight/insightface/blob/fc622003d5410a64c96024563d7a093b2a55487c/python-package/insightface/model_zoo/inswapper.py#L12).
This model was temporarily made public by the InsightFace team for research purposes. They have not provided any details about the training methodology.
The model generates faces with a resolution of 128x128, which is relatively low. For better results, the generated faces need to be upscaled. The InsightFace code is not designed for higher resolutions (see the [Router](https://github.com/deepinsight/insightface/blob/fc622003d5410a64c96024563d7a093b2a55487c/python-package/insightface/model_zoo/model_zoo.py#L35) class for more information).
#### Why not use SimSwap?
SimSwap models are based on older InsightFace architectures, and SimSwap has not been released as a Python package. Its incorporation would complicate the process, and it does not guarantee any substantial gain.
If you manage to implement SimSwap successfully, feel free to submit a pull request.

@ -0,0 +1,120 @@
---
layout: page
title: Features
permalink: /features/
---
+ **Face Unit Concept**: Similar to controlNet, the program introduces the concept of a face unit. You can configure up to 10 units (3 units are the default setting) in the program settings (sd).
![](/assets/images/face_units.png)
+ **Vladmantic and a1111 Support**
+ **Batch Processing**
+ **Inpainting**: supports "only masked" and mask inpainting.
+ **Performance Improvements**: The overall performance of the software has been enhanced.
+ **FaceSwapLab Tab** providing various tools.
![](/assets/images/tab.png)
+ **FaceSwapLab Settings**: FaceSwapLab settings are now part of the sd settings. To access them, navigate to the sd settings section.
![](/assets/images/settings.png)
+ **Face Reuse Via Checkpoints**: The FaceTools tab now allows creating checkpoints, which facilitate face reuse. When a checkpoint is used, it takes precedence over the reference image, and the reference source image is discarded.
![](/assets/images/checkpoints.png)
![](/assets/images/checkpoints_use.png)
+ **Gender Detection**: The program can now detect gender based on faces.
![](/assets/images/gender.png)
+ **Face Combination (Blending)**: Multiple versions of a face can be combined to enhance the swapping result. This blending happens during checkpoint creation.
![](/assets/images/blend_face.png)
![](/assets/images/testein.png)
+ **Preserve Original Images**: You can opt to keep original images before the swapping process.
![](/assets/images/keep_orig.png)
+ **Multiple Face Versions for Replacement**: The program allows the use of multiple versions of the same face for replacement.
![](/assets/images/multiple_face_src.png)
+ **Face Similarity and Filtering**: You can compare faces against the reference and/or source images.
![](/assets/images/similarity.png)
+ **Face Comparison**: face comparison feature.
![](/assets/images/compare.png)
+ **Face Extraction**: face extraction with or without upscaling.
![](/assets/images/extract.png)
+ **Improved Post-Processing**: codeformer, gfpgan, upscaling.
![](/assets/images/post-processing.png)
+ **Post Inpainting**: This feature allows the application of image-to-image inpainting specifically to faces.
![](/assets/images/postinpainting.png)
![](/assets/images/postinpainting_result.png)
+ **Upscaled Inswapper**: The program now includes an upscaled inswapper option, which improves results by incorporating upsampling, sharpness adjustment, and color correction before the face is merged into the original image.
![](/assets/images/upscalled_swapper.png)
+ **API with typing support** :
```python
import base64
import io

import requests
from PIL import Image

from client_utils import (
    FaceSwapRequest,
    FaceSwapUnit,
    PostProcessingOptions,
    FaceSwapResponse,
    pil_to_base64,
)

address = "http://127.0.0.1:7860"

# First face unit:
unit1 = FaceSwapUnit(
    source_img=pil_to_base64("../../references/man.png"),  # the face you want to use
    faces_index=(0,),  # replace the first face
)

# Second face unit:
unit2 = FaceSwapUnit(
    source_img=pil_to_base64("../../references/woman.png"),  # the face you want to use
    same_gender=True,
    faces_index=(0,),  # replace the first woman, since same_gender is on
)

# Post-processing config:
pp = PostProcessingOptions(
    face_restorer_name="CodeFormer",
    codeformer_weight=0.5,
    restorer_visibility=1,
)

# Prepare the request
request = FaceSwapRequest(
    image=pil_to_base64("test_image.png"),
    units=[unit1, unit2],
    postprocessing=pp,
)

result = requests.post(
    url=f"{address}/faceswaplab/swap_face",
    data=request.json(),
    headers={"Content-Type": "application/json; charset=utf-8"},
)
response = FaceSwapResponse.parse_obj(result.json())

for img, info in zip(response.pil_images, response.infos):
    img.show(title=info)
```

@ -0,0 +1,67 @@
---
# Feel free to add content and custom Front Matter to this file.
# To modify the layout, see https://jekyllrb.com/docs/themes/#overriding-theme-defaults
layout: home
---
FaceSwapLab is an extension for Stable Diffusion that simplifies the use of [InsightFace models](https://insightface.ai/) for face-swapping. It has evolved from sd-webui-faceswap and parts of sd-webui-roop. However, a substantial amount of the code has been rewritten to improve performance and to better manage masks.
Some key [features](features) include the ability to reuse faces via checkpoints, multiple face units, batch process images, sort faces based on size or gender, and support for vladmantic. It also provides a face inpainting feature.
![](/assets/images/main_interface.png)
Link to github repo : [https://github.com/glucauze/sd-webui-faceswaplab](https://github.com/glucauze/sd-webui-faceswaplab)
While FaceSwapLab is still under development, it has reached a good level of stability. This makes it a reliable tool for those who are interested in face-swapping within the Stable Diffusion environment. As with all projects of this type, it's expected to improve and evolve over time.
## Disclaimer and license
In short:
+ **Ethical Consideration:** This extension should not be forked to create a public, easy way to circumvent NSFW filtering.
+ **License:** This software is distributed under the terms of the GNU Affero General Public License (AGPL), version 3 or later.
+ **Model License:** This software uses InsightFace's pre-trained models, which are available for non-commercial research purposes only.
### Ethical Perspective
This extension is **not intended to facilitate the creation of not safe for work (NSFW) or non-consensual deepfake content**. Its purpose is to bring consistency to image creation, making it easier to repair existing images, or bring characters back to life.
While the code for this extension is licensed under the AGPL in compliance with models and other source materials, it's important to stress that **we strongly discourage any attempts to fork this project to create an uncensored version**. Any modifications to the code to enable the production of such content would be contrary to the ethical guidelines we advocate for.
We will comply with European regulations regarding this type of software. As required by law, the code may include both visible and invisible watermarks. If your local laws prohibit the use of this extension, you should not use it.
From an ethical perspective, the main goal of this extension is to generate consistent images by swapping faces. It's important to note that we've done our best to integrate censorship features. However, when users can access the source code, they might bypass these censorship measures. That's why we urge users to use this extension responsibly and avoid any malicious use. We emphasize the importance of respecting people's privacy and consent when swapping faces in images. We discourage any activities that could harm others, invade their privacy, or negatively affect their well-being.
Additionally, we believe it's important to make the public aware of these tools and the ease with which deepfakes can be created. As technology improves, we need to be more critical and skeptical when we encounter media content. By promoting media literacy, we can reduce the negative impact of misusing these tools and encourage responsible use in the digital world.
### Software License
This software is distributed under the terms of the GNU Affero General Public License (AGPL), version 3 or later. It is provided "AS IS", without any express or implied warranties, including but not limited to the implied warranties of merchantability and fitness for a particular purpose. In no event shall the author be liable for any direct, indirect, incidental, special, exemplary, or consequential damages (including, but not limited to, procurement of substitute goods or services; loss of use, data, or profits; or business interruption) however caused and on any theory of liability, whether in contract, strict liability, or tort (including negligence or otherwise) arising in any way out of the use of this software, even if advised of the possibility of such damage. Users are encouraged to review the license in full and to use the software in accordance with its terms.
If any user violates their country's legal and ethical rules, we don't accept any liability for this code repository.
### Models License
This software utilizes the pre-trained models `buffalo_l` and `inswapper_128.onnx`, which are provided by InsightFace. These models are included under the following conditions:
_InsightFace's pre-trained models are available for non-commercial research purposes only. This includes both auto-downloading models and manually downloaded models._ From the [InsightFace license](https://github.com/deepinsight/insightface/tree/master/python-package).
Users of this software must strictly adhere to these conditions of use. The developers and maintainers of this software are not responsible for any misuse of InsightFace's pre-trained models.
Please note that if you intend to use this software for any commercial purposes, you will need to train your own models or find models that can be used commercially.
## Acknowledgments
This project contains code adapted from the following sources:
+ codeformer : https://github.com/sczhou/CodeFormer
+ PSFRGAN : https://github.com/chaofengc/PSFRGAN
+ insightface : https://insightface.ai/
+ ifnude : https://github.com/s0md3v/ifnude
+ sd-webui-roop : https://github.com/s0md3v/sd-webui-roop
## Alternatives
+ https://github.com/idinkov/sd-deepface-1111
+ https://github.com/s0md3v/sd-webui-roop

@ -0,0 +1,47 @@
---
layout: page
title: Install
permalink: /install/
---
## Requirements/Recommended configuration
The extension runs mainly on the CPU to avoid using VRAM. However, it is recommended to follow the specifications recommended by sd/a1111 with regard to prerequisites. At the time of writing, a Python version lower than 3.11 is preferable (the extension works with Python 3.11, but model loading and performance may fall short of expectations).
### Windows users: Visual Studio! Don't neglect this!
Before beginning the installation process on Windows, you need to satisfy one of the following requirements:
1. Install Visual Studio 2022: This step is required to build some of the dependencies. You can use the Community version of Visual Studio 2022, which can be downloaded from the following link: https://visualstudio.microsoft.com/downloads/
2. OR Install only the VS C++ Build Tools: If you don't need the full Visual Studio suite, you can choose to install only the VS C++ Build Tools. During the installation process, select the option for "Desktop Development with C++" found under the "Workloads -> Desktop & Mobile" section. The VS C++ Build Tools can be downloaded from this link: https://visualstudio.microsoft.com/visual-cpp-build-tools/
3. OR if you don't want to install either the full Visual Studio suite or the VS C++ Build Tools: Follow the instructions provided in section VIII of the documentation.
## Manual Install
To install the extension, follow the steps below:
1. Open the `web-ui` application and navigate to the "Extensions" tab.
2. Use the URL `https://github.com/glucauze/sd-webui-faceswaplab` in the "install from URL" section.
3. Close the `web-ui` application and reopen it.
![](/assets/images/install_from_url.png)
**You may need to restart sd once the installation process is complete.**
On first launch, the required models are downloaded, which may take some time. All models are located in the `models/faceswaplab` folder.
If you encounter the error `'NoneType' object has no attribute 'get'`, take the following steps:
1. Download the [inswapper_128.onnx](https://huggingface.co/henryruhs/faceswaplab/resolve/main/inswapper_128.onnx) model.
2. Place the downloaded model inside the `<webui_dir>/models/faceswaplab/` directory. A scripted alternative is sketched below.
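If you prefer to script these two steps, a minimal sketch along the following lines should work (it assumes the current directory is `<webui_dir>` and uses the model URL given above):

```python
import os
import urllib.request

# Model URL and target folder as described in the steps above.
model_url = "https://huggingface.co/henryruhs/faceswaplab/resolve/main/inswapper_128.onnx"
model_dir = os.path.join("models", "faceswaplab")  # relative to <webui_dir>
os.makedirs(model_dir, exist_ok=True)

model_path = os.path.join(model_dir, os.path.basename(model_url))
if not os.path.exists(model_path):
    urllib.request.urlretrieve(model_url, model_path)
```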
## Usage
To use this extension, follow the steps below:
1. Navigate to the "faceswaplab" drop-down menu and import an image that contains a face.
2. Enable the extension by checking the "Enable" checkbox.
3. After performing the steps above, the generated result will have the face you selected.

@ -0,0 +1,94 @@
from typing import List, Optional, Tuple
from PIL import Image
from pydantic import BaseModel, Field
from enum import Enum
import base64
import io
from io import BytesIO
class InpaintingWhen(Enum):
NEVER = "Never"
BEFORE_UPSCALING = "Before Upscaling/all"
BEFORE_RESTORE_FACE = "After Upscaling/Before Restore Face"
AFTER_ALL = "After All"
class FaceSwapUnit(BaseModel) :
# The image given in reference
source_img: str = Field(description='base64 reference image', examples=["data:image/jpeg;base64,/9j/4AAQSkZJRgABAQECWAJYAAD...."], default=None)
# The checkpoint file
source_face : str = Field(description='face checkpoint (from models/faceswaplab/faces)',examples=["my_face.pkl"], default=None)
# base64 batch source images
batch_images: Tuple[str, ...] = Field(description='list of base64 batch source images',examples=["data:image/jpeg;base64,/9j/4AAQSkZJRgABAQECWAJYAAD....", "data:image/jpeg;base64,/9j/4AAQSkZJRgABAQECWAJYAAD...."], default=None)
# Will blend faces if True
blend_faces: bool = Field(description='Will blend faces if True', default=True)
# Use same gender filtering
same_gender: bool = Field(description='Use same gender filtering', default=True)
# If True, discard images with low similarity
check_similarity : bool = Field(description='If True, discard images with low similarity', default=False)
# if True will compute similarity and add it to the image info
compute_similarity : bool = Field(description='If True will compute similarity and add it to the image info', default=False)
# Minimum similarity against the used face (reference, batch or checkpoint)
min_sim: float = Field(description='Minimum similarity against the used face (reference, batch or checkpoint)', default=0.0)
# Minimum similarity against the reference (reference or checkpoint if checkpoint is given)
min_ref_sim: float = Field(description='Minimum similarity against the reference (reference or checkpoint if checkpoint is given)', default=0.0)
# The face index to use for swapping
faces_index: Tuple[int, ...] = Field(description='The face index to use for swapping, list of face numbers starting from 0', default=(0,))
class PostProcessingOptions (BaseModel):
face_restorer_name: str = Field(description='face restorer name', default=None)
restorer_visibility: float = Field(description='face restorer visibility', default=1, le=1, ge=0)
codeformer_weight: float = Field(description='face restorer codeformer weight', default=1, le=1, ge=0)
upscaler_name: str = Field(description='upscaler name', default=None)
scale: float = Field(description='upscaling scale', default=1, le=10, ge=0)
upscale_visibility: float = Field(description='upscaler visibility', default=1, le=1, ge=0)
inpainting_denoising_strengh : float = Field(description='Inpainting denoising strength', default=0, lt=1, ge=0)
inpainting_prompt : str = Field(description='Inpainting prompt', examples=["Portrait of a [gender]"], default="Portrait of a [gender]")
inpainting_negative_prompt : str = Field(description='Inpainting negative prompt', examples=["Deformed, blurry, bad anatomy, disfigured, poorly drawn face, mutation"], default="")
inpainting_steps : int = Field(description='Inpainting steps', examples=[20], ge=1, le=150, default=20)
inpainting_sampler : str = Field(description='Inpainting sampler',examples=["Euler"], default="Euler")
inpainting_when : InpaintingWhen = Field(description='When inpainting happens', examples=[e.value for e in InpaintingWhen.__members__.values()], default=InpaintingWhen.NEVER)
class FaceSwapRequest(BaseModel) :
image : str = Field(description='base64 reference image', examples=["data:image/jpeg;base64,/9j/4AAQSkZJRgABAQECWAJYAAD...."], default=None)
units : List[FaceSwapUnit]
postprocessing : PostProcessingOptions
class FaceSwapResponse(BaseModel) :
images : List[str] = Field(description='base64 swapped image',default=None)
infos : List[str]
@property
def pil_images(self) :
return [base64_to_pil(img) for img in self.images]
def pil_to_base64(img):
if isinstance(img, str):
img = Image.open(img)
buffer = BytesIO()
img.save(buffer, format='PNG')
img_data = buffer.getvalue()
base64_data = base64.b64encode(img_data)
return base64_data.decode('utf-8')
def base64_to_pil(base64str : Optional[str]) -> Optional[Image.Image] :
if base64str is None :
return None
if 'base64,' in base64str: # check if the base64 string has a data URL scheme
base64_data = base64str.split('base64,')[-1]
img_bytes = base64.b64decode(base64_data)
else:
# if no data URL scheme, just decode
img_bytes = base64.b64decode(base64str)
return Image.open(io.BytesIO(img_bytes))
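# Illustrative round-trip check (a sketch, not part of the original module):
# encoding a PIL image with pil_to_base64 and decoding it back with
# base64_to_pil should preserve the pixel dimensions.
#
# img = Image.new("RGB", (64, 64), "red")
# decoded = base64_to_pil(pil_to_base64(img))
# assert decoded is not None and decoded.size == img.size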

@ -0,0 +1,39 @@
import requests
from PIL import Image
from client_utils import FaceSwapRequest, FaceSwapUnit, PostProcessingOptions, FaceSwapResponse, pil_to_base64
address = 'http://127.0.0.1:7860'
# First face unit :
unit1 = FaceSwapUnit(
source_img=pil_to_base64("../../references/man.png"), # The face you want to use
faces_index=(0,) # Replace first face
)
# Second face unit :
unit2 = FaceSwapUnit(
source_img=pil_to_base64("../../references/woman.png"), # The face you want to use
same_gender=True,
faces_index=(0,) # Replace first woman since same gender is on
)
# Post-processing config :
pp = PostProcessingOptions(
face_restorer_name="CodeFormer",
codeformer_weight=0.5,
restorer_visibility= 1)
# Prepare the request
request = FaceSwapRequest (
image = pil_to_base64("test_image.png"),
units= [unit1, unit2],
postprocessing=pp
)
result = requests.post(url=f'{address}/faceswaplab/swap_face', data=request.json(), headers={"Content-Type": "application/json; charset=utf-8"})
response = FaceSwapResponse.parse_obj(result.json())
for img, info in zip(response.pil_images, response.infos):
img.show(title = info)
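# Illustrative variation (a sketch, not part of the original example): save
# the results to disk instead of opening viewer windows; file names are arbitrary.
#
# for i, (img, info) in enumerate(zip(response.pil_images, response.infos)):
#     img.save(f"swapped_{i}.png")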


@ -0,0 +1,56 @@
import launch
import os
import pkg_resources
import sys
from tqdm import tqdm
import urllib.request
req_file = os.path.join(os.path.dirname(os.path.realpath(__file__)), "requirements.txt")
models_dir = os.path.abspath("models/faceswaplab")
faces_dir = os.path.abspath(os.path.join("models","faceswaplab","faces"))
model_url = "https://huggingface.co/henryruhs/roop/resolve/main/inswapper_128.onnx"
model_name = os.path.basename(model_url)
model_path = os.path.join(models_dir, model_name)
def download(url, path):
# Read the expected size first so tqdm can show accurate progress, closing the connection when done.
with urllib.request.urlopen(url) as response:
total = int(response.headers.get('Content-Length', 0))
with tqdm(total=total, desc='Downloading', unit='B', unit_scale=True, unit_divisor=1024) as progress:
urllib.request.urlretrieve(url, path, reporthook=lambda count, block_size, total_size: progress.update(block_size))
os.makedirs(models_dir, exist_ok=True)
os.makedirs(faces_dir, exist_ok=True)
if not os.path.exists(model_path):
download(model_url, model_path)
print("Checking faceswaplab requirements")
with open(req_file) as file:
for package in file:
try:
python = sys.executable
package = package.strip()
if not launch.is_installed(package.split("==")[0]):
print(f"Install {package}")
launch.run_pip(
f"install {package}", f"sd-webui-faceswaplab requirement: {package}"
)
elif "==" in package:
package_name, package_version = package.split("==")
installed_version = pkg_resources.get_distribution(package_name).version
if installed_version != package_version:
print(
f"Install {package}, {installed_version} vs {package_version}"
)
launch.run_pip(
f"install {package}",
f"sd-webui-faceswaplab requirement: changing {package_name} version from {installed_version} to {package_version}",
)
except Exception as e:
print(e)
print(f"Warning: Failed to install {package}, faceswaplab will not work.")
raise e

@ -0,0 +1,4 @@
window.onbeforeunload = function() {
// Prevent the stable diffusion window from being closed by mistake
return "Are you sure ?";
};

@ -0,0 +1,7 @@
def preload(parser):
parser.add_argument(
"--faceswaplab_loglevel",
default="INFO",
choices=["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"],
help="Set the log level (DEBUG, INFO, WARNING, ERROR, CRITICAL)",
)
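# Illustrative usage (an assumption about the usual a1111 launch flow, where
# preload arguments are parsed at startup):
#
# python launch.py --faceswaplab_loglevel DEBUG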


@ -0,0 +1,9 @@
insightface==0.7.3
onnx==1.14.0
onnxruntime==1.15.0
opencv-python==4.7.0.72
dill==0.3.6
pandas
ifnude
cython
pydantic==1.10.9

@ -0,0 +1,226 @@
import importlib
from scripts.faceswaplab_api import faceswaplab_api
from scripts.faceswaplab_settings import faceswaplab_settings
from scripts.faceswaplab_ui import faceswaplab_tab, faceswaplab_unit_ui
from scripts.faceswaplab_utils.models_utils import get_current_model, get_face_checkpoints
from scripts import (faceswaplab_globals)
from scripts.faceswaplab_swapping import swapper
from scripts.faceswaplab_utils import faceswaplab_logging, imgutils
from scripts.faceswaplab_utils import models_utils
from scripts.faceswaplab_postprocessing import upscaling
import numpy as np
#Reload all the modules when using "apply and restart"
#This is mainly done for development purposes
importlib.reload(swapper)
importlib.reload(faceswaplab_logging)
importlib.reload(faceswaplab_globals)
importlib.reload(imgutils)
importlib.reload(upscaling)
importlib.reload(faceswaplab_settings)
importlib.reload(models_utils)
importlib.reload(faceswaplab_unit_ui)
importlib.reload(faceswaplab_api)
import os
from dataclasses import fields
from pprint import pformat
from typing import List, Optional, Tuple
import dill as pickle
import gradio as gr
import modules.scripts as scripts
from modules import script_callbacks, scripts
from insightface.app.common import Face
from modules import scripts, shared
from modules.images import save_image, image_grid
from modules.processing import (Processed, StableDiffusionProcessing,
StableDiffusionProcessingImg2Img)
from modules.shared import opts
from PIL import Image
from scripts.faceswaplab_utils.imgutils import (pil_to_cv2,check_against_nsfw)
from scripts.faceswaplab_utils.faceswaplab_logging import logger, save_img_debug
from scripts.faceswaplab_globals import VERSION_FLAG
from scripts.faceswaplab_postprocessing.postprocessing_options import PostProcessingOptions
from scripts.faceswaplab_postprocessing.postprocessing import enhance_image
from scripts.faceswaplab_ui.faceswaplab_unit_settings import FaceSwapUnitSettings
EXTENSION_PATH=os.path.join("extensions","sd-webui-faceswaplab")
# Register the tab, done here to prevent it from being added twice
script_callbacks.on_ui_tabs(faceswaplab_tab.on_ui_tabs)
try:
import modules.script_callbacks as script_callbacks
script_callbacks.on_app_started(faceswaplab_api.faceswaplab_api)
except:
# API registration is optional: ignore failures if the app callback is unavailable.
pass
class FaceSwapScript(scripts.Script):
def __init__(self) -> None:
logger.info(f"FaceSwapLab {VERSION_FLAG}")
super().__init__()
@property
def units_count(self) :
return opts.data.get("faceswaplab_units_count", 3)
@property
def upscaled_swapper_in_generated(self) :
return opts.data.get("faceswaplab_upscaled_swapper", False)
@property
def upscaled_swapper_in_source(self) :
return opts.data.get("faceswaplab_upscaled_swapper_in_source", False)
@property
def enabled(self) -> bool :
"""Return True if any unit is enabled and the state is not interupted"""
return any([u.enable for u in self.units]) and not shared.state.interrupted
@property
def keep_original_images(self) :
return opts.data.get("faceswaplab_keep_original", False)
@property
def swap_in_generated_units(self) :
return [u for u in self.units if u.swap_in_generated and u.enable]
@property
def swap_in_source_units(self) :
return [u for u in self.units if u.swap_in_source and u.enable]
def title(self):
return f"faceswaplab"
def show(self, is_img2img):
return scripts.AlwaysVisible
def ui(self, is_img2img):
with gr.Accordion(f"FaceSwapLab {VERSION_FLAG}", open=False):
components = []
for i in range(1, self.units_count + 1):
components += faceswaplab_unit_ui.faceswap_unit_ui(is_img2img, i)
upscaler = faceswaplab_tab.upscaler_ui()
# If the order is modified, the before_process should be changed accordingly.
return components + upscaler
# def make_script_first(self,p: StableDiffusionProcessing) :
# FIXME : not really useful, will only impact postprocessing (kept for further testing)
# runner : scripts.ScriptRunner = p.scripts
# alwayson = runner.alwayson_scripts
# alwayson.pop(alwayson.index(self))
# alwayson.insert(0, self)
# print("Running in ", alwayson.index(self), "position")
# logger.info("Running scripts : %s", pformat(runner.alwayson_scripts))
def read_config(self, p : StableDiffusionProcessing, *components) :
# The order of processing for the components is important:
# the method first processes the face swap units, then the postprocessing options.
# self.make_script_first(p)
self.units: List[FaceSwapUnitSettings] = []
#Parse and convert units flat components into FaceSwapUnitSettings
for i in range(0, self.units_count):
self.units += [FaceSwapUnitSettings.get_unit_configuration(i, components)]
for i, u in enumerate(self.units):
logger.debug("%s, %s", pformat(i), pformat(u))
#Parse the postprocessing options
#We must first find where to start from (after face swapping units)
len_conf: int = len(fields(FaceSwapUnitSettings))
shift: int = self.units_count * len_conf
self.postprocess_options = PostProcessingOptions(
*components[shift : shift + len(fields(PostProcessingOptions))]
)
logger.debug("%s", pformat(self.postprocess_options))
if self.enabled :
p.do_not_save_samples = not self.keep_original_images
def process(self, p: StableDiffusionProcessing, *components):
self.read_config(p, *components)
# If this is an img2img run, check whether face swapping in the source images is required.
if isinstance(p, StableDiffusionProcessingImg2Img):
if self.enabled and len(self.swap_in_source_units) > 0:
init_images : List[Tuple[Optional[Image.Image], Optional[str]]] = [(img,None) for img in p.init_images]
new_inits = swapper.process_images_units(get_current_model(), self.swap_in_source_units,images=init_images, upscaled_swapper=self.upscaled_swapper_in_source,force_blend=True)
logger.info(f"processed init images: {len(init_images)}")
if new_inits is not None :
p.init_images = [img[0] for img in new_inits]
def postprocess(self, p : StableDiffusionProcessing, processed: Processed, *args):
if self.enabled :
# Get the original images without the grid
orig_images : List[Image.Image] = processed.images[processed.index_of_first_image:]
orig_infotexts : List[str] = processed.infotexts[processed.index_of_first_image:]
keep_original = self.keep_original_images
# This is where the images and infotexts of swapped images will be stored
images = []
infotexts = []
if (len(self.swap_in_generated_units))>0 :
for i,(img,info) in enumerate(zip(orig_images, orig_infotexts)):
batch_index = i%p.batch_size
swapped_images = swapper.process_images_units(get_current_model(), self.swap_in_generated_units, images=[(img,info)], upscaled_swapper=self.upscaled_swapper_in_generated)
if swapped_images is None :
continue
logger.info(f"{len(swapped_images)} images swapped")
for swp_img, new_info in swapped_images :
img = swp_img # Will only swap the last image in the batch in next units (FIXME : hard to fix properly but not really critical)
if swp_img is not None :
save_img_debug(swp_img,"Before apply mask")
swp_img = imgutils.apply_mask(swp_img, p, batch_index)
save_img_debug(swp_img,"After apply mask")
try :
if self.postprocess_options is not None:
swp_img = enhance_image(swp_img, self.postprocess_options)
except Exception as e:
logger.error("Failed to upscale : %s", e)
logger.info("Add swp image to processed")
images.append(swp_img)
infotexts.append(new_info)
if p.outpath_samples and opts.samples_save :
save_image(swp_img, p.outpath_samples, "", p.all_seeds[batch_index], p.all_prompts[batch_index], opts.samples_format,info=new_info, p=p, suffix="-swapped")
else :
logger.error("swp image is None")
else :
keep_original=True
# Generate grid :
if opts.return_grid and len(images) > 1:
# FIXME: Use the sd grid method; note that if blending is not active, the result will be a bit messy.
grid = imgutils.create_square_image(images)
text = processed.infotexts[0]
infotexts.insert(0, text)
if opts.enable_pnginfo:
grid.info["parameters"] = text
images.insert(0, grid)
if keep_original:
# If we want to keep original images, we add all existing (including grid this time)
images += processed.images
infotexts += processed.infotexts
processed.images = images
processed.infotexts = infotexts

@ -0,0 +1,70 @@
from PIL import Image
import numpy as np
from fastapi import FastAPI, Body
from fastapi.exceptions import HTTPException
from modules.api.models import *
from modules.api import api
from scripts.faceswaplab_api.faceswaplab_api_types import FaceSwapUnit, FaceSwapRequest, FaceSwapResponse
from scripts.faceswaplab_globals import VERSION_FLAG
import gradio as gr
from typing import List, Optional
from scripts.faceswaplab_swapping import swapper
from scripts.faceswaplab_utils.faceswaplab_logging import save_img_debug
from scripts.faceswaplab_ui.faceswaplab_unit_settings import FaceSwapUnitSettings
from scripts.faceswaplab_utils.imgutils import (pil_to_cv2,check_against_nsfw, base64_to_pil)
from scripts.faceswaplab_utils.models_utils import get_current_model
from modules.shared import opts
def encode_to_base64(image):
if type(image) is str:
return image
elif type(image) is Image.Image:
return api.encode_pil_to_base64(image)
elif type(image) is np.ndarray:
return encode_np_to_base64(image)
else:
return ""
def encode_np_to_base64(image):
pil = Image.fromarray(image)
return api.encode_pil_to_base64(pil)
def faceswaplab_api(_: gr.Blocks, app: FastAPI):
@app.get("/faceswaplab/version", tags=["faceswaplab"], description="Get faceswaplab version")
async def version():
return {"version": VERSION_FLAG}
# use post as we consider the method non idempotent (which is debatable)
@app.post("/faceswaplab/swap_face", tags=["faceswaplab"], description="Swap a face in an image using units")
async def swap_face(request : FaceSwapRequest) -> FaceSwapResponse:
units : List[FaceSwapUnitSettings]= []
src_image : Optional[Image.Image] = base64_to_pil(request.image)
response = FaceSwapResponse(images = [], infos=[])
if src_image is not None :
for u in request.units:
units.append(
FaceSwapUnitSettings(source_img=base64_to_pil(u.source_img),
source_face = u.source_face,
_batch_files = u.get_batch_images(),
blend_faces= u.blend_faces,
enable = True,
same_gender = u.same_gender,
check_similarity=u.check_similarity,
_compute_similarity=u.compute_similarity,
min_ref_sim= u.min_ref_sim,
min_sim= u.min_sim,
_faces_index = ",".join([str(i) for i in (u.faces_index)]),
swap_in_generated=True,
swap_in_source=False
)
)
swapped_images = swapper.process_images_units(get_current_model(), images=[(src_image,None)], units=units, upscaled_swapper=opts.data.get("faceswaplab_upscaled_swapper", False))
for img, info in swapped_images:
response.images.append(encode_to_base64(img))
response.infos.append(info)
return response
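# Illustrative client-side check (a sketch; assumes the server runs on the
# default address used in this repo's examples):
#
# import requests
# print(requests.get("http://127.0.0.1:7860/faceswaplab/version").json())
# # e.g. {"version": "v1.1.0"}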

@ -0,0 +1,73 @@
from scripts.faceswaplab_swapping import swapper
import numpy as np
from typing import List, Tuple
import dill as pickle
import gradio as gr
from insightface.app.common import Face
from PIL import Image
from scripts.faceswaplab_utils.imgutils import (pil_to_cv2,check_against_nsfw, base64_to_pil)
from scripts.faceswaplab_utils.faceswaplab_logging import logger
from pydantic import BaseModel, Field
from scripts.faceswaplab_postprocessing.postprocessing_options import InpaintingWhen
class FaceSwapUnit(BaseModel) :
# The image given in reference
source_img: str = Field(description='base64 reference image', examples=["data:image/jpeg;base64,/9j/4AAQSkZJRgABAQECWAJYAAD...."], default=None)
# The checkpoint file
source_face : str = Field(description='face checkpoint (from models/faceswaplab/faces)',examples=["my_face.pkl"], default=None)
# base64 batch source images
batch_images: Tuple[str, ...] = Field(description='list of base64 batch source images',examples=["data:image/jpeg;base64,/9j/4AAQSkZJRgABAQECWAJYAAD....", "data:image/jpeg;base64,/9j/4AAQSkZJRgABAQECWAJYAAD...."], default=None)
# Will blend faces if True
blend_faces: bool = Field(description='Will blend faces if True', default=True)
# Use same gender filtering
same_gender: bool = Field(description='Use same gender filtering', default=True)
# If True, discard images with low similarity
check_similarity : bool = Field(description='If True, discard images with low similarity', default=False)
# if True will compute similarity and add it to the image info
compute_similarity : bool = Field(description='If True will compute similarity and add it to the image info', default=False)
# Minimum similarity against the used face (reference, batch or checkpoint)
min_sim: float = Field(description='Minimum similarity against the used face (reference, batch or checkpoint)', default=0.0)
# Minimum similarity against the reference (reference or checkpoint if checkpoint is given)
min_ref_sim: float = Field(description='Minimum similarity against the reference (reference or checkpoint if checkpoint is given)', default=0.0)
# The face index to use for swapping
faces_index: Tuple[int, ...] = Field(description='The face index to use for swapping, list of face numbers starting from 0', default=(0,))
def get_batch_images(self) -> List[Image.Image] :
images = []
if self.batch_images :
for img in self.batch_images :
images.append(base64_to_pil(img))
return images
class PostProcessingOptions (BaseModel):
face_restorer_name: str = Field(description='face restorer name', default=None)
restorer_visibility: float = Field(description='face restorer visibility', default=1, le=1, ge=0)
codeformer_weight: float = Field(description='face restorer codeformer weight', default=1, le=1, ge=0)
upscaler_name: str = Field(description='upscaler name', default=None)
scale: float = Field(description='upscaling scale', default=1, le=10, ge=0)
upscale_visibility: float = Field(description='upscaler visibility', default=1, le=1, ge=0)
inpainting_denoising_strengh : float = Field(description='Inpainting denoising strength', default=0, lt=1, ge=0)
inpainting_prompt : str = Field(description='Inpainting prompt', examples=["Portrait of a [gender]"], default="Portrait of a [gender]")
inpainting_negative_prompt : str = Field(description='Inpainting negative prompt', examples=["Deformed, blurry, bad anatomy, disfigured, poorly drawn face, mutation"], default="")
inpainting_steps : int = Field(description='Inpainting steps', examples=[20], ge=1, le=150, default=20)
inpainting_sampler : str = Field(description='Inpainting sampler',examples=["Euler"], default="Euler")
inpainting_when : InpaintingWhen = Field(description='When inpainting happens', examples=[e.value for e in InpaintingWhen.__members__.values()], default=InpaintingWhen.NEVER)
class FaceSwapRequest(BaseModel) :
image : str = Field(description='base64 reference image', examples=["data:image/jpeg;base64,/9j/4AAQSkZJRgABAQECWAJYAAD...."], default=None)
units : List[FaceSwapUnit]
postprocessing : PostProcessingOptions
class FaceSwapResponse(BaseModel) :
images : List[str] = Field(description='base64 swapped image',default=None)
infos : List[str]

@ -0,0 +1,11 @@
from scripts.faceswaplab_utils.faceswaplab_logging import logger
import os
MODELS_DIR = os.path.abspath(os.path.join("models","faceswaplab"))
ANALYZER_DIR = os.path.abspath(os.path.join(MODELS_DIR, "analysers"))
FACE_PARSER_DIR = os.path.abspath(os.path.join(MODELS_DIR, "parser"))
VERSION_FLAG = "v1.1.0"
EXTENSION_PATH=os.path.join("extensions","sd-webui-faceswaplab")
NSFW_SCORE = 0.7

@ -0,0 +1,76 @@
from modules.face_restoration import FaceRestoration
from modules.upscaler import UpscalerData
from scripts.faceswaplab_utils.faceswaplab_logging import logger
from PIL import Image
import numpy as np
from modules import shared
from scripts.faceswaplab_utils import imgutils
from modules import shared, processing, codeformer_model
from modules.processing import (StableDiffusionProcessingImg2Img)
from scripts.faceswaplab_postprocessing.postprocessing_options import PostProcessingOptions, InpaintingWhen
from modules import sd_models
from scripts.faceswaplab_swapping import swapper
def img2img_diffusion(img : Image.Image, pp: PostProcessingOptions) -> Image.Image :
if pp.inpainting_denoising_strengh == 0 :
return img
try :
logger.info(
f"""Inpainting face
Sampler : {pp.inpainting_sampler}
inpainting_denoising_strength : {pp.inpainting_denoising_strengh}
inpainting_steps : {pp.inpainting_steps}
"""
)
if not isinstance(pp.inpainting_sampler, str) :
pp.inpainting_sampler = "Euler" # fall back to the default sampler
logger.info("Sending faces to img2img")
img = img.copy()
faces = swapper.get_faces(imgutils.pil_to_cv2(img))
if faces:
for face in faces:
bbox = face.bbox.astype(int)
mask = imgutils.create_mask(img, bbox)
prompt = pp.inpainting_prompt.replace("[gender]", "man" if face["gender"] == 1 else "woman")
negative_prompt = pp.inpainting_negative_prompt.replace("[gender]", "man" if face["gender"] == 1 else "woman")
logger.info("Denoising prompt : %s", prompt)
logger.info("Denoising strenght : %s", pp.inpainting_denoising_strengh)
i2i_kwargs = {"sampler_name" :pp.inpainting_sampler,
"do_not_save_samples":True,
"steps" :pp.inpainting_steps,
"width" : img.width,
"inpainting_fill":1,
"inpaint_full_res":True,
"height" : img.height,
"mask": mask,
"prompt" : prompt,
"negative_prompt" :negative_prompt,
"denoising_strength" :pp.inpainting_denoising_strengh}
current_model_checkpoint = shared.opts.sd_model_checkpoint
if pp.inpainting_model and pp.inpainting_model != "Current" :
# Change checkpoint
shared.opts.sd_model_checkpoint = pp.inpainting_model
sd_models.select_checkpoint()
sd_models.load_model()
i2i_p = StableDiffusionProcessingImg2Img([img], **i2i_kwargs)
i2i_processed = processing.process_images(i2i_p)
if pp.inpainting_model and pp.inpainting_model != "Current" :
# Restore checkpoint
shared.opts.sd_model_checkpoint = current_model_checkpoint
sd_models.select_checkpoint()
sd_models.load_model()
images = i2i_processed.images
if len(images) > 0 :
img = images[0]
return img
except Exception as e :
logger.error("Failed to apply img2img to face : %s", e)
import traceback
traceback.print_exc()
raise e
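# Illustrative call (a sketch; requires the webui runtime and a
# PostProcessingOptions whose inpainting_denoising_strengh is > 0, otherwise
# the image is returned unchanged):
#
# result = img2img_diffusion(image, pp_options)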

@ -0,0 +1,27 @@
from modules.face_restoration import FaceRestoration
from scripts.faceswaplab_utils.faceswaplab_logging import logger
from PIL import Image
from scripts.faceswaplab_postprocessing.postprocessing_options import PostProcessingOptions, InpaintingWhen
from scripts.faceswaplab_postprocessing.i2i_pp import img2img_diffusion
from scripts.faceswaplab_postprocessing.upscaling import upscale_img, restore_face
def enhance_image(image: Image.Image, pp_options: PostProcessingOptions) -> Image.Image:
result_image = image
try :
if pp_options.inpainting_when == InpaintingWhen.BEFORE_UPSCALING.value :
result_image = img2img_diffusion(image, pp_options)
result_image = upscale_img(result_image, pp_options)
if pp_options.inpainting_when == InpaintingWhen.BEFORE_RESTORE_FACE.value :
result_image = img2img_diffusion(image,pp_options)
result_image = restore_face(result_image, pp_options)
if pp_options.inpainting_when == InpaintingWhen.AFTER_ALL.value :
result_image = img2img_diffusion(image,pp_options)
except Exception as e:
logger.error("Failed to upscale %s", e)
return result_image

@ -0,0 +1,43 @@
from modules.face_restoration import FaceRestoration
from modules.upscaler import UpscalerData
from dataclasses import dataclass
from modules import shared
from enum import Enum
class InpaintingWhen(Enum):
NEVER = "Never"
BEFORE_UPSCALING = "Before Upscaling/all"
BEFORE_RESTORE_FACE = "After Upscaling/Before Restore Face"
AFTER_ALL = "After All"
@dataclass
class PostProcessingOptions:
face_restorer_name: str = ""
restorer_visibility: float = 0.5
codeformer_weight: float = 1
upscaler_name: str = ""
scale: int = 1
upscale_visibility: float = 0.5
inpainting_denoising_strengh : float = 0
inpainting_prompt : str = ""
inpainting_negative_prompt : str = ""
inpainting_steps : int = 20
inpainting_sampler : str = "Euler"
inpainting_when : InpaintingWhen = InpaintingWhen.BEFORE_UPSCALING
inpainting_model : str = "Current"
@property
def upscaler(self) -> UpscalerData:
for upscaler in shared.sd_upscalers:
if upscaler.name == self.upscaler_name:
return upscaler
return None
@property
def face_restorer(self) -> FaceRestoration:
for face_restorer in shared.face_restorers:
if face_restorer.name() == self.face_restorer_name:
return face_restorer
return None
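# Illustrative usage (a sketch; assumes it runs inside the webui process so
# that shared.sd_upscalers and shared.face_restorers are populated):
#
# pp = PostProcessingOptions(face_restorer_name="CodeFormer", upscaler_name="LDSR", scale=2)
# upscaler = pp.upscaler        # resolved from shared.sd_upscalers, else None
# restorer = pp.face_restorer   # resolved from shared.face_restorers, else None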

@ -0,0 +1,42 @@
from scripts.faceswaplab_postprocessing.postprocessing_options import PostProcessingOptions, InpaintingWhen
from scripts.faceswaplab_utils.faceswaplab_logging import logger
from PIL import Image
import numpy as np
from modules import shared, processing, codeformer_model
def upscale_img(image : Image.Image, pp_options :PostProcessingOptions) -> Image.Image :
if pp_options.upscaler is not None and pp_options.upscaler.name != "None":
original_image = image.copy()
logger.info(
"Upscale with %s scale = %s",
pp_options.upscaler.name,
pp_options.scale,
)
result_image = pp_options.upscaler.scaler.upscale(
image, pp_options.scale, pp_options.upscaler.data_path
)
if pp_options.scale == 1:
result_image = Image.blend(
original_image, result_image, pp_options.upscale_visibility
)
return result_image
return image
def restore_face(image : Image.Image, pp_options : PostProcessingOptions) -> Image.Image :
if pp_options.face_restorer is not None:
original_image = image.copy()
logger.info("Restore face with %s", pp_options.face_restorer.name())
numpy_image = np.array(image)
if pp_options.face_restorer_name == "CodeFormer" :
numpy_image = codeformer_model.codeformer.restore(numpy_image, w=pp_options.codeformer_weight)
else :
numpy_image = pp_options.face_restorer.restore(numpy_image)
restored_image = Image.fromarray(numpy_image)
result_image = Image.blend(
original_image, restored_image, pp_options.restorer_visibility
)
return result_image
return image

@ -0,0 +1,53 @@
from scripts.faceswaplab_utils.models_utils import get_models
from modules import script_callbacks, shared
import gradio as gr
def on_ui_settings():
section = ('faceswaplab', "FaceSwapLab")
models = get_models()
shared.opts.add_option("faceswaplab_model", shared.OptionInfo(
models[0] if len(models) > 0 else "None", "FaceSwapLab FaceSwap Model", gr.Dropdown, {"interactive": True, "choices" : models}, section=section))
shared.opts.add_option("faceswaplab_keep_original", shared.OptionInfo(
False, "keep original image before swapping", gr.Checkbox, {"interactive": True}, section=section))
shared.opts.add_option("faceswaplab_units_count", shared.OptionInfo(
3, "Max faces units (requires restart)", gr.Slider, {"minimum": 1, "maximum": 10, "step": 1}, section=section))
shared.opts.add_option("faceswaplab_detection_threshold", shared.OptionInfo(
0.5, "Detection threshold ", gr.Slider, {"minimum": 0.1, "maximum": 0.99, "step": 0.001}, section=section))
shared.opts.add_option("faceswaplab_pp_default_face_restorer", shared.OptionInfo(
None, "UI Default post processing face restorer (requires restart)", gr.Dropdown, {"interactive": True, "choices" : ["None"] + [x.name() for x in shared.face_restorers]}, section=section))
shared.opts.add_option("faceswaplab_pp_default_face_restorer_visibility", shared.OptionInfo(
1, "UI Default post processing face restorer visibility (requires restart)", gr.Slider, {"minimum": 0, "maximum": 1, "step": 0.001}, section=section))
shared.opts.add_option("faceswaplab_pp_default_face_restorer_weight", shared.OptionInfo(
1, "UI Default post processing face restorer weight (requires restart)", gr.Slider, {"minimum": 0, "maximum": 1, "step": 0.001}, section=section))
shared.opts.add_option("faceswaplab_pp_default_upscaler", shared.OptionInfo(
None, "UI Default post processing upscaler (requires restart)", gr.Dropdown, {"interactive": True, "choices" : [upscaler.name for upscaler in shared.sd_upscalers]}, section=section))
shared.opts.add_option("faceswaplab_pp_default_upscaler_visibility", shared.OptionInfo(
1, "UI Default post processing upscaler visibility(requires restart)", gr.Slider, {"minimum": 0, "maximum": 1, "step": 0.001}, section=section))
shared.opts.add_option("faceswaplab_upscaled_swapper", shared.OptionInfo(
False, "Upscaled swapper. Applied only to the swapped faces. Apply transformations before merging with the original image.", gr.Checkbox, {"interactive": True}, section=section))
shared.opts.add_option("faceswaplab_upscaled_swapper_upscaler", shared.OptionInfo(
None, "Upscaled swapper upscaler (Recommanded : LDSR but slow)", gr.Dropdown, {"interactive": True, "choices" : [upscaler.name for upscaler in shared.sd_upscalers]}, section=section))
shared.opts.add_option("faceswaplab_upscaled_swapper_sharpen", shared.OptionInfo(
False, "Upscaled swapper sharpen", gr.Checkbox, {"interactive": True}, section=section))
shared.opts.add_option("faceswaplab_upscaled_swapper_fixcolor", shared.OptionInfo(
False, "Upscaled swapper color correction", gr.Checkbox, {"interactive": True}, section=section))
shared.opts.add_option("faceswaplab_upscaled_improved_mask", shared.OptionInfo(
True, "Use improved segmented mask (use pastenet to mask only the face)", gr.Checkbox, {"interactive": True}, section=section))
shared.opts.add_option("faceswaplab_upscaled_swapper_face_restorer", shared.OptionInfo(
None, "Upscaled swapper face restorer", gr.Dropdown, {"interactive": True, "choices" : ["None"] + [x.name() for x in shared.face_restorers]}, section=section))
shared.opts.add_option("faceswaplab_upscaled_swapper_face_restorer_visibility", shared.OptionInfo(
1, "Upscaled swapper face restorer visibility", gr.Slider, {"minimum": 0, "maximum": 1, "step": 0.001}, section=section))
shared.opts.add_option("faceswaplab_upscaled_swapper_face_restorer_weight", shared.OptionInfo(
1, "Upscaled swapper face restorer weight (codeformer)", gr.Slider, {"minimum": 0, "maximum": 1, "step": 0.001}, section=section))
shared.opts.add_option("faceswaplab_upscaled_swapper_fthresh", shared.OptionInfo(
10, "Upscaled swapper fthresh (diff sensitivity) 10 = default behaviour. Low impact.", gr.Slider, {"minimum": 5, "maximum": 250, "step": 1}, section=section))
shared.opts.add_option("faceswaplab_upscaled_swapper_erosion", shared.OptionInfo(
1, "Upscaled swapper mask erosion factor, 1 = default behaviour. The larger it is, the more blur is applied around the face. Too large and the facial change is no longer visible.", gr.Slider, {"minimum": 0, "maximum": 10, "step": 0.001}, section=section))
script_callbacks.on_ui_settings(on_ui_settings)
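# Illustrative read-back (the pattern used elsewhere in this repo to read
# these settings at runtime):
#
# from modules.shared import opts
# units_count = opts.data.get("faceswaplab_units_count", 3)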

@ -0,0 +1,85 @@
import cv2
import numpy as np
import torch
from torchvision.transforms.functional import normalize
from scripts.faceswaplab_swapping.parsing import init_parsing_model
from functools import lru_cache
from typing import Union, List
from torch import device as torch_device
@lru_cache
def get_parsing_model(device: torch_device) -> torch.nn.Module:
"""
Returns an instance of the parsing model.
The returned model is cached for faster subsequent access.
Args:
device: The torch device to use for computations.
Returns:
The parsing model.
"""
return init_parsing_model(device=device)
def convert_image_to_tensor(images: Union[np.ndarray, List[np.ndarray]], convert_bgr_to_rgb: bool = True, use_float32: bool = True) -> Union[torch.Tensor, List[torch.Tensor]]:
"""
Converts an image or a list of images to PyTorch tensor.
Args:
images: An image or a list of images in numpy.ndarray format.
convert_bgr_to_rgb: A boolean flag indicating if the conversion from BGR to RGB should be performed.
use_float32: A boolean flag indicating if the tensor should be converted to float32.
Returns:
PyTorch tensor or a list of PyTorch tensors.
"""
def _convert_single_image_to_tensor(image: np.ndarray, convert_bgr_to_rgb: bool, use_float32: bool) -> torch.Tensor:
if image.shape[2] == 3 and convert_bgr_to_rgb:
if image.dtype == 'float64':
image = image.astype('float32')
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
image_tensor = torch.from_numpy(image.transpose(2, 0, 1))
if use_float32:
image_tensor = image_tensor.float()
return image_tensor
if isinstance(images, list):
return [_convert_single_image_to_tensor(image, convert_bgr_to_rgb, use_float32) for image in images]
else:
return _convert_single_image_to_tensor(images, convert_bgr_to_rgb, use_float32)
def generate_face_mask(face_image: np.ndarray, device: torch.device) -> np.ndarray:
"""
Generates a face mask given a face image.
Args:
face_image: The face image in numpy.ndarray format.
device: The torch device to use for computations.
Returns:
The face mask as a numpy.ndarray.
"""
# Resize the face image for the model
resized_face_image = cv2.resize(face_image, (512, 512), interpolation=cv2.INTER_LINEAR)
# Preprocess the image
face_input = convert_image_to_tensor((resized_face_image.astype('float32') / 255.0), convert_bgr_to_rgb=True, use_float32=True)
normalize(face_input, (0.5, 0.5, 0.5), (0.5, 0.5, 0.5), inplace=True)
assert isinstance(face_input,torch.Tensor)
face_input = torch.unsqueeze(face_input, 0).to(device)
# Pass the image through the model
with torch.no_grad():
model_output = get_parsing_model(device)(face_input)[0]
model_output = model_output.argmax(dim=1).squeeze().cpu().numpy()
# Generate the mask from the model output
parse_mask = np.zeros(model_output.shape)
MASK_COLOR_MAP = [0, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 0, 255, 0, 0, 0]
for idx, color in enumerate(MASK_COLOR_MAP):
parse_mask[model_output == idx] = color
# Resize the mask to match the original image
face_mask = cv2.resize(parse_mask, (face_image.shape[1], face_image.shape[0]))
return face_mask
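# Illustrative usage (a sketch; "face.jpg" is a hypothetical input path, and
# a CUDA device is assumed with CPU as fallback):
#
# import cv2, torch
# face = cv2.imread("face.jpg")  # BGR image, converted to RGB inside the helpers
# device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# mask = generate_face_mask(face, device)
# cv2.imwrite("face_mask.png", mask.astype("uint8"))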

@ -0,0 +1,81 @@
"""
Code from codeformer https://github.com/sczhou/CodeFormer
S-Lab License 1.0
Copyright 2022 S-Lab
Redistribution and use for non-commercial purpose in source and
binary forms, with or without modification, are permitted provided
that the following conditions are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
In the event that redistribution and/or use for commercial purpose in
source or binary forms, with or without modification is required,
please contact the contributor(s) of the work.
"""
import torch
import cv2
import os
import torch
from torch.hub import download_url_to_file, get_dir
from .parsenet import ParseNet
from urllib.parse import urlparse
from scripts.faceswaplab_globals import FACE_PARSER_DIR
ROOT_DIR = FACE_PARSER_DIR
def load_file_from_url(url, model_dir=None, progress=True, file_name=None):
"""Ref:https://github.com/1adrianb/face-alignment/blob/master/face_alignment/utils.py
"""
if model_dir is None:
hub_dir = get_dir()
model_dir = os.path.join(hub_dir, 'checkpoints')
os.makedirs(os.path.join(ROOT_DIR, model_dir), exist_ok=True)
parts = urlparse(url)
filename = os.path.basename(parts.path)
if file_name is not None:
filename = file_name
cached_file = os.path.abspath(os.path.join(ROOT_DIR, model_dir, filename))
if not os.path.exists(cached_file):
print(f'Downloading: "{url}" to {cached_file}\n')
download_url_to_file(url, cached_file, hash_prefix=None, progress=progress)
return cached_file
def init_parsing_model(device='cuda'):
model = ParseNet(in_size=512, out_size=512, parsing_ch=19)
model_url = 'https://github.com/sczhou/CodeFormer/releases/download/v0.1.0/parsing_parsenet.pth'
model_path = load_file_from_url(url=model_url, model_dir='weights/facelib', progress=True, file_name=None)
load_net = torch.load(model_path, map_location=lambda storage, loc: storage)
model.load_state_dict(load_net, strict=True)
model.eval()
model = model.to(device)
return model
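# Illustrative usage (a sketch; downloads the ParseNet weights on first call):
#
# model = init_parsing_model(device="cpu")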

@ -0,0 +1,680 @@
"""
Code from codeformer https://github.com/sczhou/CodeFormer
S-Lab License 1.0
Copyright 2022 S-Lab
Redistribution and use for non-commercial purpose in source and
binary forms, with or without modification, are permitted provided
that the following conditions are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
In the event that redistribution and/or use for commercial purpose in
source or binary forms, with or without modification is required,
please contact the contributor(s) of the work.
Modified from https://github.com/chaofengc/PSFRGAN
PSFR-GAN (c) by Chaofeng Chen
PSFR-GAN is licensed under a
Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International License.
You should have received a copy of the license along with this
work. If not, see <http://creativecommons.org/licenses/by-nc-sa/4.0/>.
Attribution-NonCommercial-ShareAlike 4.0 International
=======================================================================
Creative Commons Corporation ("Creative Commons") is not a law firm and
does not provide legal services or legal advice. Distribution of
Creative Commons public licenses does not create a lawyer-client or
other relationship. Creative Commons makes its licenses and related
information available on an "as-is" basis. Creative Commons gives no
warranties regarding its licenses, any material licensed under their
terms and conditions, or any related information. Creative Commons
disclaims all liability for damages resulting from their use to the
fullest extent possible.
Using Creative Commons Public Licenses
Creative Commons public licenses provide a standard set of terms and
conditions that creators and other rights holders may use to share
original works of authorship and other material subject to copyright
and certain other rights specified in the public license below. The
following considerations are for informational purposes only, are not
exhaustive, and do not form part of our licenses.
Considerations for licensors: Our public licenses are
intended for use by those authorized to give the public
permission to use material in ways otherwise restricted by
copyright and certain other rights. Our licenses are
irrevocable. Licensors should read and understand the terms
and conditions of the license they choose before applying it.
Licensors should also secure all rights necessary before
applying our licenses so that the public can reuse the
material as expected. Licensors should clearly mark any
material not subject to the license. This includes other CC-
licensed material, or material used under an exception or
limitation to copyright. More considerations for licensors:
wiki.creativecommons.org/Considerations_for_licensors
Considerations for the public: By using one of our public
licenses, a licensor grants the public permission to use the
licensed material under specified terms and conditions. If
the licensor's permission is not necessary for any reason--for
example, because of any applicable exception or limitation to
copyright--then that use is not regulated by the license. Our
licenses grant only permissions under copyright and certain
other rights that a licensor has authority to grant. Use of
the licensed material may still be restricted for other
reasons, including because others have copyright or other
rights in the material. A licensor may make special requests,
such as asking that all changes be marked or described.
Although not required by our licenses, you are encouraged to
respect those requests where reasonable. More_considerations
for the public:
wiki.creativecommons.org/Considerations_for_licensees
=======================================================================
Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International
Public License
By exercising the Licensed Rights (defined below), You accept and agree
to be bound by the terms and conditions of this Creative Commons
Attribution-NonCommercial-ShareAlike 4.0 International Public License
("Public License"). To the extent this Public License may be
interpreted as a contract, You are granted the Licensed Rights in
consideration of Your acceptance of these terms and conditions, and the
Licensor grants You such rights in consideration of benefits the
Licensor receives from making the Licensed Material available under
these terms and conditions.
Section 1 -- Definitions.
a. Adapted Material means material subject to Copyright and Similar
Rights that is derived from or based upon the Licensed Material
and in which the Licensed Material is translated, altered,
arranged, transformed, or otherwise modified in a manner requiring
permission under the Copyright and Similar Rights held by the
Licensor. For purposes of this Public License, where the Licensed
Material is a musical work, performance, or sound recording,
Adapted Material is always produced where the Licensed Material is
synched in timed relation with a moving image.
b. Adapter's License means the license You apply to Your Copyright
and Similar Rights in Your contributions to Adapted Material in
accordance with the terms and conditions of this Public License.
c. BY-NC-SA Compatible License means a license listed at
creativecommons.org/compatiblelicenses, approved by Creative
Commons as essentially the equivalent of this Public License.
d. Copyright and Similar Rights means copyright and/or similar rights
closely related to copyright including, without limitation,
performance, broadcast, sound recording, and Sui Generis Database
Rights, without regard to how the rights are labeled or
categorized. For purposes of this Public License, the rights
specified in Section 2(b)(1)-(2) are not Copyright and Similar
Rights.
e. Effective Technological Measures means those measures that, in the
absence of proper authority, may not be circumvented under laws
fulfilling obligations under Article 11 of the WIPO Copyright
Treaty adopted on December 20, 1996, and/or similar international
agreements.
f. Exceptions and Limitations means fair use, fair dealing, and/or
any other exception or limitation to Copyright and Similar Rights
that applies to Your use of the Licensed Material.
g. License Elements means the license attributes listed in the name
of a Creative Commons Public License. The License Elements of this
Public License are Attribution, NonCommercial, and ShareAlike.
h. Licensed Material means the artistic or literary work, database,
or other material to which the Licensor applied this Public
License.
i. Licensed Rights means the rights granted to You subject to the
terms and conditions of this Public License, which are limited to
all Copyright and Similar Rights that apply to Your use of the
Licensed Material and that the Licensor has authority to license.
j. Licensor means the individual(s) or entity(ies) granting rights
under this Public License.
k. NonCommercial means not primarily intended for or directed towards
commercial advantage or monetary compensation. For purposes of
this Public License, the exchange of the Licensed Material for
other material subject to Copyright and Similar Rights by digital
file-sharing or similar means is NonCommercial provided there is
no payment of monetary compensation in connection with the
exchange.
l. Share means to provide material to the public by any means or
process that requires permission under the Licensed Rights, such
as reproduction, public display, public performance, distribution,
dissemination, communication, or importation, and to make material
available to the public including in ways that members of the
public may access the material from a place and at a time
individually chosen by them.
m. Sui Generis Database Rights means rights other than copyright
resulting from Directive 96/9/EC of the European Parliament and of
the Council of 11 March 1996 on the legal protection of databases,
as amended and/or succeeded, as well as other essentially
equivalent rights anywhere in the world.
n. You means the individual or entity exercising the Licensed Rights
under this Public License. Your has a corresponding meaning.
Section 2 -- Scope.
a. License grant.
1. Subject to the terms and conditions of this Public License,
the Licensor hereby grants You a worldwide, royalty-free,
non-sublicensable, non-exclusive, irrevocable license to
exercise the Licensed Rights in the Licensed Material to:
a. reproduce and Share the Licensed Material, in whole or
in part, for NonCommercial purposes only; and
b. produce, reproduce, and Share Adapted Material for
NonCommercial purposes only.
2. Exceptions and Limitations. For the avoidance of doubt, where
Exceptions and Limitations apply to Your use, this Public
License does not apply, and You do not need to comply with
its terms and conditions.
3. Term. The term of this Public License is specified in Section
6(a).
4. Media and formats; technical modifications allowed. The
Licensor authorizes You to exercise the Licensed Rights in
all media and formats whether now known or hereafter created,
and to make technical modifications necessary to do so. The
Licensor waives and/or agrees not to assert any right or
authority to forbid You from making technical modifications
necessary to exercise the Licensed Rights, including
technical modifications necessary to circumvent Effective
Technological Measures. For purposes of this Public License,
simply making modifications authorized by this Section 2(a)
(4) never produces Adapted Material.
5. Downstream recipients.
a. Offer from the Licensor -- Licensed Material. Every
recipient of the Licensed Material automatically
receives an offer from the Licensor to exercise the
Licensed Rights under the terms and conditions of this
Public License.
b. Additional offer from the Licensor -- Adapted Material.
Every recipient of Adapted Material from You
automatically receives an offer from the Licensor to
exercise the Licensed Rights in the Adapted Material
under the conditions of the Adapter's License You apply.
c. No downstream restrictions. You may not offer or impose
any additional or different terms or conditions on, or
apply any Effective Technological Measures to, the
Licensed Material if doing so restricts exercise of the
Licensed Rights by any recipient of the Licensed
Material.
6. No endorsement. Nothing in this Public License constitutes or
may be construed as permission to assert or imply that You
are, or that Your use of the Licensed Material is, connected
with, or sponsored, endorsed, or granted official status by,
the Licensor or others designated to receive attribution as
provided in Section 3(a)(1)(A)(i).
b. Other rights.
1. Moral rights, such as the right of integrity, are not
licensed under this Public License, nor are publicity,
privacy, and/or other similar personality rights; however, to
the extent possible, the Licensor waives and/or agrees not to
assert any such rights held by the Licensor to the limited
extent necessary to allow You to exercise the Licensed
Rights, but not otherwise.
2. Patent and trademark rights are not licensed under this
Public License.
3. To the extent possible, the Licensor waives any right to
collect royalties from You for the exercise of the Licensed
Rights, whether directly or through a collecting society
under any voluntary or waivable statutory or compulsory
licensing scheme. In all other cases the Licensor expressly
reserves any right to collect such royalties, including when
the Licensed Material is used other than for NonCommercial
purposes.
Section 3 -- License Conditions.
Your exercise of the Licensed Rights is expressly made subject to the
following conditions.
a. Attribution.
1. If You Share the Licensed Material (including in modified
form), You must:
a. retain the following if it is supplied by the Licensor
with the Licensed Material:
i. identification of the creator(s) of the Licensed
Material and any others designated to receive
attribution, in any reasonable manner requested by
the Licensor (including by pseudonym if
designated);
ii. a copyright notice;
iii. a notice that refers to this Public License;
iv. a notice that refers to the disclaimer of
warranties;
v. a URI or hyperlink to the Licensed Material to the
extent reasonably practicable;
b. indicate if You modified the Licensed Material and
retain an indication of any previous modifications; and
c. indicate the Licensed Material is licensed under this
Public License, and include the text of, or the URI or
hyperlink to, this Public License.
2. You may satisfy the conditions in Section 3(a)(1) in any
reasonable manner based on the medium, means, and context in
which You Share the Licensed Material. For example, it may be
reasonable to satisfy the conditions by providing a URI or
hyperlink to a resource that includes the required
information.
3. If requested by the Licensor, You must remove any of the
information required by Section 3(a)(1)(A) to the extent
reasonably practicable.
b. ShareAlike.
In addition to the conditions in Section 3(a), if You Share
Adapted Material You produce, the following conditions also apply.
1. The Adapter's License You apply must be a Creative Commons
license with the same License Elements, this version or
later, or a BY-NC-SA Compatible License.
2. You must include the text of, or the URI or hyperlink to, the
Adapter's License You apply. You may satisfy this condition
in any reasonable manner based on the medium, means, and
context in which You Share Adapted Material.
3. You may not offer or impose any additional or different terms
or conditions on, or apply any Effective Technological
Measures to, Adapted Material that restrict exercise of the
rights granted under the Adapter's License You apply.
Section 4 -- Sui Generis Database Rights.
Where the Licensed Rights include Sui Generis Database Rights that
apply to Your use of the Licensed Material:
a. for the avoidance of doubt, Section 2(a)(1) grants You the right
to extract, reuse, reproduce, and Share all or a substantial
portion of the contents of the database for NonCommercial purposes
only;
b. if You include all or a substantial portion of the database
contents in a database in which You have Sui Generis Database
Rights, then the database in which You have Sui Generis Database
Rights (but not its individual contents) is Adapted Material,
including for purposes of Section 3(b); and
c. You must comply with the conditions in Section 3(a) if You Share
all or a substantial portion of the contents of the database.
For the avoidance of doubt, this Section 4 supplements and does not
replace Your obligations under this Public License where the Licensed
Rights include other Copyright and Similar Rights.
Section 5 -- Disclaimer of Warranties and Limitation of Liability.
a. UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE
EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS
AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF
ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS,
IMPLIED, STATUTORY, OR OTHER. THIS INCLUDES, WITHOUT LIMITATION,
WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR
PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS,
ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT
KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT
ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU.
b. TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE
TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION,
NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT,
INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES,
COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR
USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN
ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR
DAMAGES. WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR
IN PART, THIS LIMITATION MAY NOT APPLY TO YOU.
c. The disclaimer of warranties and limitation of liability provided
above shall be interpreted in a manner that, to the extent
possible, most closely approximates an absolute disclaimer and
waiver of all liability.
Section 6 -- Term and Termination.
a. This Public License applies for the term of the Copyright and
Similar Rights licensed here. However, if You fail to comply with
this Public License, then Your rights under this Public License
terminate automatically.
b. Where Your right to use the Licensed Material has terminated under
Section 6(a), it reinstates:
1. automatically as of the date the violation is cured, provided
it is cured within 30 days of Your discovery of the
violation; or
2. upon express reinstatement by the Licensor.
For the avoidance of doubt, this Section 6(b) does not affect any
right the Licensor may have to seek remedies for Your violations
of this Public License.
c. For the avoidance of doubt, the Licensor may also offer the
Licensed Material under separate terms or conditions or stop
distributing the Licensed Material at any time; however, doing so
will not terminate this Public License.
d. Sections 1, 5, 6, 7, and 8 survive termination of this Public
License.
Section 7 -- Other Terms and Conditions.
a. The Licensor shall not be bound by any additional or different
terms or conditions communicated by You unless expressly agreed.
b. Any arrangements, understandings, or agreements regarding the
Licensed Material not stated herein are separate from and
independent of the terms and conditions of this Public License.
Section 8 -- Interpretation.
a. For the avoidance of doubt, this Public License does not, and
shall not be interpreted to, reduce, limit, restrict, or impose
conditions on any use of the Licensed Material that could lawfully
be made without permission under this Public License.
b. To the extent possible, if any provision of this Public License is
deemed unenforceable, it shall be automatically reformed to the
minimum extent necessary to make it enforceable. If the provision
cannot be reformed, it shall be severed from this Public License
without affecting the enforceability of the remaining terms and
conditions.
c. No term or condition of this Public License will be waived and no
failure to comply consented to unless expressly agreed to by the
Licensor.
d. Nothing in this Public License constitutes or may be interpreted
as a limitation upon, or waiver of, any privileges and immunities
that apply to the Licensor or You, including from the legal
processes of any jurisdiction or authority.
=======================================================================
Creative Commons is not a party to its public
licenses. Notwithstanding, Creative Commons may elect to apply one of
its public licenses to material it publishes and in those instances
will be considered the Licensor. The text of the Creative Commons
public licenses is dedicated to the public domain under the CC0 Public
Domain Dedication. Except for the limited purpose of indicating that
material is shared under a Creative Commons public license or as
otherwise permitted by the Creative Commons policies published at
creativecommons.org/policies, Creative Commons does not authorize the
use of the trademark "Creative Commons" or any other trademark or logo
of Creative Commons without its prior written consent including,
without limitation, in connection with any unauthorized modifications
to any of its public licenses or any other arrangements,
understandings, or agreements concerning use of licensed material. For
the avoidance of doubt, this paragraph does not form part of the
public licenses.
Creative Commons may be contacted at creativecommons.org.
"""
import numpy as np
import torch.nn as nn
from torch.nn import functional as F
class NormLayer(nn.Module):
"""Normalization Layers.
Args:
channels: input channels, for batch norm and instance norm.
input_size: input shape without batch size, for layer norm.
"""
def __init__(self, channels, normalize_shape=None, norm_type='bn'):
super(NormLayer, self).__init__()
norm_type = norm_type.lower()
self.norm_type = norm_type
if norm_type == 'bn':
self.norm = nn.BatchNorm2d(channels, affine=True)
elif norm_type == 'in':
self.norm = nn.InstanceNorm2d(channels, affine=False)
elif norm_type == 'gn':
self.norm = nn.GroupNorm(32, channels, affine=True)
elif norm_type == 'pixel':
self.norm = lambda x: F.normalize(x, p=2, dim=1)
elif norm_type == 'layer':
self.norm = nn.LayerNorm(normalize_shape)
elif norm_type == 'none':
self.norm = lambda x: x * 1.0
else:
            raise NotImplementedError(f'Norm type {norm_type} not supported.')
def forward(self, x, ref=None):
if self.norm_type == 'spade':
return self.norm(x, ref)
else:
return self.norm(x)
class ReluLayer(nn.Module):
"""Relu Layer.
Args:
relu type: type of relu layer, candidates are
- ReLU
- LeakyReLU: default relu slope 0.2
- PRelu
- SELU
- none: direct pass
"""
def __init__(self, channels, relu_type='relu'):
super(ReluLayer, self).__init__()
relu_type = relu_type.lower()
if relu_type == 'relu':
self.func = nn.ReLU(True)
elif relu_type == 'leakyrelu':
self.func = nn.LeakyReLU(0.2, inplace=True)
elif relu_type == 'prelu':
self.func = nn.PReLU(channels)
elif relu_type == 'selu':
self.func = nn.SELU(True)
elif relu_type == 'none':
self.func = lambda x: x * 1.0
else:
            raise NotImplementedError(f'Relu type {relu_type} not supported.')
def forward(self, x):
return self.func(x)
class ConvLayer(nn.Module):
def __init__(self,
in_channels,
out_channels,
kernel_size=3,
scale='none',
norm_type='none',
relu_type='none',
use_pad=True,
bias=True):
super(ConvLayer, self).__init__()
self.use_pad = use_pad
self.norm_type = norm_type
if norm_type in ['bn']:
bias = False
stride = 2 if scale == 'down' else 1
self.scale_func = lambda x: x
if scale == 'up':
self.scale_func = lambda x: nn.functional.interpolate(x, scale_factor=2, mode='nearest')
self.reflection_pad = nn.ReflectionPad2d(int(np.ceil((kernel_size - 1.) / 2)))
self.conv2d = nn.Conv2d(in_channels, out_channels, kernel_size, stride, bias=bias)
self.relu = ReluLayer(out_channels, relu_type)
self.norm = NormLayer(out_channels, norm_type=norm_type)
def forward(self, x):
out = self.scale_func(x)
if self.use_pad:
out = self.reflection_pad(out)
out = self.conv2d(out)
out = self.norm(out)
out = self.relu(out)
return out
class ResidualBlock(nn.Module):
"""
Residual block recommended in: http://torch.ch/blog/2016/02/04/resnets.html
"""
def __init__(self, c_in, c_out, relu_type='prelu', norm_type='bn', scale='none'):
super(ResidualBlock, self).__init__()
if scale == 'none' and c_in == c_out:
self.shortcut_func = lambda x: x
else:
self.shortcut_func = ConvLayer(c_in, c_out, 3, scale)
scale_config_dict = {'down': ['none', 'down'], 'up': ['up', 'none'], 'none': ['none', 'none']}
scale_conf = scale_config_dict[scale]
self.conv1 = ConvLayer(c_in, c_out, 3, scale_conf[0], norm_type=norm_type, relu_type=relu_type)
self.conv2 = ConvLayer(c_out, c_out, 3, scale_conf[1], norm_type=norm_type, relu_type='none')
def forward(self, x):
identity = self.shortcut_func(x)
res = self.conv1(x)
res = self.conv2(res)
return identity + res
class ParseNet(nn.Module):
def __init__(self,
in_size=128,
out_size=128,
min_feat_size=32,
base_ch=64,
parsing_ch=19,
res_depth=10,
relu_type='LeakyReLU',
norm_type='bn',
ch_range=[32, 256]):
super().__init__()
self.res_depth = res_depth
act_args = {'norm_type': norm_type, 'relu_type': relu_type}
min_ch, max_ch = ch_range
ch_clip = lambda x: max(min_ch, min(x, max_ch)) # noqa: E731
min_feat_size = min(in_size, min_feat_size)
down_steps = int(np.log2(in_size // min_feat_size))
up_steps = int(np.log2(out_size // min_feat_size))
# =============== define encoder-body-decoder ====================
self.encoder = []
self.encoder.append(ConvLayer(3, base_ch, 3, 1))
head_ch = base_ch
for i in range(down_steps):
cin, cout = ch_clip(head_ch), ch_clip(head_ch * 2)
self.encoder.append(ResidualBlock(cin, cout, scale='down', **act_args))
head_ch = head_ch * 2
self.body = []
for i in range(res_depth):
self.body.append(ResidualBlock(ch_clip(head_ch), ch_clip(head_ch), **act_args))
self.decoder = []
for i in range(up_steps):
cin, cout = ch_clip(head_ch), ch_clip(head_ch // 2)
self.decoder.append(ResidualBlock(cin, cout, scale='up', **act_args))
head_ch = head_ch // 2
self.encoder = nn.Sequential(*self.encoder)
self.body = nn.Sequential(*self.body)
self.decoder = nn.Sequential(*self.decoder)
self.out_img_conv = ConvLayer(ch_clip(head_ch), 3)
self.out_mask_conv = ConvLayer(ch_clip(head_ch), parsing_ch)
def forward(self, x):
feat = self.encoder(x)
x = feat + self.body(feat)
x = self.decoder(x)
out_img = self.out_img_conv(x)
out_mask = self.out_mask_conv(x)
return out_mask, out_img
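# Illustrative usage sketch (added note, not part of the original file): a quick
# shape check for ParseNet under its default settings. Assumes torch is available.
#
#   import torch
#   net = ParseNet(in_size=128, out_size=128, parsing_ch=19)
#   mask, img = net(torch.randn(1, 3, 128, 128))
#   # mask: (1, 19, 128, 128) parsing logits; img: (1, 3, 128, 128) image output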

@ -0,0 +1,428 @@
import copy
import os
from dataclasses import dataclass
from typing import Dict, List, Set, Tuple, Optional, Union
import cv2
import insightface
import numpy as np
from insightface.app.common import Face
from PIL import Image
from sklearn.metrics.pairwise import cosine_similarity
from scripts.faceswaplab_swapping import upscaled_inswapper
from scripts.faceswaplab_utils.imgutils import cv2_to_pil, pil_to_cv2, check_against_nsfw
from scripts.faceswaplab_utils.faceswaplab_logging import logger, save_img_debug
from scripts import faceswaplab_globals
from modules.shared import opts
from functools import lru_cache
from scripts.faceswaplab_ui.faceswaplab_unit_settings import FaceSwapUnitSettings
providers = ["CPUExecutionProvider"]
def cosine_similarity_face(face1, face2) -> float:
"""
Calculates the cosine similarity between two face embeddings.
Args:
face1 (Face): The first face object containing an embedding.
face2 (Face): The second face object containing an embedding.
Returns:
float: The cosine similarity between the face embeddings.
Note:
        Raw cosine similarity ranges from -1 to 1, where 1 indicates identical embeddings.
        In this implementation, the result is clamped to a minimum of 0 so the returned
        score is always non-negative.
"""
# Reshape the face embeddings to have a shape of (1, -1)
vec1 = face1.embedding.reshape(1, -1)
vec2 = face2.embedding.reshape(1, -1)
# Calculate the cosine similarity between the reshaped embeddings
similarity = cosine_similarity(vec1, vec2)
# Return the maximum of 0 and the calculated similarity as the final similarity score
return max(0, similarity[0, 0])
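# For reference (added note, not in the original): cosine similarity between two
# embeddings u and v is dot(u, v) / (||u|| * ||v||). A minimal sketch of the
# clamped score computed above, assuming two embedding vectors:
#
#   u, v = face1.embedding, face2.embedding
#   score = max(0.0, float(np.dot(u, v) / (np.linalg.norm(u) * np.linalg.norm(v))))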
def compare_faces(img1: Image.Image, img2: Image.Image) -> float:
"""
Compares the similarity between two faces extracted from images using cosine similarity.
Args:
img1: The first image containing a face.
img2: The second image containing a face.
Returns:
A float value representing the similarity between the two faces (0 to 1).
Returns -1 if one or both of the images do not contain any faces.
"""
# Extract faces from the images
face1 = get_or_default(get_faces(pil_to_cv2(img1)), 0, None)
face2 = get_or_default(get_faces(pil_to_cv2(img2)), 0, None)
# Check if both faces are detected
if face1 is not None and face2 is not None:
# Calculate the cosine similarity between the faces
return cosine_similarity_face(face1, face2)
# Return -1 if one or both of the images do not contain any faces
return -1
class FaceModelException(Exception):
"""Exception raised when an error is encountered in the face model."""
def __init__(self, message: str) -> None:
"""
Args:
message: A string containing the error description.
"""
self.message = message
super().__init__(self.message)
@lru_cache(maxsize=1)
def getAnalysisModel():
"""
Retrieves the analysis model for face analysis.
Returns:
insightface.app.FaceAnalysis: The analysis model for face analysis.
"""
    try:
        os.makedirs(faceswaplab_globals.ANALYZER_DIR, exist_ok=True)
        logger.info("Load analysis model, will take some time.")
        # Initialize the analysis model with the specified name and providers
        return insightface.app.FaceAnalysis(
            name="buffalo_l", providers=providers, root=faceswaplab_globals.ANALYZER_DIR
        )
    except Exception as e:
        logger.error("Loading of analysis model failed: %s. Please check the requirements (on Windows, download and install Visual Studio; during the install, make sure to include the Python and C++ packages.)", e)
        raise FaceModelException("Loading of analysis model failed") from e
@lru_cache(maxsize=1)
def getFaceSwapModel(model_path: str):
"""
Retrieves the face swap model and initializes it if necessary.
Args:
model_path (str): Path to the face swap model.
Returns:
insightface.model_zoo.FaceModel: The face swap model.
"""
    try:
        # Initialize the face swap model using the specified model path.
        return upscaled_inswapper.UpscaledINSwapper(insightface.model_zoo.get_model(model_path, providers=providers))
    except Exception as e:
        logger.error("Loading of swapping model failed: %s. Please check the requirements (on Windows, download and install Visual Studio; during the install, make sure to include the Python and C++ packages.)", e)
        raise FaceModelException("Loading of swapping model failed") from e
def get_faces(img_data: np.ndarray, det_size=(640, 640), det_thresh: Optional[float] = None, sort_by_face_size: bool = False) -> List[Face]:
"""
Detects and retrieves faces from an image using an analysis model.
Args:
        img_data (np.ndarray): The image data as a NumPy array.
        det_size (tuple): The desired detection size (width, height). Defaults to (640, 640).
        det_thresh (float, optional): Detection threshold; falls back to the "faceswaplab_detection_threshold" option (0.5 by default).
        sort_by_face_size (bool): If True, sort the faces by bounding-box area, from larger to smaller.
    Returns:
        list: A list of detected faces, sorted by the x-coordinate of their bounding box (or by size if requested).
"""
    if det_thresh is None:
det_thresh = opts.data.get("faceswaplab_detection_threshold", 0.5)
# Create a deep copy of the analysis model (otherwise det_size is attached to the analysis model and can't be changed)
face_analyser = copy.deepcopy(getAnalysisModel())
# Prepare the analysis model for face detection with the specified detection size
face_analyser.prepare(ctx_id=0, det_thresh=det_thresh, det_size=det_size)
# Get the detected faces from the image using the analysis model
face = face_analyser.get(img_data)
# If no faces are detected and the detection size is larger than 320x320,
# recursively call the function with a smaller detection size
if len(face) == 0 and det_size[0] > 320 and det_size[1] > 320:
det_size_half = (det_size[0] // 2, det_size[1] // 2)
return get_faces(img_data, det_size=det_size_half, det_thresh=det_thresh)
    try:
        if sort_by_face_size:
            return sorted(face, reverse=True, key=lambda x: (x.bbox[2] - x.bbox[0]) * (x.bbox[3] - x.bbox[1]))
        # Sort the detected faces based on the x-coordinate of their bounding box
        return sorted(face, key=lambda x: x.bbox[0])
    except Exception as e:
        logger.error("Face sorting failed: %s", e)
        return []
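# Illustrative calls (added sketch, not in the original): detection starts at
# 640x640 and halves the detection size down to 320x320 if no face is found.
#
#   faces = get_faces(pil_to_cv2(img))                             # sorted left to right
#   largest_first = get_faces(pil_to_cv2(img), sort_by_face_size=True)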
@dataclass
class ImageResult:
"""
Represents the result of an image swap operation
"""
image: Image.Image
"""
The image object with the swapped face
"""
similarity: Dict[int, float]
"""
A dictionary mapping face indices to their similarity scores.
The similarity scores are represented as floating-point values between 0 and 1.
"""
ref_similarity: Dict[int, float]
"""
A dictionary mapping face indices to their similarity scores compared to a reference image.
The similarity scores are represented as floating-point values between 0 and 1.
"""
def get_or_default(l, index, default):
"""
Retrieve the value at the specified index from the given list.
If the index is out of bounds, return the default value instead.
Args:
l (list): The input list.
index (int): The index to retrieve the value from.
default: The default value to return if the index is out of bounds.
Returns:
The value at the specified index if it exists, otherwise the default value.
"""
return l[index] if index < len(l) else default
def get_faces_from_img_files(files):
"""
Extracts faces from a list of image files.
Args:
files (list): A list of file objects representing image files.
Returns:
list: A list of detected faces.
"""
faces = []
if len(files) > 0:
for file in files:
img = Image.open(file.name) # Open the image file
face = get_or_default(get_faces(pil_to_cv2(img)), 0, None) # Extract faces from the image
if face is not None:
faces.append(face) # Add the detected face to the list of faces
return faces
def blend_faces(faces: List[Face]) -> Face:
"""
Blends the embeddings of multiple faces into a single face.
Args:
faces (List[Face]): List of Face objects.
Returns:
Face: The blended Face object with the averaged embedding.
Returns None if the input list is empty.
Raises:
ValueError: If the embeddings have different shapes.
"""
embeddings = [face.embedding for face in faces]
if len(embeddings) > 0:
embedding_shape = embeddings[0].shape
# Check if all embeddings have the same shape
for embedding in embeddings:
if embedding.shape != embedding_shape:
raise ValueError("embedding shape mismatch")
# Compute the mean of all embeddings
blended_embedding = np.mean(embeddings, axis=0)
# Create a new Face object using the properties of the first face in the list
# Assign the blended embedding to the blended Face object
blended = Face(embedding=blended_embedding, gender=faces[0].gender, age=faces[0].age)
        # If more than one face was blended, the result must differ from the first face's embedding
        if len(faces) > 1:
            assert not np.array_equal(blended.embedding, faces[0].embedding), "If len(faces) > 1, the blended embedding should not be the same as the first face's"
        return blended
# Return None if the input list is empty
return None
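# Added note: blending is a plain arithmetic mean over embeddings, e.g. for two
# faces the blended embedding is (e1 + e2) / 2; gender and age are taken from
# the first face. A hypothetical two-face sketch:
#
#   blended = blend_faces([face_a, face_b])
#   # np.allclose(blended.embedding, (face_a.embedding + face_b.embedding) / 2) -> True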
def swap_face(
reference_face: np.ndarray,
source_face: np.ndarray,
target_img: Image.Image,
model: str,
    faces_index: Set[int] = {0},
    same_gender=True,
    upscaled_swapper=False,
    compute_similarity=True,
    sort_by_face_size=False
) -> ImageResult:
"""
Swaps faces in the target image with the source face.
Args:
reference_face (np.ndarray): The reference face used for similarity comparison.
source_face (np.ndarray): The source face to be swapped.
target_img (Image.Image): The target image to swap faces in.
model (str): Path to the face swap model.
faces_index (Set[int], optional): Set of indices specifying which faces to swap. Defaults to {0}.
        same_gender (bool, optional): If True, only swap faces with the same gender as the source face. Defaults to True.
        upscaled_swapper (bool, optional): If True, use the upscaled inswapper for higher-quality results. Defaults to False.
        compute_similarity (bool, optional): If True, compute similarity scores between swapped and source/reference faces. Defaults to True.
        sort_by_face_size (bool, optional): If True, process target faces from larger to smaller. Defaults to False.
Returns:
ImageResult: An object containing the swapped image and similarity scores.
"""
return_result = ImageResult(target_img, {}, {})
    try:
        target_img = cv2.cvtColor(np.array(target_img), cv2.COLOR_RGB2BGR)
        if source_face is not None:
            gender = source_face["gender"]
            logger.info("Source Gender %s", gender)
result = target_img
model_path = os.path.join(os.path.abspath(os.path.dirname(__file__)), model)
face_swapper = getFaceSwapModel(model_path)
target_faces = get_faces(target_img, sort_by_face_size=sort_by_face_size)
logger.info("Target faces count : %s", len(target_faces))
if same_gender:
target_faces = [x for x in target_faces if x["gender"] == gender]
logger.info("Target Gender Matches count %s", len(target_faces))
for i, swapped_face in enumerate(target_faces):
logger.info(f"swap face {i}")
if i in faces_index:
result = face_swapper.get(result, swapped_face, source_face, upscale = upscaled_swapper)
result_image = Image.fromarray(cv2.cvtColor(result, cv2.COLOR_BGR2RGB))
return_result.image = result_image
if compute_similarity :
try:
result_faces = get_faces(
cv2.cvtColor(np.array(result_image), cv2.COLOR_RGB2BGR), sort_by_face_size=sort_by_face_size
)
if same_gender:
result_faces = [x for x in result_faces if x["gender"] == gender]
for i, swapped_face in enumerate(result_faces):
logger.info(f"compare face {i}")
if i in faces_index and i < len(target_faces):
return_result.similarity[i] = cosine_similarity_face(
source_face, swapped_face
)
return_result.ref_similarity[i] = cosine_similarity_face(
reference_face, swapped_face
)
logger.info(f"similarity {return_result.similarity}")
logger.info(f"ref similarity {return_result.ref_similarity}")
except Exception as e:
logger.error("Similarity processing failed %s", e)
raise e
except Exception as e :
logger.error("Conversion failed %s", e)
raise e
return return_result
def process_image_unit(model, unit: FaceSwapUnitSettings, image: Image.Image, info=None, upscaled_swapper=False, force_blend=False) -> List:
"""Process one image and return a List of (image, info) (one if blended, many if not).
Args:
unit : the current unit
image : the image where to apply swapping
info : The info
Returns:
List of tuple of (image, info) where image is the image where swapping has been applied and info is the image info with similarity infos.
"""
results = []
    if unit.enable:
        if check_against_nsfw(image):
            return [(image, info)]
        if not unit.blend_faces and not force_blend:
            src_faces = unit.faces
            logger.info(f"will generate {len(src_faces)} images")
        else:
            logger.info("blend all faces together")
            src_faces = [unit.blended_faces]
            if len(unit.faces) > 1:
                assert not np.array_equal(unit.reference_face.embedding, src_faces[0].embedding), "Reference face cannot be the same as the blended face"
        for i, src_face in enumerate(src_faces):
            logger.info(f"Process face {i}")
            if unit.reference_face is not None:
                reference_face = unit.reference_face
            else:
                logger.info("Use source face as reference face")
                reference_face = src_face
save_img_debug(image, "Before swap")
result: ImageResult = swap_face(
reference_face,
src_face,
image,
faces_index=unit.faces_index,
model=model,
same_gender=unit.same_gender,
upscaled_swapper=upscaled_swapper,
compute_similarity=unit.compute_similarity,
sort_by_face_size=unit.sort_by_size
)
save_img_debug(result.image, "After swap")
            if result.image is None:
                logger.error("Result image is None")
            if (not unit.check_similarity) or (
                result.similarity
                and all(x >= unit.min_sim for x in result.similarity.values())
                and all(x >= unit.min_ref_sim for x in result.ref_similarity.values())
            ):
                results.append((result.image, f"{info}, similarity = {result.similarity}, ref_similarity = {result.ref_similarity}"))
            else:
                logger.warning(
                    f"skip, similarity too low, sim = {result.similarity} (target {unit.min_sim}) ref sim = {result.ref_similarity} (target = {unit.min_ref_sim})"
                )
logger.debug("process_image_unit : Unit produced %s results", len(results))
return results
def process_images_units(model, units: List[FaceSwapUnitSettings], images: List[Tuple[Optional[Image.Image], Optional[str]]], upscaled_swapper=False, force_blend=False) -> Union[List, None]:
    if len(units) == 0:
        logger.info("Finished processing image, return %s images", len(images))
        return None
    logger.debug("%s more units", len(units))
    processed_images = []
    for i, (image, info) in enumerate(images):
        logger.debug("Processing image %s", i)
        swapped = process_image_unit(model, units[0], image, info, upscaled_swapper, force_blend)
        logger.debug("Image %s -> %s images", i, len(swapped))
        nexts = process_images_units(model, units[1:], swapped, upscaled_swapper, force_blend)
        if nexts:
            processed_images.extend(nexts)
        else:
            processed_images.extend(swapped)
    return processed_images
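# Added usage sketch (hypothetical values, not in the original): units are
# applied recursively, so every image produced by unit N is fed to unit N+1.
# `get_current_model` is the assumed helper imported elsewhere in this project.
#
#   results = process_images_units(
#       get_current_model(),
#       units=[unit1, unit2],        # two enabled FaceSwapUnitSettings
#       images=[(src_image, None)],
#   )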

@ -0,0 +1,188 @@
import cv2
import numpy as np
import onnx
import onnxruntime
from insightface.model_zoo.inswapper import INSwapper
from insightface.utils import face_align
from modules import codeformer_model, processing, scripts, shared
from modules.face_restoration import FaceRestoration
from modules.shared import cmd_opts, opts, state
from modules.upscaler import UpscalerData
from onnx import numpy_helper
from PIL import Image
from scripts.faceswaplab_utils.faceswaplab_logging import logger
from scripts.faceswaplab_postprocessing import upscaling
from scripts.faceswaplab_postprocessing.postprocessing_options import \
PostProcessingOptions
from scripts.faceswaplab_swapping.facemask import generate_face_mask
from scripts.faceswaplab_utils.imgutils import cv2_to_pil, pil_to_cv2
def get_upscaler() -> UpscalerData:
for upscaler in shared.sd_upscalers:
if upscaler.name == opts.data.get("faceswaplab_upscaled_swapper_upscaler", "LDSR"):
return upscaler
return None
def merge_images_with_mask(image1, image2, mask):
    if image1.shape != image2.shape or image1.shape[:2] != mask.shape:
        raise ValueError("Images and mask must have matching shapes")
mask = mask.astype(np.uint8)
masked_region = cv2.bitwise_and(image2, image2, mask=mask)
inverse_mask = cv2.bitwise_not(mask)
empty_region = cv2.bitwise_and(image1, image1, mask=inverse_mask)
merged_image = cv2.add(empty_region, masked_region)
return merged_image
def erode_mask(mask, kernel_size=3, iterations=1):
kernel = np.ones((kernel_size, kernel_size), np.uint8)
eroded_mask = cv2.erode(mask, kernel, iterations=iterations)
return eroded_mask
def apply_gaussian_blur(mask, kernel_size=(5, 5), sigma_x=0):
blurred_mask = cv2.GaussianBlur(mask, kernel_size, sigma_x)
return blurred_mask
def dilate_mask(mask, kernel_size=5, iterations=1):
kernel = np.ones((kernel_size, kernel_size), np.uint8)
dilated_mask = cv2.dilate(mask, kernel, iterations=iterations)
return dilated_mask
def get_face_mask(aimg,bgr_fake):
mask1 = generate_face_mask(aimg, device = shared.device)
mask2 = generate_face_mask(bgr_fake, device = shared.device)
mask = dilate_mask(cv2.bitwise_or(mask1,mask2))
return mask
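# Added note: the helpers above implement standard binary-mask morphology with
# OpenCV. A minimal standalone sketch of the dilation used by get_face_mask:
#
#   m = np.zeros((64, 64), np.uint8); m[30:34, 30:34] = 255
#   grown = dilate_mask(m, kernel_size=5, iterations=1)  # 4x4 blob grows to 8x8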
class UpscaledINSwapper:
    def __init__(self, inswapper: INSwapper):
        self.__dict__.update(inswapper.__dict__)
def forward(self, img, latent):
img = (img - self.input_mean) / self.input_std
pred = self.session.run(self.output_names, {self.input_names[0]: img, self.input_names[1]: latent})[0]
return pred
    def super_resolution(self, img, k=2):
pil_img = cv2_to_pil(img)
options = PostProcessingOptions(
upscaler_name=opts.data.get('faceswaplab_upscaled_swapper_upscaler', 'LDSR'),
upscale_visibility=1,
scale=k,
face_restorer_name=opts.data.get('faceswaplab_upscaled_swapper_face_restorer', ""),
codeformer_weight= opts.data.get('faceswaplab_upscaled_swapper_face_restorer_weight', 1),
restorer_visibility=opts.data.get('faceswaplab_upscaled_swapper_face_restorer_visibility', 1))
upscaled = upscaling.upscale_img(pil_img, options)
upscaled = upscaling.restore_face(upscaled, options)
return pil_to_cv2(upscaled)
    def get(self, img, target_face, source_face, paste_back=True, upscale=True):
aimg, M = face_align.norm_crop2(img, target_face.kps, self.input_size[0])
blob = cv2.dnn.blobFromImage(aimg, 1.0 / self.input_std, self.input_size,
(self.input_mean, self.input_mean, self.input_mean), swapRB=True)
latent = source_face.normed_embedding.reshape((1,-1))
latent = np.dot(latent, self.emap)
latent /= np.linalg.norm(latent)
pred = self.session.run(self.output_names, {self.input_names[0]: blob, self.input_names[1]: latent})[0]
#print(latent.shape, latent.dtype, pred.shape)
img_fake = pred.transpose((0,2,3,1))[0]
bgr_fake = np.clip(255 * img_fake, 0, 255).astype(np.uint8)[:,:,::-1]
        try:
            if not paste_back:
                return bgr_fake, M
            else:
                target_img = img

                def compute_diff(bgr_fake, aimg):
fake_diff = bgr_fake.astype(np.float32) - aimg.astype(np.float32)
fake_diff = np.abs(fake_diff).mean(axis=2)
fake_diff[:2,:] = 0
fake_diff[-2:,:] = 0
fake_diff[:,:2] = 0
fake_diff[:,-2:] = 0
return fake_diff
                if upscale:
                    logger.info("Upscaled inswapper using %s", opts.data.get('faceswaplab_upscaled_swapper_upscaler', 'LDSR'))
k = 4
aimg, M = face_align.norm_crop2(img, target_face.kps, self.input_size[0]*k)
# upscale and restore face :
bgr_fake = self.super_resolution(bgr_fake, k)
if opts.data.get("faceswaplab_upscaled_improved_mask", True) :
mask = get_face_mask(aimg,bgr_fake)
bgr_fake = merge_images_with_mask(aimg, bgr_fake,mask)
# compute fake_diff before sharpen and color correction (better result)
fake_diff = compute_diff(bgr_fake, aimg)
if opts.data.get("faceswaplab_upscaled_swapper_sharpen", True) :
print("sharpen")
# Add sharpness
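                        # (Added note: the next two lines form a standard unsharp mask,
                        #  out = 1.5*img - 0.5*blur(img), which boosts high-frequency detail.)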
blurred = cv2.GaussianBlur(bgr_fake, (0, 0), 3)
bgr_fake = cv2.addWeighted(bgr_fake, 1.5, blurred, -0.5, 0)
# Apply color corrections
if opts.data.get("faceswaplab_upscaled_swapper_fixcolor", True) :
print("color correction")
correction = processing.setup_color_correction(cv2_to_pil(aimg))
bgr_fake_pil = processing.apply_color_correction(correction, cv2_to_pil(bgr_fake))
bgr_fake = pil_to_cv2(bgr_fake_pil)
                else:
                    fake_diff = compute_diff(bgr_fake, aimg)
IM = cv2.invertAffineTransform(M)
img_white = np.full((aimg.shape[0],aimg.shape[1]), 255, dtype=np.float32)
bgr_fake = cv2.warpAffine(bgr_fake, IM, (target_img.shape[1], target_img.shape[0]), borderValue=0.0)
img_white = cv2.warpAffine(img_white, IM, (target_img.shape[1], target_img.shape[0]), borderValue=0.0)
fake_diff = cv2.warpAffine(fake_diff, IM, (target_img.shape[1], target_img.shape[0]), borderValue=0.0)
img_white[img_white>20] = 255
                fthresh = opts.data.get('faceswaplab_upscaled_swapper_fthresh', 10)
                logger.debug("fthresh %s", fthresh)
                fake_diff[fake_diff < fthresh] = 0
                fake_diff[fake_diff >= fthresh] = 255
img_mask = img_white
mask_h_inds, mask_w_inds = np.where(img_mask==255)
mask_h = np.max(mask_h_inds) - np.min(mask_h_inds)
mask_w = np.max(mask_w_inds) - np.min(mask_w_inds)
mask_size = int(np.sqrt(mask_h*mask_w))
erosion_factor = opts.data.get('faceswaplab_upscaled_swapper_erosion', 1)
k = max(int(mask_size//10*erosion_factor), int(10*erosion_factor))
kernel = np.ones((k,k),np.uint8)
img_mask = cv2.erode(img_mask,kernel,iterations = 1)
kernel = np.ones((2,2),np.uint8)
fake_diff = cv2.dilate(fake_diff,kernel,iterations = 1)
k = max(int(mask_size//20*erosion_factor), int(5*erosion_factor))
kernel_size = (k, k)
blur_size = tuple(2*i+1 for i in kernel_size)
img_mask = cv2.GaussianBlur(img_mask, blur_size, 0)
k = int(5*erosion_factor)
kernel_size = (k, k)
blur_size = tuple(2*i+1 for i in kernel_size)
fake_diff = cv2.GaussianBlur(fake_diff, blur_size, 0)
img_mask /= 255
fake_diff /= 255
img_mask = np.reshape(img_mask, [img_mask.shape[0],img_mask.shape[1],1])
fake_merged = img_mask * bgr_fake + (1-img_mask) * target_img.astype(np.float32)
fake_merged = fake_merged.astype(np.uint8)
return fake_merged
        except Exception:
            import traceback
            traceback.print_exc()
            raise

@ -0,0 +1,326 @@
import os
import tempfile
from pprint import pformat, pprint
import dill as pickle
import gradio as gr
import modules.scripts as scripts
import numpy as np
import onnx
import pandas as pd
from scripts.faceswaplab_ui.faceswaplab_unit_ui import faceswap_unit_ui
from scripts.faceswaplab_ui.faceswaplab_upscaler_ui import upscaler_ui
from insightface.app.common import Face
from modules import script_callbacks, scripts
from PIL import Image
from modules.shared import opts
from scripts.faceswaplab_utils import imgutils
from scripts.faceswaplab_utils.imgutils import pil_to_cv2
from scripts.faceswaplab_utils.models_utils import get_models
from scripts.faceswaplab_utils.faceswaplab_logging import logger
import scripts.faceswaplab_swapping.swapper as swapper
from scripts.faceswaplab_postprocessing.postprocessing_options import PostProcessingOptions
from scripts.faceswaplab_postprocessing.postprocessing import enhance_image
from dataclasses import fields
from typing import List
from scripts.faceswaplab_ui.faceswaplab_unit_settings import FaceSwapUnitSettings
from scripts.faceswaplab_utils.models_utils import get_current_model
def compare(img1, img2):
if img1 is not None and img2 is not None:
return swapper.compare_faces(img1, img2)
return "You need 2 images to compare"
def extract_faces(files, extract_path, face_restorer_name, face_restorer_visibility, codeformer_weight,
                  upscaler_name, upscaler_scale, upscaler_visibility,
                  inpainting_denoising_strengh, inpainting_prompt, inpainting_negative_prompt,
                  inpainting_steps, inpainting_sampler, inpainting_when):
    if not extract_path:
        extract_path = tempfile.mkdtemp()
if files is not None:
images = []
for file in files :
img = Image.open(file.name).convert("RGB")
faces = swapper.get_faces(pil_to_cv2(img))
if faces:
face_images = []
for face in faces:
bbox = face.bbox.astype(int)
x_min, y_min, x_max, y_max = bbox
face_image = img.crop((x_min, y_min, x_max, y_max))
if face_restorer_name or face_restorer_visibility:
scale = 1 if face_image.width > 512 else 512//face_image.width
face_image = enhance_image(face_image, PostProcessingOptions(face_restorer_name=face_restorer_name,
restorer_visibility=face_restorer_visibility,
codeformer_weight= codeformer_weight,
upscaler_name=upscaler_name,
upscale_visibility=upscaler_visibility,
scale=scale,
inpainting_denoising_strengh=inpainting_denoising_strengh,
inpainting_prompt=inpainting_prompt,
inpainting_steps=inpainting_steps,
inpainting_negative_prompt=inpainting_negative_prompt,
inpainting_when=inpainting_when,
inpainting_sampler=inpainting_sampler))
path = tempfile.NamedTemporaryFile(delete=False,suffix=".png",dir=extract_path).name
face_image.save(path)
face_images.append(path)
images+= face_images
return images
return None
def analyse_faces(image, det_threshold=0.5):
    try:
        faces = swapper.get_faces(imgutils.pil_to_cv2(image), det_thresh=det_threshold)
        result = ""
        for i, face in enumerate(faces):
            result += f"\nFace {i}\n" + "=" * 40 + "\n"
            result += pformat(face) + "\n"
            result += "=" * 40
        return result
    except Exception as e:
        logger.error("Analysis Failed : %s", e)
        return "Analysis Failed"
def build_face_checkpoint_and_save(batch_files, name):
"""
Builds a face checkpoint, swaps faces, and saves the result to a file.
Args:
batch_files (list): List of image file paths.
name (str): Name of the face checkpoint
Returns:
PIL.Image.Image or None: Resulting swapped face image if successful, otherwise None.
"""
batch_files = batch_files or []
print("Build", name, [x.name for x in batch_files])
faces = swapper.get_faces_from_img_files(batch_files)
blended_face = swapper.blend_faces(faces)
preview_path = os.path.join(
scripts.basedir(), "extensions", "sd-webui-faceswaplab", "references"
)
faces_path = os.path.join(scripts.basedir(), "models", "faceswaplab","faces")
if not os.path.exists(faces_path):
os.makedirs(faces_path)
target_img = None
if blended_face:
if blended_face["gender"] == 0:
target_img = Image.open(os.path.join(preview_path, "woman.png"))
else:
target_img = Image.open(os.path.join(preview_path, "man.png"))
if name == "":
name = "default_name"
pprint(blended_face)
result = swapper.swap_face(blended_face, blended_face, target_img, get_models()[0])
result_image = enhance_image(result.image, PostProcessingOptions(face_restorer_name="CodeFormer", restorer_visibility=1))
file_path = os.path.join(faces_path, f"{name}.pkl")
file_number = 1
while os.path.exists(file_path):
file_path = os.path.join(faces_path, f"{name}_{file_number}.pkl")
file_number += 1
result_image.save(file_path+".png")
with open(file_path, "wb") as file:
pickle.dump({"embedding" :blended_face.embedding, "gender" :blended_face.gender, "age" :blended_face.age},file)
        try:
            with open(file_path, "rb") as file:
                data = Face(pickle.load(file))
                logger.debug("Reloaded checkpoint: %s", data)
        except Exception as e:
            logger.error("Failed to reload checkpoint: %s", e)
return result_image
print("No face found")
return target_img
def explore_onnx_faceswap_model(model_path):
data = {
'Node Name': [],
'Op Type': [],
'Inputs': [],
'Outputs': [],
'Attributes': []
}
if model_path:
model = onnx.load(model_path)
for node in model.graph.node:
data['Node Name'].append(pformat(node.name))
data['Op Type'].append(pformat(node.op_type))
data['Inputs'].append(pformat(node.input))
data['Outputs'].append(pformat(node.output))
attributes = []
for attr in node.attribute:
attr_name = attr.name
attr_value = attr.t
attributes.append("{} = {}".format(pformat(attr_name), pformat(attr_value)))
data['Attributes'].append(attributes)
df = pd.DataFrame(data)
return df
def batch_process(files, save_path, *components):
try :
if save_path is not None:
os.makedirs(save_path, exist_ok=True)
units_count = opts.data.get("faceswaplab_units_count", 3)
units: List[FaceSwapUnitSettings] = []
#Parse and convert units flat components into FaceSwapUnitSettings
for i in range(0, units_count):
units += [FaceSwapUnitSettings.get_unit_configuration(i, components)]
for i, u in enumerate(units):
logger.debug("%s, %s", pformat(i), pformat(u))
#Parse the postprocessing options
#We must first find where to start from (after face swapping units)
len_conf: int = len(fields(FaceSwapUnitSettings))
shift: int = units_count * len_conf
postprocess_options = PostProcessingOptions(
*components[shift : shift + len(fields(PostProcessingOptions))]
)
logger.debug("%s", pformat(postprocess_options))
units = [u for u in units if u.enable]
if files is not None:
images = []
for file in files :
current_images = []
src_image = Image.open(file.name).convert("RGB")
swapped_images = swapper.process_images_units(get_current_model(), images=[(src_image,None)], units=units, upscaled_swapper=opts.data.get("faceswaplab_upscaled_swapper", False))
                if swapped_images and len(swapped_images) > 0:
                    current_images += [img for img, info in swapped_images]
logger.info("%s images generated", len(current_images))
for i, img in enumerate(current_images) :
current_images[i] = enhance_image(img,postprocess_options)
for img in current_images :
path = tempfile.NamedTemporaryFile(delete=False,suffix=".png",dir=save_path).name
img.save(path)
images += current_images
return images
except Exception as e:
logger.error("Batch Process error : %s",e)
import traceback
traceback.print_exc()
return None
def tools_ui():
models = get_models()
with gr.Tab("Tools"):
with gr.Tab("Build"):
gr.Markdown(
"""Build a face based on a batch list of images. Will blend the resulting face and store the checkpoint in the faceswaplab/faces directory.""")
with gr.Row():
batch_files = gr.components.File(
type="file",
file_count="multiple",
label="Batch Sources Images",
optional=True,
elem_id="faceswaplab_build_batch_files"
)
preview = gr.components.Image(type="pil", label="Preview", interactive=False, elem_id="faceswaplab_build_preview_face")
name = gr.Textbox(
value="Face",
placeholder="Name of the character",
label="Name of the character",
elem_id="faceswaplab_build_character_name"
)
generate_checkpoint_btn = gr.Button("Save",elem_id="faceswaplab_build_save_btn")
with gr.Tab("Compare"):
gr.Markdown(
"""Give a similarity score between two images (only first face is compared).""")
with gr.Row():
img1 = gr.components.Image(type="pil",
label="Face 1",
elem_id="faceswaplab_compare_face1"
)
img2 = gr.components.Image(type="pil",
label="Face 2",
elem_id="faceswaplab_compare_face2"
)
compare_btn = gr.Button("Compare",elem_id="faceswaplab_compare_btn")
compare_result_text = gr.Textbox(
interactive=False, label="Similarity", value="0", elem_id="faceswaplab_compare_result"
)
with gr.Tab("Extract"):
gr.Markdown(
"""Extract all faces from a batch of images. Will apply enhancement in the tools enhancement tab.""")
with gr.Row():
extracted_source_files = gr.components.File(
type="file",
file_count="multiple",
label="Batch Sources Images",
optional=True,
elem_id="faceswaplab_extract_batch_images"
)
extracted_faces = gr.Gallery(
label="Extracted faces", show_label=False,
elem_id="faceswaplab_extract_results"
).style(columns=[2], rows=[2])
extract_save_path = gr.Textbox(label="Destination Directory", value="", elem_id="faceswaplab_extract_destination")
extract_btn = gr.Button("Extract", elem_id="faceswaplab_extract_btn")
with gr.Tab("Explore Model"):
model = gr.Dropdown(
choices=models,
label="Model not found, please download one and reload automatic 1111",
elem_id="faceswaplab_explore_model"
)
explore_btn = gr.Button("Explore", elem_id="faceswaplab_explore_btn")
explore_result_text = gr.Dataframe(
interactive=False, label="Explored",
elem_id="faceswaplab_explore_result"
)
with gr.Tab("Analyse Face"):
img_to_analyse = gr.components.Image(type="pil", label="Face", elem_id="faceswaplab_analyse_face")
analyse_det_threshold = gr.Slider(0.1, 1, 0.5, step=0.01, label="Detection threshold", elem_id="faceswaplab_analyse_det_threshold")
analyse_btn = gr.Button("Analyse", elem_id="faceswaplab_analyse_btn")
analyse_results = gr.Textbox(label="Results", interactive=False, value="", elem_id="faceswaplab_analyse_results")
with gr.Tab("Batch Process"):
with gr.Tab("Source Images"):
gr.Markdown(
"""Batch process images. Will apply enhancement in the tools enhancement tab.""")
with gr.Row():
batch_source_files = gr.components.File(
type="file",
file_count="multiple",
label="Batch Sources Images",
optional=True,
elem_id="faceswaplab_batch_images"
)
batch_results = gr.Gallery(
label="Batch result", show_label=False,
elem_id="faceswaplab_batch_results"
).style(columns=[2], rows=[2])
batch_save_path = gr.Textbox(label="Destination Directory", value="outputs/faceswap/", elem_id="faceswaplab_batch_destination")
batch_save_btn= gr.Button("Process & Save", elem_id="faceswaplab_extract_btn")
unit_components = []
for i in range(1,opts.data.get("faceswaplab_units_count", 3)+1):
unit_components += faceswap_unit_ui(False, i, id_prefix="faceswaplab_tab")
upscale_options = upscaler_ui()
explore_btn.click(explore_onnx_faceswap_model, inputs=[model], outputs=[explore_result_text])
compare_btn.click(compare, inputs=[img1, img2], outputs=[compare_result_text])
generate_checkpoint_btn.click(build_face_checkpoint_and_save, inputs=[batch_files, name], outputs=[preview])
extract_btn.click(extract_faces, inputs=[extracted_source_files, extract_save_path]+upscale_options, outputs=[extracted_faces])
analyse_btn.click(analyse_faces, inputs=[img_to_analyse,analyse_det_threshold], outputs=[analyse_results])
batch_save_btn.click(batch_process, inputs=[batch_source_files, batch_save_path]+unit_components+upscale_options, outputs=[batch_results])
def on_ui_tabs():
with gr.Blocks(analytics_enabled=False) as ui_faceswap:
tools_ui()
return [(ui_faceswap, "FaceSwapLab", "faceswaplab_tab")]

@ -0,0 +1,152 @@
from scripts.faceswaplab_swapping import swapper
import numpy as np
import base64
import io
from dataclasses import dataclass, fields
from typing import List, Union
import dill as pickle
import gradio as gr
from insightface.app.common import Face
from PIL import Image
from scripts.faceswaplab_utils.imgutils import (pil_to_cv2,check_against_nsfw)
from scripts.faceswaplab_utils.faceswaplab_logging import logger
@dataclass
class FaceSwapUnitSettings:
# ORDER of parameters is IMPORTANT. It should match the result of faceswap_unit_ui
# The image given in reference
source_img: Union[Image.Image, str]
# The checkpoint file
source_face : str
# The batch source images
_batch_files: Union[gr.components.File,List[Image.Image]]
# Will blend faces if True
blend_faces: bool
# Enable this unit
enable: bool
# Use same gender filtering
same_gender: bool
# Sort faces by their size (from larger to smaller)
sort_by_size : bool
# If True, discard images with low similarity
check_similarity : bool
# if True will compute similarity and add it to the image info
_compute_similarity :bool
# Minimum similarity against the used face (reference, batch or checkpoint)
min_sim: float
# Minimum similarity against the reference (reference or checkpoint if checkpoint is given)
min_ref_sim: float
# The face index to use for swapping
_faces_index: str
    # The face index to use in the reference (source) image
reference_face_index : int
# Swap in the source image in img2img (before processing)
swap_in_source: bool
# Swap in the generated image in img2img (always on for txt2img)
swap_in_generated: bool
@staticmethod
def get_unit_configuration(unit: int, components):
fields_count = len(fields(FaceSwapUnitSettings))
return FaceSwapUnitSettings(
*components[unit * fields_count : unit * fields_count + fields_count]
)
@property
def faces_index(self):
"""
Convert _faces_index from str to int
"""
faces_index = {
int(x) for x in self._faces_index.strip(",").split(",") if x.isnumeric()
}
if len(faces_index) == 0:
return {0}
logger.debug("FACES INDEX : %s", faces_index)
return faces_index
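    # Added example (not in the original): "_faces_index" is a comma-separated
    # string, so "0,2," parses to {0, 2}; an empty or non-numeric string falls
    # back to {0}.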
@property
    def compute_similarity(self):
        return self._compute_similarity or self.check_similarity
@property
def batch_files(self):
"""
Return empty array instead of None for batch files
"""
return self._batch_files or []
@property
def reference_face(self) :
"""
Extract reference face (only once and store it for the rest of processing).
Reference face is the checkpoint or the source image or the first image in the batch in that order.
"""
if not hasattr(self,"_reference_face") :
if self.source_face and self.source_face != "None" :
with open(self.source_face, "rb") as file:
try :
logger.info(f"loading pickle {file.name}")
face = Face(pickle.load(file))
self._reference_face = face
except Exception as e :
logger.error("Failed to load checkpoint : %s", e)
elif self.source_img is not None :
if isinstance(self.source_img, str): # source_img is a base64 string
if 'base64,' in self.source_img: # check if the base64 string has a data URL scheme
base64_data = self.source_img.split('base64,')[-1]
img_bytes = base64.b64decode(base64_data)
else:
# if no data URL scheme, just decode
img_bytes = base64.b64decode(self.source_img)
self.source_img = Image.open(io.BytesIO(img_bytes))
source_img = pil_to_cv2(self.source_img)
self._reference_face = swapper.get_or_default(swapper.get_faces(source_img), self.reference_face_index, None)
if self._reference_face is None :
logger.error("Face not found in reference image")
else :
self._reference_face = None
if self._reference_face is None :
logger.error("You need at least one reference face")
return self._reference_face
@property
def faces(self) :
"""_summary_
Extract all faces (including reference face) to provide an array of faces
Only processed once.
"""
        if not hasattr(self, "_faces"):
            self._faces = [self.reference_face] if self.reference_face is not None else []
            for file in self.batch_files:
                if isinstance(file, Image.Image):
                    img = file
                else:
                    img = Image.open(file.name)
                face = swapper.get_or_default(swapper.get_faces(pil_to_cv2(img)), 0, None)
                if face is not None:
                    self._faces.append(face)
return self._faces
@property
def blended_faces(self):
"""
Blend the faces using the mean of all embeddings
"""
if not hasattr(self,"_blended_faces") :
self._blended_faces = swapper.blend_faces(self.faces)
assert(all([not np.array_equal(self._blended_faces.embedding, face.embedding) for face in self.faces]) if len(self.faces) > 1 else True), "Blended faces cannot be the same as one of the face if len(face)>0"
assert(not np.array_equal(self._blended_faces.embedding,self.reference_face.embedding) if len(self.faces) > 1 else True), "Blended faces cannot be the same as reference face if len(face)>0"
return self._blended_faces

@ -0,0 +1,111 @@
from scripts.faceswaplab_utils.models_utils import get_face_checkpoints
import gradio as gr
def faceswap_unit_ui(is_img2img, unit_num=1, id_prefix="faceswaplab"):
with gr.Tab(f"Face {unit_num}"):
with gr.Column():
gr.Markdown(
"""Reference is an image. First face will be extracted.
First face of batches sources will be extracted and used as input (or blended if blend is activated).""")
with gr.Row():
img = gr.components.Image(type="pil", label="Reference", elem_id=f"{id_prefix}_face{unit_num}_reference_image")
batch_files = gr.components.File(
type="file",
file_count="multiple",
label="Batch Sources Images",
optional=True,
elem_id=f"{id_prefix}_face{unit_num}_batch_source_face_files"
)
gr.Markdown(
"""Face checkpoint built with the checkpoint builder in tools. Will overwrite reference image.""")
with gr.Row() :
face = gr.Dropdown(
choices=get_face_checkpoints(),
label="Face Checkpoint (precedence over reference face)",
elem_id=f"{id_prefix}_face{unit_num}_face_checkpoint"
)
refresh = gr.Button(value='', variant='tool', elem_id=f"{id_prefix}_face{unit_num}_refresh_checkpoints")
def refresh_fn(selected):
return gr.Dropdown.update(value=selected, choices=get_face_checkpoints())
refresh.click(fn=refresh_fn,inputs=face, outputs=face)
with gr.Row():
enable = gr.Checkbox(False, placeholder="enable", label="Enable", elem_id=f"{id_prefix}_face{unit_num}_enable")
blend_faces = gr.Checkbox(
True, placeholder="Blend Faces", label="Blend Faces ((Source|Checkpoint)+References = 1)",
elem_id=f"{id_prefix}_face{unit_num}_blend_faces",
interactive=True
)
gr.Markdown("""Discard images with low similarity or no faces :""")
with gr.Row():
check_similarity = gr.Checkbox(False, placeholder="discard", label="Check similarity",
elem_id=f"{id_prefix}_face{unit_num}_check_similarity")
compute_similarity = gr.Checkbox(False, label="Compute similarity",
elem_id=f"{id_prefix}_face{unit_num}_compute_similarity")
min_sim = gr.Slider(0, 1, 0, step=0.01, label="Min similarity",
elem_id=f"{id_prefix}_face{unit_num}_min_similarity")
min_ref_sim = gr.Slider(
0, 1, 0, step=0.01, label="Min reference similarity",
elem_id=f"{id_prefix}_face{unit_num}_min_ref_similarity"
)
gr.Markdown("""Select the face to be swapped, you can sort by size or use the same gender as the desired face:""")
with gr.Row():
same_gender = gr.Checkbox(
False, placeholder="Same Gender", label="Same Gender",
elem_id=f"{id_prefix}_face{unit_num}_same_gender"
)
sort_by_size = gr.Checkbox(
False, placeholder="Sort by size", label="Sort by size (larger>smaller)",
elem_id=f"{id_prefix}_face{unit_num}_sort_by_size"
)
target_faces_index = gr.Textbox(
value="0",
placeholder="Which face to swap (comma separated), start from 0 (by gender if same_gender is enabled)",
label="Target face : Comma separated face number(s)",
elem_id=f"{id_prefix}_face{unit_num}_target_faces_index"
)
gr.Markdown("""The following will only affect reference face image (and is not affected by sort by size) :""")
reference_faces_index = gr.Number(
value=0,
precision=0,
minimum=0,
placeholder="Which face to get from reference image start from 0",
label="Reference source face : start from 0",
elem_id=f"{id_prefix}_face{unit_num}_reference_face_index"
)
gr.Markdown("""Configure swapping. Swapping can occure before img2img, after or both :""", visible=is_img2img)
swap_in_source = gr.Checkbox(
False,
placeholder="Swap face in source image",
label="Swap in source image (blended face)",
visible=is_img2img,
elem_id=f"{id_prefix}_face{unit_num}_swap_in_source"
)
swap_in_generated = gr.Checkbox(
True,
placeholder="Swap face in generated image",
label="Swap in generated image",
visible=is_img2img,
elem_id=f"{id_prefix}_face{unit_num}_swap_in_generated"
)
# If changed, you need to change FaceSwapUnitSettings accordingly
# ORDER of parameters is IMPORTANT. It should match the result of FaceSwapUnitSettings
return [
img,
face,
batch_files,
blend_faces,
enable,
same_gender,
sort_by_size,
check_similarity,
compute_similarity,
min_sim,
min_ref_sim,
target_faces_index,
reference_faces_index,
swap_in_source,
swap_in_generated,
]

@ -0,0 +1,80 @@
import gradio as gr
import modules
from modules import shared, sd_models
from modules.shared import cmd_opts, opts, state
import scripts.faceswaplab_postprocessing.upscaling as upscaling
from scripts.faceswaplab_utils.faceswaplab_logging import logger
def upscaler_ui():
with gr.Tab(f"Post-Processing"):
gr.Markdown(
"""Upscaling is performed on the whole image. Upscaling happens before face restoration.""")
with gr.Row():
face_restorer_name = gr.Radio(
label="Restore Face",
choices=["None"] + [x.name() for x in shared.face_restorers],
value=lambda : opts.data.get("faceswaplab_pp_default_face_restorer", shared.face_restorers[0].name()),
type="value",
elem_id="faceswaplab_pp_face_restorer"
)
with gr.Column():
face_restorer_visibility = gr.Slider(
0, 1, value=lambda:opts.data.get("faceswaplab_pp_default_face_restorer_visibility", 1), step=0.001, label="Restore visibility",
elem_id="faceswaplab_pp_face_restorer_visibility"
)
codeformer_weight = gr.Slider(
0, 1, value=lambda:opts.data.get("faceswaplab_pp_default_face_restorer_weight", 1), step=0.001, label="codeformer weight",
elem_id="faceswaplab_pp_face_restorer_weight"
)
upscaler_name = gr.Dropdown(
choices=[upscaler.name for upscaler in shared.sd_upscalers],
value= lambda:opts.data.get("faceswaplab_pp_default_upscaler","None"),
label="Upscaler",
elem_id="faceswaplab_pp_upscaler"
)
upscaler_scale = gr.Slider(1, 8, 1, step=0.1, label="Upscaler scale", elem_id="faceswaplab_pp_upscaler_scale")
upscaler_visibility = gr.Slider(
0, 1, value=lambda:opts.data.get("faceswaplab_pp_default_upscaler_visibility", 1), step=0.1, label="Upscaler visibility (if scale = 1)",
elem_id="faceswaplab_pp_upscaler_visibility"
)
with gr.Accordion(f"Post Inpainting", open=True):
gr.Markdown(
"""Inpainting sends image to inpainting with a mask on face (once for each faces).""")
inpainting_when = gr.Dropdown(
elem_id="faceswaplab_pp_inpainting_when", choices = [e.value for e in upscaling.InpaintingWhen.__members__.values()],value=[upscaling.InpaintingWhen.BEFORE_RESTORE_FACE.value], label="Enable/When")
inpainting_denoising_strength = gr.Slider(
                0, 1, 0, step=0.01, elem_id="faceswaplab_pp_inpainting_denoising_strength", label="Denoising strength (will send face to img2img after processing)"
)
            inpainting_denoising_prompt = gr.Textbox("Portrait of a [gender]", elem_id="faceswaplab_pp_inpainting_denoising_prompt", label="Inpainting prompt (use [gender] instead of man or woman)")
            inpainting_denoising_negative_prompt = gr.Textbox("", elem_id="faceswaplab_pp_inpainting_denoising_neg_prompt", label="Inpainting negative prompt (use [gender] instead of man or woman)")
with gr.Row():
samplers_names = [s.name for s in modules.sd_samplers.all_samplers]
inpainting_sampler = gr.Dropdown(
choices=samplers_names,
                    value=samplers_names[0],
label="Inpainting Sampler",
elem_id="faceswaplab_pp_inpainting_sampler"
)
inpainting_denoising_steps = gr.Slider(
1, 150, 20, step=1, label="Inpainting steps",
elem_id="faceswaplab_pp_inpainting_steps"
)
            inpainting_model = gr.Dropdown(choices=["Current"] + sd_models.checkpoint_tiles(), value="Current", label="sd model (experimental)", elem_id="faceswaplab_pp_inpainting_sd_model")
return [
face_restorer_name,
face_restorer_visibility,
codeformer_weight,
upscaler_name,
upscaler_scale,
upscaler_visibility,
inpainting_denoising_strength,
inpainting_denoising_prompt,
inpainting_denoising_negative_prompt,
inpainting_denoising_steps,
inpainting_sampler,
inpainting_when,
        inpainting_model
]
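A note on the value=lambda pattern used throughout this tab: Gradio calls a callable default each time the UI is rendered, so defaults edited in the WebUI settings take effect without a restart. A standalone sketch of the idea, with a plain dict standing in for opts.data:

import gradio as gr

fake_opts = {"faceswaplab_pp_default_upscaler": "None"}  # stand-in for opts.data

def default_upscaler() -> str:
    # Re-read the stored default on every page load.
    return fake_opts.get("faceswaplab_pp_default_upscaler", "None")

with gr.Blocks() as demo:
    gr.Dropdown(choices=["None", "Lanczos", "ESRGAN"], value=default_upscaler, label="Upscaler")
# demo.launch()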

@ -0,0 +1,53 @@
import logging
import copy
import sys
import tempfile
from modules import shared
from PIL import Image
class ColoredFormatter(logging.Formatter):
COLORS = {
"DEBUG": "\033[0;36m", # CYAN
"INFO": "\033[0;32m", # GREEN
"WARNING": "\033[0;33m", # YELLOW
"ERROR": "\033[0;31m", # RED
"CRITICAL": "\033[0;37;41m", # WHITE ON RED
"RESET": "\033[0m", # RESET COLOR
}
def format(self, record):
colored_record = copy.copy(record)
levelname = colored_record.levelname
seq = self.COLORS.get(levelname, self.COLORS["RESET"])
colored_record.levelname = f"{seq}{levelname}{self.COLORS['RESET']}"
return super().format(colored_record)
# Create a new logger
logger = logging.getLogger("FaceSwapLab")
logger.propagate = False
# Add handler if we don't have one.
if not logger.handlers:
handler = logging.StreamHandler(sys.stdout)
handler.setFormatter(
ColoredFormatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
)
logger.addHandler(handler)
# Configure logger
loglevel_string = getattr(shared.cmd_opts, "faceswaplab_loglevel", "INFO")
loglevel = getattr(logging, loglevel_string.upper(), logging.INFO)
logger.setLevel(loglevel)
if logger.getEffectiveLevel() <= logging.DEBUG:
DEBUG_DIR = tempfile.mkdtemp()
def save_img_debug(img: Image.Image, message: str, *opts):
if logger.getEffectiveLevel() <= logging.DEBUG:
with tempfile.NamedTemporaryFile(dir=DEBUG_DIR, delete=False, suffix=".png") as temp_file:
img_path = temp_file.name
img.save(img_path)
message_with_link = f"{message}\nImage: file://{img_path}"
logger.debug(message_with_link, *opts)
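Typical usage from another module of the extension might look like the following (a hypothetical call site; the real callers live elsewhere in this repository):

from PIL import Image
from scripts.faceswaplab_utils.faceswaplab_logging import logger, save_img_debug

logger.info("Processing %d face unit(s)", 3)
face_crop = Image.new("RGB", (128, 128))
# Saves the image under the temp debug directory and logs a file:// link,
# but only when the effective log level is DEBUG or lower.
save_img_debug(face_crop, "face crop before restoration")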

@ -0,0 +1,185 @@
import base64
import io
from collections import Counter
from math import isqrt, ceil
from typing import Optional

import cv2
import numpy as np
import torch
from ifnude import detect
from PIL import Image, ImageChops, ImageOps, ImageFilter

from modules import processing
from scripts.faceswaplab_globals import NSFW_SCORE
def check_against_nsfw(img):
    """Return True if any region detected by ifnude scores above NSFW_SCORE."""
    return any(chunk["score"] > NSFW_SCORE for chunk in detect(img))
def pil_to_cv2(pil_img):
return cv2.cvtColor(np.array(pil_img), cv2.COLOR_RGB2BGR)
def cv2_to_pil(cv2_img):
return Image.fromarray(cv2.cvtColor(cv2_img, cv2.COLOR_BGR2RGB))
def torch_to_pil(images):
"""
Convert a numpy image or a batch of images to a PIL image.
"""
images = images.cpu().permute(0, 2, 3, 1).numpy()
if images.ndim == 3:
images = images[None, ...]
images = (images * 255).round().astype("uint8")
pil_images = [Image.fromarray(image) for image in images]
return pil_images
def pil_to_torch(pil_images):
"""
Convert a PIL image or a list of PIL images to a torch tensor or a batch of torch tensors.
"""
if isinstance(pil_images, list):
numpy_images = [np.array(image) for image in pil_images]
torch_images = torch.from_numpy(np.stack(numpy_images)).permute(0, 3, 1, 2)
return torch_images
numpy_image = np.array(pil_images)
torch_image = torch.from_numpy(numpy_image).permute(2, 0, 1)
return torch_image
def create_square_image(image_list):
"""
Creates a square image by combining multiple images in a grid pattern.
Args:
image_list (list): List of PIL Image objects to be combined.
Returns:
PIL Image object: The resulting square image.
None: If the image_list is empty or contains only one image.
"""
# Count the occurrences of each image size in the image_list
size_counter = Counter(image.size for image in image_list)
# Get the most common image size (size with the highest count)
common_size = size_counter.most_common(1)[0][0]
# Filter the image_list to include only images with the common size
image_list = [image for image in image_list if image.size == common_size]
# Get the dimensions (width and height) of the common size
size = common_size
# If there are more than one image in the image_list
if len(image_list) > 1:
num_images = len(image_list)
# Calculate the number of rows and columns for the grid
rows = isqrt(num_images)
cols = ceil(num_images / rows)
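        # Example: 10 images -> rows = isqrt(10) = 3, cols = ceil(10 / 3) = 4,
        # leaving the last two grid cells black.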
# Calculate the size of the square image
square_size = (cols * size[0], rows * size[1])
# Create a new RGB image with the square size
square_image = Image.new("RGB", square_size)
# Paste each image onto the square image at the appropriate position
for i, image in enumerate(image_list):
row = i // cols
col = i % cols
square_image.paste(image, (col * size[0], row * size[1]))
# Return the resulting square image
return square_image
# Return None if there are no images or only one image in the image_list
return None
def create_mask(image, box_coords):
    """Return a mask that is white inside box_coords (inclusive) and black elsewhere."""
    mask = Image.new("L", image.size, 0)
    x1, y1, x2, y2 = box_coords
    # paste() treats the box as exclusive on the right/bottom edge, hence the +1.
    mask.paste(255, (x1, y1, x2 + 1, y2 + 1))
    return mask
def apply_mask(img : Image.Image,p : processing.StableDiffusionProcessing, batch_index : int) -> Image.Image :
"""
Apply mask overlay and color correction to an image if enabled
Args:
img: PIL Image objects.
p : The processing object
batch_index : the batch index
Returns:
PIL Image object
"""
    if isinstance(p, processing.StableDiffusionProcessingImg2Img):
        if p.inpaint_full_res:
            overlays = p.overlay_images
            if overlays is None or batch_index >= len(overlays):
                return img
            overlay: Image.Image = overlays[batch_index]
            overlay = overlay.resize(img.size, resample=Image.Resampling.LANCZOS)
img = img.copy()
img.paste(overlay, (0, 0), overlay)
return img
img = processing.apply_overlay(img, p.paste_to, batch_index, p.overlay_images)
if p.color_corrections is not None and batch_index < len(p.color_corrections):
img = processing.apply_color_correction(p.color_corrections[batch_index], img)
return img
def prepare_mask(
mask: Image.Image, p: processing.StableDiffusionProcessing
) -> Image.Image:
"""
Prepare an image mask for the inpainting process. (This comes from controlnet)
This function takes as input a PIL Image object and an instance of the
StableDiffusionProcessing class, and performs the following steps to prepare the mask:
1. Convert the mask to grayscale (mode "L").
2. If the 'inpainting_mask_invert' attribute of the processing instance is True,
invert the mask colors.
3. If the 'mask_blur' attribute of the processing instance is greater than 0,
apply a Gaussian blur to the mask with a radius equal to 'mask_blur'.
Args:
mask (Image.Image): The input mask as a PIL Image object.
p (processing.StableDiffusionProcessing): An instance of the StableDiffusionProcessing class
containing the processing parameters.
Returns:
mask (Image.Image): The prepared mask as a PIL Image object.
"""
mask = mask.convert("L")
    # FIXME: properly fix blur
# if getattr(p, "mask_blur", 0) > 0:
# mask = mask.filter(ImageFilter.GaussianBlur(p.mask_blur))
return mask
def base64_to_pil(base64str: Optional[str]) -> Optional[Image.Image]:
    if base64str is None:
return None
if 'base64,' in base64str: # check if the base64 string has a data URL scheme
base64_data = base64str.split('base64,')[-1]
img_bytes = base64.b64decode(base64_data)
else:
# if no data URL scheme, just decode
img_bytes = base64.b64decode(base64str)
return Image.open(io.BytesIO(img_bytes))
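A quick round-trip check of the converters above (a standalone sketch; the import path is an assumption about this module's name):

import torch
from PIL import Image
from scripts.faceswaplab_utils.imgutils import (  # module name assumed
    pil_to_cv2, cv2_to_pil, pil_to_torch, torch_to_pil,
)

img = Image.new("RGB", (8, 8), (255, 0, 0))
# RGB -> BGR -> RGB should be lossless.
assert cv2_to_pil(pil_to_cv2(img)).tobytes() == img.tobytes()

batch = pil_to_torch([img, img])              # uint8 tensor, shape (2, 3, 8, 8)
restored = torch_to_pil(batch.float() / 255)  # torch_to_pil expects floats in [0, 1]
assert restored[0].size == img.size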

@ -0,0 +1,55 @@
import glob
import os
from modules import scripts
from scripts.faceswaplab_globals import EXTENSION_PATH
from modules.shared import opts
from scripts.faceswaplab_utils.faceswaplab_logging import logger
def get_models():
"""
Retrieve a list of swap model files.
This function searches for model files in the specified directories and returns a list of file paths.
The supported file extensions are ".onnx".
Returns:
A list of file paths of the model files.
"""
models_path = os.path.join(scripts.basedir(), EXTENSION_PATH, "models", "*")
models = glob.glob(models_path)
# Add an additional models directory and find files in it
models_path = os.path.join(scripts.basedir(), "models", "faceswaplab", "*")
models += glob.glob(models_path)
# Filter the list to include only files with the supported extensions
models = [x for x in models if x.endswith(".onnx")]
return models
def get_current_model() -> str:
    model = opts.data.get("faceswaplab_model", None)
    if model is None:
        models = get_models()
        model = models[0] if len(models) else None
    logger.info("Trying to use model: %s", model)
    if model is None or not os.path.isfile(model):
        logger.error("The model %s cannot be found or loaded", model)
        raise FileNotFoundError("No faceswap model found. Please add it to the faceswaplab directory.")
    return model
def get_face_checkpoints():
"""
Retrieve a list of face checkpoint paths.
This function searches for face files with the extension ".pkl" in the specified directory and returns a list
containing the paths of those files.
Returns:
list: A list of face paths, including the string "None" as the first element.
"""
faces_path = os.path.join(scripts.basedir(), "models", "faceswaplab", "faces", "*.pkl")
faces = glob.glob(faces_path)
return ["None"] + faces