2026-01-01

This commit is contained in:
2026-03-17 15:16:34 -06:00
parent ec4cf523fb
commit b80274187b
263 changed files with 95164 additions and 3848 deletions
@@ -33,6 +33,20 @@ Links
## Changelog
### 2.3.4
- Add / Apply physics now resets the physics simulation to frame 1.
- Reset all dynamics added to cloth physics panel.
- Reset physics sets frame sync to "play all frames".
- Warning on bake physics button when not "play all frames".
- Fixes to:
- Spring rig simulation reset error when baked.
- Remove all physics error.
- Rigidbody colliders positions when character rig is not at origin.
- Physics weight paint error in Blender 5.0.
- ARKit Proxy CSV load error in Blender 5.0.
- Receive Pose/Sequence error in Blender 5.0.
- Material bake in Blender 5.0.
### 2.3.3
- Blender 5.0 update.
- Blender 5.0 API changed the bone selection methods and the compositor, which broke just about everything ...
@@ -114,7 +114,7 @@ from . import rlx
bl_info = {
"name": "CC/iC Tools",
"author": "Victor Soupday",
"version": (2, 3, 3),
"version": (2, 3, 4),
"blender": (3, 4, 1),
"category": "Characters",
"location": "3D View > Properties > CC/iC Pipeline",
+20 -5
View File
@@ -56,6 +56,10 @@ def prep_bake(context, mat: bpy.types.Material=None, samples=BAKE_SAMPLES, image
# cycles settings
bake_state["samples"] = context.scene.cycles.samples
if utils.B500():
bake_state["use_bake_multires"] = context.scene.render.bake.use_multires
else:
bake_state["use_bake_multires"] = context.scene.render.use_bake_multires
# Blender 3.0
if utils.B300():
bake_state["preview_samples"] = context.scene.cycles.preview_samples
@@ -68,7 +72,6 @@ def prep_bake(context, mat: bpy.types.Material=None, samples=BAKE_SAMPLES, image
bake_state["file_format"] = context.scene.render.image_settings.file_format
bake_state["color_depth"] = context.scene.render.image_settings.color_depth
bake_state["color_mode"] = context.scene.render.image_settings.color_mode
bake_state["use_bake_multires"] = context.scene.render.use_bake_multires
bake_state["use_selected_to_active"] = context.scene.render.bake.use_selected_to_active
bake_state["use_pass_direct"] = context.scene.render.bake.use_pass_direct
bake_state["use_pass_indirect"] = context.scene.render.bake.use_pass_indirect
@@ -87,7 +90,10 @@ def prep_bake(context, mat: bpy.types.Material=None, samples=BAKE_SAMPLES, image
context.scene.cycles.samples = samples
context.scene.render.image_settings.file_format = image_format
context.scene.render.use_bake_multires = False
if utils.B500():
context.scene.render.bake.use_multires = False
else:
context.scene.render.use_bake_multires = False
context.scene.render.bake.use_selected_to_active = False
context.scene.render.bake.use_pass_direct = False
context.scene.render.bake.use_pass_indirect = False
@@ -126,7 +132,10 @@ def prep_bake(context, mat: bpy.types.Material=None, samples=BAKE_SAMPLES, image
bake_state["engine"] = context.scene.render.engine
context.scene.render.engine = 'CYCLES'
bake_state["cycles_bake_type"] = context.scene.cycles.bake_type
bake_state["render_bake_type"] = context.scene.render.bake_type
if utils.B500():
bake_state["render_bake_type"] = context.scene.render.bake.type
else:
bake_state["render_bake_type"] = context.scene.render.bake_type
context.scene.cycles.bake_type = "COMBINED"
@@ -184,7 +193,10 @@ def post_bake(context, state):
context.scene.render.image_settings.file_format = state["file_format"]
context.scene.render.image_settings.color_depth = state["color_depth"]
context.scene.render.image_settings.color_mode = state["color_mode"]
context.scene.render.use_bake_multires = state["use_bake_multires"]
if utils.B500():
context.scene.render.bake.use_multires = state["use_bake_multires"]
else:
context.scene.render.use_bake_multires = state["use_bake_multires"]
context.scene.render.bake.use_selected_to_active = state["use_selected_to_active"]
context.scene.render.bake.use_pass_direct = state["use_pass_direct"]
context.scene.render.bake.use_pass_indirect = state["use_pass_indirect"]
@@ -209,7 +221,10 @@ def post_bake(context, state):
# bake type
context.scene.cycles.bake_type = state["cycles_bake_type"]
context.scene.render.bake_type = state["render_bake_type"]
if utils.B500():
context.scene.render.bake.type = state["render_bake_type"]
else:
context.scene.render.bake_type = state["render_bake_type"]
# remove the bake surface
if "bake_surface" in state:
@@ -0,0 +1,674 @@
GNU GENERAL PUBLIC LICENSE
Version 3, 29 June 2007
Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
Preamble
The GNU General Public License is a free, copyleft license for
software and other kinds of works.
The licenses for most software and other practical works are designed
to take away your freedom to share and change the works. By contrast,
the GNU General Public License is intended to guarantee your freedom to
share and change all versions of a program--to make sure it remains free
software for all its users. We, the Free Software Foundation, use the
GNU General Public License for most of our software; it applies also to
any other work released this way by its authors. You can apply it to
your programs, too.
When we speak of free software, we are referring to freedom, not
price. Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
them if you wish), that you receive source code or can get it if you
want it, that you can change the software or use pieces of it in new
free programs, and that you know you can do these things.
To protect your rights, we need to prevent others from denying you
these rights or asking you to surrender the rights. Therefore, you have
certain responsibilities if you distribute copies of the software, or if
you modify it: responsibilities to respect the freedom of others.
For example, if you distribute copies of such a program, whether
gratis or for a fee, you must pass on to the recipients the same
freedoms that you received. You must make sure that they, too, receive
or can get the source code. And you must show them these terms so they
know their rights.
Developers that use the GNU GPL protect your rights with two steps:
(1) assert copyright on the software, and (2) offer you this License
giving you legal permission to copy, distribute and/or modify it.
For the developers' and authors' protection, the GPL clearly explains
that there is no warranty for this free software. For both users' and
authors' sake, the GPL requires that modified versions be marked as
changed, so that their problems will not be attributed erroneously to
authors of previous versions.
Some devices are designed to deny users access to install or run
modified versions of the software inside them, although the manufacturer
can do so. This is fundamentally incompatible with the aim of
protecting users' freedom to change the software. The systematic
pattern of such abuse occurs in the area of products for individuals to
use, which is precisely where it is most unacceptable. Therefore, we
have designed this version of the GPL to prohibit the practice for those
products. If such problems arise substantially in other domains, we
stand ready to extend this provision to those domains in future versions
of the GPL, as needed to protect the freedom of users.
Finally, every program is threatened constantly by software patents.
States should not allow patents to restrict development and use of
software on general-purpose computers, but in those that do, we wish to
avoid the special danger that patents applied to a free program could
make it effectively proprietary. To prevent this, the GPL assures that
patents cannot be used to render the program non-free.
The precise terms and conditions for copying, distribution and
modification follow.
TERMS AND CONDITIONS
0. Definitions.
"This License" refers to version 3 of the GNU General Public License.
"Copyright" also means copyright-like laws that apply to other kinds of
works, such as semiconductor masks.
"The Program" refers to any copyrightable work licensed under this
License. Each licensee is addressed as "you". "Licensees" and
"recipients" may be individuals or organizations.
To "modify" a work means to copy from or adapt all or part of the work
in a fashion requiring copyright permission, other than the making of an
exact copy. The resulting work is called a "modified version" of the
earlier work or a work "based on" the earlier work.
A "covered work" means either the unmodified Program or a work based
on the Program.
To "propagate" a work means to do anything with it that, without
permission, would make you directly or secondarily liable for
infringement under applicable copyright law, except executing it on a
computer or modifying a private copy. Propagation includes copying,
distribution (with or without modification), making available to the
public, and in some countries other activities as well.
To "convey" a work means any kind of propagation that enables other
parties to make or receive copies. Mere interaction with a user through
a computer network, with no transfer of a copy, is not conveying.
An interactive user interface displays "Appropriate Legal Notices"
to the extent that it includes a convenient and prominently visible
feature that (1) displays an appropriate copyright notice, and (2)
tells the user that there is no warranty for the work (except to the
extent that warranties are provided), that licensees may convey the
work under this License, and how to view a copy of this License. If
the interface presents a list of user commands or options, such as a
menu, a prominent item in the list meets this criterion.
1. Source Code.
The "source code" for a work means the preferred form of the work
for making modifications to it. "Object code" means any non-source
form of a work.
A "Standard Interface" means an interface that either is an official
standard defined by a recognized standards body, or, in the case of
interfaces specified for a particular programming language, one that
is widely used among developers working in that language.
The "System Libraries" of an executable work include anything, other
than the work as a whole, that (a) is included in the normal form of
packaging a Major Component, but which is not part of that Major
Component, and (b) serves only to enable use of the work with that
Major Component, or to implement a Standard Interface for which an
implementation is available to the public in source code form. A
"Major Component", in this context, means a major essential component
(kernel, window system, and so on) of the specific operating system
(if any) on which the executable work runs, or a compiler used to
produce the work, or an object code interpreter used to run it.
The "Corresponding Source" for a work in object code form means all
the source code needed to generate, install, and (for an executable
work) run the object code and to modify the work, including scripts to
control those activities. However, it does not include the work's
System Libraries, or general-purpose tools or generally available free
programs which are used unmodified in performing those activities but
which are not part of the work. For example, Corresponding Source
includes interface definition files associated with source files for
the work, and the source code for shared libraries and dynamically
linked subprograms that the work is specifically designed to require,
such as by intimate data communication or control flow between those
subprograms and other parts of the work.
The Corresponding Source need not include anything that users
can regenerate automatically from other parts of the Corresponding
Source.
The Corresponding Source for a work in source code form is that
same work.
2. Basic Permissions.
All rights granted under this License are granted for the term of
copyright on the Program, and are irrevocable provided the stated
conditions are met. This License explicitly affirms your unlimited
permission to run the unmodified Program. The output from running a
covered work is covered by this License only if the output, given its
content, constitutes a covered work. This License acknowledges your
rights of fair use or other equivalent, as provided by copyright law.
You may make, run and propagate covered works that you do not
convey, without conditions so long as your license otherwise remains
in force. You may convey covered works to others for the sole purpose
of having them make modifications exclusively for you, or provide you
with facilities for running those works, provided that you comply with
the terms of this License in conveying all material for which you do
not control copyright. Those thus making or running the covered works
for you must do so exclusively on your behalf, under your direction
and control, on terms that prohibit them from making any copies of
your copyrighted material outside their relationship with you.
Conveying under any other circumstances is permitted solely under
the conditions stated below. Sublicensing is not allowed; section 10
makes it unnecessary.
3. Protecting Users' Legal Rights From Anti-Circumvention Law.
No covered work shall be deemed part of an effective technological
measure under any applicable law fulfilling obligations under article
11 of the WIPO copyright treaty adopted on 20 December 1996, or
similar laws prohibiting or restricting circumvention of such
measures.
When you convey a covered work, you waive any legal power to forbid
circumvention of technological measures to the extent such circumvention
is effected by exercising rights under this License with respect to
the covered work, and you disclaim any intention to limit operation or
modification of the work as a means of enforcing, against the work's
users, your or third parties' legal rights to forbid circumvention of
technological measures.
4. Conveying Verbatim Copies.
You may convey verbatim copies of the Program's source code as you
receive it, in any medium, provided that you conspicuously and
appropriately publish on each copy an appropriate copyright notice;
keep intact all notices stating that this License and any
non-permissive terms added in accord with section 7 apply to the code;
keep intact all notices of the absence of any warranty; and give all
recipients a copy of this License along with the Program.
You may charge any price or no price for each copy that you convey,
and you may offer support or warranty protection for a fee.
5. Conveying Modified Source Versions.
You may convey a work based on the Program, or the modifications to
produce it from the Program, in the form of source code under the
terms of section 4, provided that you also meet all of these conditions:
a) The work must carry prominent notices stating that you modified
it, and giving a relevant date.
b) The work must carry prominent notices stating that it is
released under this License and any conditions added under section
7. This requirement modifies the requirement in section 4 to
"keep intact all notices".
c) You must license the entire work, as a whole, under this
License to anyone who comes into possession of a copy. This
License will therefore apply, along with any applicable section 7
additional terms, to the whole of the work, and all its parts,
regardless of how they are packaged. This License gives no
permission to license the work in any other way, but it does not
invalidate such permission if you have separately received it.
d) If the work has interactive user interfaces, each must display
Appropriate Legal Notices; however, if the Program has interactive
interfaces that do not display Appropriate Legal Notices, your
work need not make them do so.
A compilation of a covered work with other separate and independent
works, which are not by their nature extensions of the covered work,
and which are not combined with it such as to form a larger program,
in or on a volume of a storage or distribution medium, is called an
"aggregate" if the compilation and its resulting copyright are not
used to limit the access or legal rights of the compilation's users
beyond what the individual works permit. Inclusion of a covered work
in an aggregate does not cause this License to apply to the other
parts of the aggregate.
6. Conveying Non-Source Forms.
You may convey a covered work in object code form under the terms
of sections 4 and 5, provided that you also convey the
machine-readable Corresponding Source under the terms of this License,
in one of these ways:
a) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by the
Corresponding Source fixed on a durable physical medium
customarily used for software interchange.
b) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by a
written offer, valid for at least three years and valid for as
long as you offer spare parts or customer support for that product
model, to give anyone who possesses the object code either (1) a
copy of the Corresponding Source for all the software in the
product that is covered by this License, on a durable physical
medium customarily used for software interchange, for a price no
more than your reasonable cost of physically performing this
conveying of source, or (2) access to copy the
Corresponding Source from a network server at no charge.
c) Convey individual copies of the object code with a copy of the
written offer to provide the Corresponding Source. This
alternative is allowed only occasionally and noncommercially, and
only if you received the object code with such an offer, in accord
with subsection 6b.
d) Convey the object code by offering access from a designated
place (gratis or for a charge), and offer equivalent access to the
Corresponding Source in the same way through the same place at no
further charge. You need not require recipients to copy the
Corresponding Source along with the object code. If the place to
copy the object code is a network server, the Corresponding Source
may be on a different server (operated by you or a third party)
that supports equivalent copying facilities, provided you maintain
clear directions next to the object code saying where to find the
Corresponding Source. Regardless of what server hosts the
Corresponding Source, you remain obligated to ensure that it is
available for as long as needed to satisfy these requirements.
e) Convey the object code using peer-to-peer transmission, provided
you inform other peers where the object code and Corresponding
Source of the work are being offered to the general public at no
charge under subsection 6d.
A separable portion of the object code, whose source code is excluded
from the Corresponding Source as a System Library, need not be
included in conveying the object code work.
A "User Product" is either (1) a "consumer product", which means any
tangible personal property which is normally used for personal, family,
or household purposes, or (2) anything designed or sold for incorporation
into a dwelling. In determining whether a product is a consumer product,
doubtful cases shall be resolved in favor of coverage. For a particular
product received by a particular user, "normally used" refers to a
typical or common use of that class of product, regardless of the status
of the particular user or of the way in which the particular user
actually uses, or expects or is expected to use, the product. A product
is a consumer product regardless of whether the product has substantial
commercial, industrial or non-consumer uses, unless such uses represent
the only significant mode of use of the product.
"Installation Information" for a User Product means any methods,
procedures, authorization keys, or other information required to install
and execute modified versions of a covered work in that User Product from
a modified version of its Corresponding Source. The information must
suffice to ensure that the continued functioning of the modified object
code is in no case prevented or interfered with solely because
modification has been made.
If you convey an object code work under this section in, or with, or
specifically for use in, a User Product, and the conveying occurs as
part of a transaction in which the right of possession and use of the
User Product is transferred to the recipient in perpetuity or for a
fixed term (regardless of how the transaction is characterized), the
Corresponding Source conveyed under this section must be accompanied
by the Installation Information. But this requirement does not apply
if neither you nor any third party retains the ability to install
modified object code on the User Product (for example, the work has
been installed in ROM).
The requirement to provide Installation Information does not include a
requirement to continue to provide support service, warranty, or updates
for a work that has been modified or installed by the recipient, or for
the User Product in which it has been modified or installed. Access to a
network may be denied when the modification itself materially and
adversely affects the operation of the network or violates the rules and
protocols for communication across the network.
Corresponding Source conveyed, and Installation Information provided,
in accord with this section must be in a format that is publicly
documented (and with an implementation available to the public in
source code form), and must require no special password or key for
unpacking, reading or copying.
7. Additional Terms.
"Additional permissions" are terms that supplement the terms of this
License by making exceptions from one or more of its conditions.
Additional permissions that are applicable to the entire Program shall
be treated as though they were included in this License, to the extent
that they are valid under applicable law. If additional permissions
apply only to part of the Program, that part may be used separately
under those permissions, but the entire Program remains governed by
this License without regard to the additional permissions.
When you convey a copy of a covered work, you may at your option
remove any additional permissions from that copy, or from any part of
it. (Additional permissions may be written to require their own
removal in certain cases when you modify the work.) You may place
additional permissions on material, added by you to a covered work,
for which you have or can give appropriate copyright permission.
Notwithstanding any other provision of this License, for material you
add to a covered work, you may (if authorized by the copyright holders of
that material) supplement the terms of this License with terms:
a) Disclaiming warranty or limiting liability differently from the
terms of sections 15 and 16 of this License; or
b) Requiring preservation of specified reasonable legal notices or
author attributions in that material or in the Appropriate Legal
Notices displayed by works containing it; or
c) Prohibiting misrepresentation of the origin of that material, or
requiring that modified versions of such material be marked in
reasonable ways as different from the original version; or
d) Limiting the use for publicity purposes of names of licensors or
authors of the material; or
e) Declining to grant rights under trademark law for use of some
trade names, trademarks, or service marks; or
f) Requiring indemnification of licensors and authors of that
material by anyone who conveys the material (or modified versions of
it) with contractual assumptions of liability to the recipient, for
any liability that these contractual assumptions directly impose on
those licensors and authors.
All other non-permissive additional terms are considered "further
restrictions" within the meaning of section 10. If the Program as you
received it, or any part of it, contains a notice stating that it is
governed by this License along with a term that is a further
restriction, you may remove that term. If a license document contains
a further restriction but permits relicensing or conveying under this
License, you may add to a covered work material governed by the terms
of that license document, provided that the further restriction does
not survive such relicensing or conveying.
If you add terms to a covered work in accord with this section, you
must place, in the relevant source files, a statement of the
additional terms that apply to those files, or a notice indicating
where to find the applicable terms.
Additional terms, permissive or non-permissive, may be stated in the
form of a separately written license, or stated as exceptions;
the above requirements apply either way.
8. Termination.
You may not propagate or modify a covered work except as expressly
provided under this License. Any attempt otherwise to propagate or
modify it is void, and will automatically terminate your rights under
this License (including any patent licenses granted under the third
paragraph of section 11).
However, if you cease all violation of this License, then your
license from a particular copyright holder is reinstated (a)
provisionally, unless and until the copyright holder explicitly and
finally terminates your license, and (b) permanently, if the copyright
holder fails to notify you of the violation by some reasonable means
prior to 60 days after the cessation.
Moreover, your license from a particular copyright holder is
reinstated permanently if the copyright holder notifies you of the
violation by some reasonable means, this is the first time you have
received notice of violation of this License (for any work) from that
copyright holder, and you cure the violation prior to 30 days after
your receipt of the notice.
Termination of your rights under this section does not terminate the
licenses of parties who have received copies or rights from you under
this License. If your rights have been terminated and not permanently
reinstated, you do not qualify to receive new licenses for the same
material under section 10.
9. Acceptance Not Required for Having Copies.
You are not required to accept this License in order to receive or
run a copy of the Program. Ancillary propagation of a covered work
occurring solely as a consequence of using peer-to-peer transmission
to receive a copy likewise does not require acceptance. However,
nothing other than this License grants you permission to propagate or
modify any covered work. These actions infringe copyright if you do
not accept this License. Therefore, by modifying or propagating a
covered work, you indicate your acceptance of this License to do so.
10. Automatic Licensing of Downstream Recipients.
Each time you convey a covered work, the recipient automatically
receives a license from the original licensors, to run, modify and
propagate that work, subject to this License. You are not responsible
for enforcing compliance by third parties with this License.
An "entity transaction" is a transaction transferring control of an
organization, or substantially all assets of one, or subdividing an
organization, or merging organizations. If propagation of a covered
work results from an entity transaction, each party to that
transaction who receives a copy of the work also receives whatever
licenses to the work the party's predecessor in interest had or could
give under the previous paragraph, plus a right to possession of the
Corresponding Source of the work from the predecessor in interest, if
the predecessor has it or can get it with reasonable efforts.
You may not impose any further restrictions on the exercise of the
rights granted or affirmed under this License. For example, you may
not impose a license fee, royalty, or other charge for exercise of
rights granted under this License, and you may not initiate litigation
(including a cross-claim or counterclaim in a lawsuit) alleging that
any patent claim is infringed by making, using, selling, offering for
sale, or importing the Program or any portion of it.
11. Patents.
A "contributor" is a copyright holder who authorizes use under this
License of the Program or a work on which the Program is based. The
work thus licensed is called the contributor's "contributor version".
A contributor's "essential patent claims" are all patent claims
owned or controlled by the contributor, whether already acquired or
hereafter acquired, that would be infringed by some manner, permitted
by this License, of making, using, or selling its contributor version,
but do not include claims that would be infringed only as a
consequence of further modification of the contributor version. For
purposes of this definition, "control" includes the right to grant
patent sublicenses in a manner consistent with the requirements of
this License.
Each contributor grants you a non-exclusive, worldwide, royalty-free
patent license under the contributor's essential patent claims, to
make, use, sell, offer for sale, import and otherwise run, modify and
propagate the contents of its contributor version.
In the following three paragraphs, a "patent license" is any express
agreement or commitment, however denominated, not to enforce a patent
(such as an express permission to practice a patent or covenant not to
sue for patent infringement). To "grant" such a patent license to a
party means to make such an agreement or commitment not to enforce a
patent against the party.
If you convey a covered work, knowingly relying on a patent license,
and the Corresponding Source of the work is not available for anyone
to copy, free of charge and under the terms of this License, through a
publicly available network server or other readily accessible means,
then you must either (1) cause the Corresponding Source to be so
available, or (2) arrange to deprive yourself of the benefit of the
patent license for this particular work, or (3) arrange, in a manner
consistent with the requirements of this License, to extend the patent
license to downstream recipients. "Knowingly relying" means you have
actual knowledge that, but for the patent license, your conveying the
covered work in a country, or your recipient's use of the covered work
in a country, would infringe one or more identifiable patents in that
country that you have reason to believe are valid.
If, pursuant to or in connection with a single transaction or
arrangement, you convey, or propagate by procuring conveyance of, a
covered work, and grant a patent license to some of the parties
receiving the covered work authorizing them to use, propagate, modify
or convey a specific copy of the covered work, then the patent license
you grant is automatically extended to all recipients of the covered
work and works based on it.
A patent license is "discriminatory" if it does not include within
the scope of its coverage, prohibits the exercise of, or is
conditioned on the non-exercise of one or more of the rights that are
specifically granted under this License. You may not convey a covered
work if you are a party to an arrangement with a third party that is
in the business of distributing software, under which you make payment
to the third party based on the extent of your activity of conveying
the work, and under which the third party grants, to any of the
parties who would receive the covered work from you, a discriminatory
patent license (a) in connection with copies of the covered work
conveyed by you (or copies made from those copies), or (b) primarily
for and in connection with specific products or compilations that
contain the covered work, unless you entered into that arrangement,
or that patent license was granted, prior to 28 March 2007.
Nothing in this License shall be construed as excluding or limiting
any implied license or other defenses to infringement that may
otherwise be available to you under applicable patent law.
12. No Surrender of Others' Freedom.
If conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License. If you cannot convey a
covered work so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you may
not convey it at all. For example, if you agree to terms that obligate you
to collect a royalty for further conveying from those to whom you convey
the Program, the only way you could satisfy both those terms and this
License would be to refrain entirely from conveying the Program.
13. Use with the GNU Affero General Public License.
Notwithstanding any other provision of this License, you have
permission to link or combine any covered work with a work licensed
under version 3 of the GNU Affero General Public License into a single
combined work, and to convey the resulting work. The terms of this
License will continue to apply to the part which is the covered work,
but the special requirements of the GNU Affero General Public License,
section 13, concerning interaction through a network will apply to the
combination as such.
14. Revised Versions of this License.
The Free Software Foundation may publish revised and/or new versions of
the GNU General Public License from time to time. Such new versions will
be similar in spirit to the present version, but may differ in detail to
address new problems or concerns.
Each version is given a distinguishing version number. If the
Program specifies that a certain numbered version of the GNU General
Public License "or any later version" applies to it, you have the
option of following the terms and conditions either of that numbered
version or of any later version published by the Free Software
Foundation. If the Program does not specify a version number of the
GNU General Public License, you may choose any version ever published
by the Free Software Foundation.
If the Program specifies that a proxy can decide which future
versions of the GNU General Public License can be used, that proxy's
public statement of acceptance of a version permanently authorizes you
to choose that version for the Program.
Later license versions may give you additional or different
permissions. However, no additional obligations are imposed on any
author or copyright holder as a result of your choosing to follow a
later version.
15. Disclaimer of Warranty.
THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
16. Limitation of Liability.
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
SUCH DAMAGES.
17. Interpretation of Sections 15 and 16.
If the disclaimer of warranty and limitation of liability provided
above cannot be given local legal effect according to their terms,
reviewing courts shall apply local law that most closely approximates
an absolute waiver of all civil liability in connection with the
Program, unless a warranty or assumption of liability accompanies a
copy of the Program in return for a fee.
END OF TERMS AND CONDITIONS
How to Apply These Terms to Your New Programs
If you develop a new program, and you want it to be of the greatest
possible use to the public, the best way to achieve this is to make it
free software which everyone can redistribute and change under these terms.
To do so, attach the following notices to the program. It is safest
to attach them to the start of each source file to most effectively
state the exclusion of warranty; and each file should have at least
the "copyright" line and a pointer to where the full notice is found.
<one line to give the program's name and a brief idea of what it does.>
Copyright (C) <year> <name of author>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
Also add information on how to contact you by electronic and paper mail.
If the program does terminal interaction, make it output a short
notice like this when it starts in an interactive mode:
<program> Copyright (C) <year> <name of author>
This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
This is free software, and you are welcome to redistribute it
under certain conditions; type `show c' for details.
The hypothetical commands `show w' and `show c' should show the appropriate
parts of the General Public License. Of course, your program's commands
might be different; for a GUI interface, you would use an "about box".
You should also get your employer (if you work as a programmer) or school,
if any, to sign a "copyright disclaimer" for the program, if necessary.
For more information on this, and how to apply and follow the GNU GPL, see
<https://www.gnu.org/licenses/>.
The GNU General Public License does not permit incorporating your program
into proprietary programs. If your program is a subroutine library, you
may consider it more useful to permit linking proprietary applications with
the library. If this is what you want to do, use the GNU Lesser General
Public License instead of this License. But first, please read
<https://www.gnu.org/licenses/why-not-lgpl.html>.
@@ -0,0 +1,884 @@
# CC/iC Blender Tools (Installed in Blender)
An add-on for importing and automatically setting up materials for Character Creator 3, 4 & 5 and iClone 7 & 8 character exports.
Using Blender in the Character Creator pipeline can often feel like hitting a brick wall. Spending potentially hours having to get the import settings correct and setting up the materials often with hundreds of textures.
This add-on aims to reduce that time spent getting characters into Blender down to just a few seconds and make use of as many of the exported textures as possible so that character artists can work in the highest quality possible using Blender.
[Online Documentation](https://soupday.github.io/cc_blender_tools/index.html)
[Reallusion Forum Thread](https://forum.reallusion.com/475005/Blender-Auto-Setup)
Links
=====
[CC4/5 Blender Pipeline Tool (Installed in CC4/5)](https://github.com/soupday/CCiC-Blender-Pipeline-Plugin)
[CC3 Blender Pipeline Tool (Installed in CC3)](https://github.com/soupday/CC3-Blender-Tools-Plugin)
## Installation, Updating, Removal
### To Install
- Download the [latest release](https://github.com/soupday/cc_blender_tools/releases).
- In Blender go to menu **Edit**->**Preferences** then select **Add-ons**.
- Click the **Install** button at the top of the preferences window and navigate to where you downloaded the zip file, select the file and click **Install Add-on**.
- Activate the add-on by ticking the checkbox next to **CC/iC Blender Tools** in the add-ons list.
- The add-ons functionality is available through the **CC/iC Blender Tools** Tab in the tool menu to the right of the main viewport. Press _N_ to show the tools if they are hidden.
### To Remove
- From the menu: **Edit**->**Preferences** then select **Add-ons**
- In the search box search **All** add-ons for **"CC/iC Blender Tools"**
- Deactivate the add-on by unticking the checkbox next to **CC/iC Blender Tools** in the add-ons list.
- Then click the **Remove** button.
### To Update
- Remove the current version of the add-on by following the remove instructions above.
- Follow the installation instructions, above, to install the new version.
## Changelog
### 2.3.4
- Add / apply physics resets physics to frame 1.
- Reset all dynamics added to cloth physics panel.
- Reset physics sets frame sync to "play all frames".
- Warning on bake physics button when not "play all frames".
- Fixes to:
- Spring rig simulation reset error when baked.
- Remove all physics error.
- Rigidbody colliders positions when character rig is not at origin.
- Physics weight paint error in Blender 5.0.
- ARKit Proxy CSV load error in Blender 5.0.
- Receive Pose/Sequence error in Blender 5.0.
- Material bake in Blender 5.0.
### 2.3.3
- Blender 5.0 update.
- Blender 5.0 API changed bone selection methods and compositor which broke just about everything ...
- Eye shader updates, Limbus region should be more accurate to CC4/5 now.
- Expression drivers for CC3 rig use expression json.
- Materials build for current render engine (Eevee / Cycles).
- Buttons activate on Materials Parameters and DataLink panels to rebuild materials if changed.
- Rigify will fall back to the next best face rig if the selected one is unavailable.
- Widgets for face rig included in Link/Append function.
- Fix to rigidbody collider orientation.
- Import option added in preferences to reset all custom normals on import.
- On some characters, custom split normals cause dark lighting artifacts that can be cured by removing custom normals.
- Camera switch markers generated when sending camera's across DataLink.
### 2.3.2
- Shaders updated for CC5:
- Skin shaders updated to use cavity maps and dual specular blending.
- Eye & Hair shaders updated.
- *Tearline Plus* and *Eye Occlusion Plus* shaders added.
- Wrinkle map system update for MH facial profile.
- Wrinkle map displacement update for CC5 head material.
- Textures packed with compositor for faster wrinkle system setup.
- Facerig updated to use expression json bone data.
- MetaHuman profile facial expression rig added.
- Expression control constraints and limits.
- Max texture pack size option added.
- Datalink use automatic lighting option added.
### 2.3.1
- 1:1 Viseme driver fix.
- Fix to Rigify bone locks being cleared by pose reset.
- Export Rigify motion only fix.
- CC Aligned metarig finger rotation fix.
- Fix to FBX exports not adding correct scale units.
- This was causing Rigify export avatar bones to scale incorrectly in Unity.
- Removed Rigify disable IK stretch option as Rigify IK doesn't work correctly without it.
- Rigify export using CC Base naming aligns bones to original CC rig orientations.
- In particular the jaw bones and eye bones should have a more compatible alignment. i.e. Unity ConvAI.
- Exit DataLink shutdown moved from atexit to Unregister.
### 2.3.0
- Rigify:
- Meta-human like facial expression rig for Extended, Standard and Traditional facial profiles.
- Expression rig supports retargeting, datalink motion transfer and Pose/Sequences to and from CC4/iClone8.
- Options to adjust control colours, and face rig placement and attachment to character.
- Supports ActorCore, ActorScan and ActorBuild characters.
- The expression rig allows for more advanced targeting and transfer, for now:
- ARKit Proxy:
- The ARKit proxy can be used as a target for LiveLink face transfer (e.g. from Face-it add-on)
- The proxy will drive the controls on the expression rig, which in turn drives the expression shape keys and head bones on the character.
- Adjustment parameters can be used to alter expression strength, relax / exaggerate expressions, shift the directional bias towards Left/Right and Up/Down expressions.
- Rotational adjustments for head bone.
- Load CSV function to load face capture recordings in CSV format.
- In particular this also works with the output from [Face Landmark Link](https://github.com/Qaanaaq/Face_Landmark_Link/)
- DataLink:
- Motion Pose/Sequences no longer dependent on Quaternions, preserves bone rotation modes across the rig.
- Avatar/Prop skin bones identified by iClone ID, for accurate transfer and retargeting.
- Fix to incorrect prop mesh positioning due to bugged exports from iClone.
- Not all prop hierarchies can be exported exactly to Blender, sometimes they must have their transforms or scale reset in iClone.
- Which now allows for Pose/Sequence transfer of Prop animations from Blender back to iClone.
- Import & Transfer of lights and cameras with animations, including pose and sequences.
- Light & Camera sequences can be sent back to iClone via Sequence transfer.
- (Must already exist in iClone for this to work)
- Lights can use IES and light cookie textures (Cycles Only)
- Spring Rigs:
- Hair spring bone generation fixes for Blender 4.4.
- Fix to remove and rebuild Rigify spring bone control rigs and simulations.
- Materials:
- Fix to incorrect embedded alpha channels from FBX importer.
- Fix to embedded alpha channel custom UV mappings.
- Emission rework, as multiplier on base color, with expanded strength.
- Reflection surface supported as clearcoat.
- Default (no texture) roughness and specular value fixes for Pbr and Tra shaders.
- General Fixes for Blender versions 3.4 - 4.4
- Wrinkle masks (local library images) packed into blend file.
- Export material fixes when image missing from texture image node.
- Export rigified error fixes.
- Convert to humanoid export json avatar type fix.
- Fix to Rigify export blendshapes on Rigify face rig and No face rig.
### 2.2.5
- Fix to Blender 4.1 material refraction setting.
- Rigify setup defaults rig to zero IK stretch.
- Fix to drivers trying build on OBJ / Morph imports.
- Some Bake streamlining and Baked GLTF export button.
- Datalink remote file transfer, to and from remote CC/iClone.
- Wrinkle Region ALL, driver update fix.
- Scene Tools - Eevee setup button.
- Blender 4.4 action slot fixes.
- Fix to Blender 4.4 proportion edit crash when removing all shape keys.
### 2.2.4
- Rigify face rig fallback to envelope weights if auto weights fail.
- DataLink Import motion will optionally (confirmation dialog) import motion to active character if no matching character.
- Rigify head turn expression driver corrected.
- Fixed collision objects being included in export.
- Fixed displacement strength export.
### 2.2.3
- Wrinkle map region strength controls added.
- Nose crease wrinkle maps added to Mouth_Smile_* expressions.
- Export Bake:
- Fixes and alpha fixes in Blender 4.3
- Iris Brightness adjustments removed when baking.
### 2.2.2
- Teeth and tongue added to bone / expression drivers.
- Meta-rig bone alignment options added to the Basic rig panel.
- DataLink pose functions no longer break expression drivers.
### 2.2.1
- Material and Lighting fixes for Blender 4.3.
- When exporting or sending Rigified animations: IK stretch is now disabled in the rig.
- This should help with limb alignment problems on other platforms.
- Rigify Metarig bone rolls are aligned exactly to the CC/iC source bones now.
- This can be disabled in the preferences (or advanced settings) to use the original Metarig bone roll alignments.
- DataLink Send Avatar will ask to overwrite (or cancel transfer) any existing *same* character.
- Generic import option to disable auto-conversion of materials.
- Fix to import hanging when no characters in FBX.
- Rebuild drivers also rebuilds Rigify shape key drivers.
- DataLink transfer Sequence and Pose actions separately labelled "Sequence" and "Pose" no longer just "DataLink" for both.
- Operations that use object or mesh duplicates no longer duplicate the actions on the objects.
### 2.2.0
- Character Management functions - CCiC Create > Character Management:
- Transfer weights supports split body meshes.
- Voxel head diffuse skinning (if installed) button for selected character meshes.
- Clean Empty Data: Removes empty shape keys and empty vertex groups from the character meshes.
- Blend Body Weights: Blends body vertex weights with existing vertex weights on selected objects based on distance from the surface of the body, to correct vertex weighting for clothing items that don't conform correctly to the body, e.g. from Voxel Heat Diffusion weights or from Daz original weights.
- New scene presets added:
- scene view transform and world background strength controls added.
### 2.1.10
- Sets up Rigidbody physics when _appending_ a character from another library blend file via the Append button in the import panel.
- Note: Rigidbody physics does *not* work with linked character _overrides_.
- Export non-standard character fix.
- Support for split meshes when exporting, rebuilding materials, physics & rigging:
- Any mesh separated or duplicated from a source character mesh will be considered part of the character and an extension of that object.
- For split body meshes, the mesh with the head will be used as the source for facial expression drivers and wrinkle map drivers.
- Note: Split *body* meshes will _not_ import back into CC4 as standard CC3+ characters.
- Added buttons to rebuild and to remove just the expression and bone drivers to the Character Build Settings panel.
### 2.1.9
- Maintain operator context for scene operations.
- Align to view distance fix.
### 2.1.8
- Fixes to view 3d shading context in 4.2.
- Imported objects, materials, images and actions detection no longer uses tags.
- Added more checks to skip null material slots.
### 2.1.7
- Sync lights includes Scene IBL from CC/iC visual settings.
- Export to CC3/4 button fixed.
- Added extra checks for AccuRig / ActorScan when unknown humanoids detected.
### 2.1.6
- Import supports multiple file selection.
- To import multiple objects from a folder, press shift + select the FBX files in the importer file selection window.
- Fixes for blender version before 4.0:
- Replace mesh OBJ export.
- Rigging bone layer assignment.
- Property collection clear.
- Rigging widget control scale.
### 2.1.5
- Proportion edit should work on all character types (except Rigified)
- Sync lights and send pose should work in all modes and keeps the active mode.
- Lighting brightness adjust slider.
- Some GameBase detection fixes.
- GameBase skin roughness tweaks.
- Fix for vertex color sampling when json data is missing.
### 2.1.4
- DataLink receive Update / Replace function, to replace whole characters or selected parts.
- Fix to character validation and clean-up.
### 2.1.3
- Motion Set UI errors fixed.
- Eevee-Next SSR Eyes fixes.
- Iris brightness render settings for Eevee & Cycles.
- Bake fixes for Blender 4.2 refractive eyes.
- Export-Bake warnings when not build for Eevee.
- Lighting tweaks.
### 2.1.2
- Blender 4.2 lighting settings fixes and adjustments.
- Eevee-Next Raytracing, shadows and shadow jitter enabled on render settings and lights.
- Blender 4.0+ lighting presets use AgX.
- Eevee & Cycles global material options.
- Control of SSS weights, roughness power and normals for various material types.
### 2.1.1
- Fix UDIM flattening on proportion editing and sculpt base mesh transfer.
- Lighting sync area correction.
- DataLink plugin version compatibility check.
### 2.1.0
- Motion Sets:
- Action name remapping and retargeting overhaul.
- Motion set functions: Load/Push/Clear/Select/Rename/Delete
- Motion prefix and use fake user option added for all animation retargeting and import.
- Motion set filter and motion set info function.
- NLA Tools:
- NLA Bake functions moved to NLA editor panel.
- Motion set panel in rigging and NLA editor and DataLink.
- NLA strip alignment and sizing utility functions.
- Fixes:
- Duplicating character no longer duplicates actions.
- Store object state checks objects/materials exist.
- Positioning fixes with rigify
### 2.0.9
- DataLink:
- DataLink main loop stability improvements.
- Live sequence back to CC4/iClone takes facial expression bone rotations into account.
- Live Sequence stop button.
- Replace mesh function: Quickly send (non topology changing) mesh alterations back to CC4.
- Update material & texture function: Send selected material data and textures back to CC4.
- Sync lighting recalculations.
- Export:
- Restores armature and object states after export.
- Fix to baking custom Diffuse Map channel.
- Fix to Blender 3.4-3.6 Eevee subsurface color.
- Material Parameter controls disabled for linked characters (unless library override).
### 2.0.8
- Import/Export:
- Fix to Update Unity Project being greyed out after saving as Blend file.
- Fix to export non-standard character.
- Fix to export non-standard ARP rigged character.
- (Blender 4.0+) Fix to reverting object and material name changes on export and other force name changes.
- Generic character/prop import expanded to support USD/USDZ.
- Rigify export now have choice of naming system:
- Metarig names (without Root bone) for exporting animations back into CC/iC.
- CC Base names (with Root bone) for exporting to Unity Auto-setup.
- Rigified characters/motion exports now generate custom HIK profile which can be used to import/convert Rigified motion exports into CC/iC.
- DataLink Rigified characters (optionally) disable tweak bones as they are not compatible with CC/iC animation.
- Scene tools:
- Scene lighting presets overhauled.
- Added function to align any object to view location and orientation (useful for placing lights and cameras).
- Added function to add a camera at current view location and orientation.
- Added function to setup a main face tracking camera centered on character's head.
- Added function to convert current view studio lighting into world lighting node setup.
- Rigify:
- Fix to GameBase detection.
- Fix to AccuRig generation code not being recognized as valid Rigify target.
- Fix to support Mixamorigs with suffix numbers in retargeting.
- Auto-retarget toggle added to automatically retarget any animation on character when using Quick Rigify.
- Shaders:
- Skin, eye and hair shaders updated to use Blender 4.0+ Random Walk (Skin) Subsurface Scattering.
- Displacement map (if present) will be used on skin materials for bump and mesh displacement.
- Cycles subsurface calculations and parameters tweaked.
- Separate cycles modifiers for Blender 3.4-3.6 and 4.0+.
- Eye Sclera color tint added.
- Cycles Tearline shader reworked.
- Character Management
- Character edit function added.
- Character duplicate function added (duplicates character objects and meta-data so can be used and configured independently).
- Character tools (select/rigify/convert/delete/duplicate), also sub-panel in DataLink.
- Convert to accessory fix.
- DataLink:
- Added Receiving prop posing and animation live sequence.
- Added custom prop rig control bones when sending through datalink.
- Added Direct Motion Transfer from CC/iC (automatic motion export->import).
- Added "Go iC" button to send (just props for now) back to iClone.
### 2.0.7
- Attempts to restore datalink when reloading linked blend file.
- All returning datalink operators will attempt to first reconnect if not connected.
- Facial expressions included in datalink send pose and sequence. (But not visemes)
- Currently certain expression bone movements are conflicting with existing bone movements.
- You may wish to avoid the Head_Turn expressions as a consequence.
- Character Proportion editing mode added to CC/iC Create panels.
- Spring bone hair binding will add an armature modifier for the hair object if absent, to allow binding for newly created hair meshes.
- Scale body weights now acts on the normalized existing hair weights.
### 2.0.6
- Restored Rigify retarget limb correction utilities.
- Fix to Blender 4.1 import crash caused by 4.1 removing auto-smoothed normals.
### 2.0.5
- Fix to converting generic objects to props.
- Fix to baking value textures back to CC4 when exporting converted props and humanoids.
- DataLink data send rate synchronization improvements.
- Rigify retarget and NLA bake options to bake to FK/IK/Both.
- Rig FK/IK mode set appropriately, unchanged when baking to 'Both'.
- Quick FK/IK switch button added to rigify mini-panel.
- Send Rigified pose or sequence fix.
- Rigify Jaw alignment changed to -Z.
### 2.0.4
- Linking/Appending:
- Added linking/append functions to auto-link to characters in blend files with full character data and functionality.
- Added connect function to re-build character data for linked/appended characters.
- Added custom properties to armatures/meshes and materials to aid re-connection of character data.
- Rebuilding materials will add this custom data to existing characters.
- Auto-linked/Re-connected characters can use full add-on functionality i.e. rigging, retargeting, exporting, rebuilding materials, etc...
- Rigify:
- DataLink pose retargeting teeth position fix.
- Eye bone and jaw bone alignments corrected.
- Face rig jaw constraints adjusted for less lip deformation.
- Parallax eye shader AO fix.
- Basic materials SSS fixes.
- Importing a bad or incompatible mesh should fail more gracefully.
### 2.0.3
- DataLink:
- Lighting and Camera sync.
- Send Character (Go-CC) back to CC4.
- Facial expressions and Visemes transferred in the pose and animation sequencing.
- Animation sequence now writes to rig and shape-key action tracks directly using low level fast keyframing, all at once at the end. Which is much faster, for both native rig and Rigify rig.
- Sequence rate matching so CC4 doesn't get too far ahead of itself.
- Supports Morph editing and mesh updating for sending back morph OBJ for automatic morph slider creation in CC4.
- Subsurface Recalculations (you may need to reset the preferences in the add-on prefs)
- Export of Hue, Sat, Brightness params to CC4.
- Fixed SSS material detection.
- Fixed ActorBuild generation detection and export.
- HIK and facial profiles copied with character export (if generated by CC4)
- OBJ import/export fix for Blender 4.0.
- OBJ import now supports full materials.
- Fixed T-pose orientation when exporting Rigified animation.
### 2.0.2
- Correction to malformed json texture paths when exporting character from CC4 directly to the root of a drive.
- Disabled image search on FBX importer, should import a little faster now.
### 2.0.1
- VRM import fixes.
- VRM to CC4 export generates HIK profile for auto characterization.
- Rigify fixes:
- Face bone roll axis corrections.
- Tongue bone meta-rig positioning corrections.
- Teeth bone retargeting corrections.
### 2.0.0
- Blender 4.0 support.
- WIP Experimental DataLink added:
- Currently in alpha stages, more a proof of concept at the moment.
- Bake add-on updated and merged into this project.
### 1.6.1
- Object Management:
- Generic material conversion better detects AO maps in Blender 3+
- Transfer vertex weights with posed armature fix.
- Empty transform hierarchy to Prop conversion puts bones in the correct places.
- Exporting rigified animations with parented armatures now excludes those armatures from export.
### 1.6.0.4
- Fixed bake rigify retarget not assigning action to rig after baking
- Removes facial expression bone drivers on Rigifying (caused cyclic dependencies)
- Re-importing/rebuilding materials on a character will reload any texture images that are being re-used from existing or previous imports, just in case they have been changed on disk.
- Except when the image has been modified by the user and has not yet been saved.
- NLA Bake fix.
- Fix Generic character import.
- Spring rig panels show if character is invalid for spring rigging.
- Expression drivers for bones only apply to CC4 Ext and Std profiles
- Bone drivers for direct visemes Ah and Oh added.
- Viseme bone drivers now excluded when Jaw drivers are disabled.
- Export Rigified motion and Unity T-Pose generation fix.
### 1.6.0
- Rigifying character keeps meta-rig and allows for Re-Rigifying the control rig from the meta-rig.
- Useful for re-aligning bones, re-positioning face rig, etc...
- First draft of (optional) Dual Specular skin shader (Eevee & Cycles) with specular micro details.
- Added build options to generate drivers for Jaw, Eyes and Head bones from facial expression shape keys.
- Added build option to generate drivers for all expression shape keys driven from the body mesh shape keys.
- Which means only the expressions on the body mesh need to be updated/animated.
- Fix to turn off vertex colours in hair materials when hair mesh has blank vertex colour data (i.e. all zero).
- Facial Expression shape key value range expanded to -1.5 to 1.5 (except for eye look shape keys)
- Characters exported with Mouth Open as Morph, now correctly detects the body mesh.
- Fix to support sphere colliders in collisions shapes.
- Some additional lighting arrangements: Authority and Blur Warm.
### 1.5.8.5
- Fixes:
- Fix to empty transforms or deleted objects in export.
- Fix to transfer vertex weights leaving working copies behind.
- Some object management UI corrections.
- Fix to bake path when exporting character converted from generic with materials added after conversion.
- Fix to replace selected bones from hair cards.
- Fix cloth settings error in detect physics.
- Fix CC4 spring bones creation.
- Fix to exports of objects which originally had duplicate object names.
- Fix to import collider parenting crash when using Blender versions before 3.5
- Fix to UI panel in 2.93.
- Fix to collider generation in 2.93
- Fix to collider generation when Rigifying when posed.
### 1.5.8
- Spring Bones:
- Blender spring bone rigid body simulation added for spring bone hair rigs.
- Spring bone simulation controls.
- Hair spring bone chain renaming.
- Bone generation truncate and smoothing parameters.
- Added support for non-quad-grid poly mesh hair cards, should work with any hair mesh.
- Rigid body colliders for the spring bones that use the collision shapes from character creator.
- Rigify update for spring bone system.
- FK, IK and tweak bones for spring rigs.
- Rigify and spring bone UI updates.
- Baking spring bone simulation and animation into new animations
- Exporting rigified spring bone characters and animations, including the rigid body simulation as animation.
- Cloth Physics overhaul:
- Physics UI update.
- Better mapping of PhysX weight map to blender vertex pin weights.
- Physics presets updates, mass, tension and bending to better simulate the cloth type and work more consistently with external forces.
- Cloth physics preset detection on import.
- UI tools for point cache baking.
- Fixes to weightmap paint mode resetting texture.
- Browse button for painted weightmap, so you can find it.
- Weightmap assignment fix for materials with the same base name.
- Sculpt / Mesh:
- Character geometry transfer to shape-keys function.
- Character Objects:
- Add object to character (from another character) now copies the object into the new character.
- Transfer weight maps now works when posed to effectively parent in place target mesh.
- Other:
- Fix to eye close slider.
### 1.5.7
- Hair bone de-duplication.
- Bones from grease pencil lines or hair card generation now replaces (matching) existing bones.
- Grease Pencil lines generated only from active grease pencil layer, allowing for better organization.
- Added some color space fallbacks when using different color space configurations.
### 1.5.6
- Relative wrinkle strengths for individual wrinkle maps implemented.
- Overall wrinkle strength and curve power slider added the head material parameters.
- Competing wrinkle maps now use additive blending to solve overlap.
- Brow correction added for brow raise + brow compress wrinkles.
- Generated images not yet saved are autosaved for export (so they get included).
- Add custom bone function for hair rigging.
### 1.5.5
- Flow maps added to wrinkle map system.
- Better texture limiting for the head material.
- Fix to export crash when a texture field is missing in the JSON data.
- Corrupted JSON data detection and error report on import/build.
- In some cases resetting the collision shapes in CC4 will fix corrupted JSON data.
### 1.5.4
- Wrinkle Map system implemented.
- Characters with wrinkle maps will setup wrinkle shaders in the head material automatically.
- Preferences for Build Wrinkle Maps.
- OpenColorIO ACES color space support.
- Preferences for ACES color space overrides.
- Optional Texture Packing and Texture limits added to reduce the number of textures in imported materials.
- Some systems can have very low texture limits (i.e. only 8 on some OSX systems) this can help import full CC4 characters.
- Preferences for pack and/or limit textures.
- Body sculpting updated:
- All sculpting modes work on a copy of the character.
- Multi-res applied base shape copied back to original character in a way that preserves existing shape keys.
- AO Map added to baking, layers and export.
- Additional strength, definition and mix mode controls added to layer ui.
- Spring Bone Hair Rigging (Cloth Rigging to follow)
- Added initial Hair curve extraction from hair cards.
- Spring bone hair generation from selected hair cards or greased pencil lines on surface.
- Hair card weight binding fine tuning controls.
- Hair cards are weighted individually to neighboring bones and uniformly across to avoid lateral stretch and behave more like cloth physics implementations.
- Spring bone generation from grease pencil lines.
- Fixes:
- Some extra transparency material detection.
- Fix to hand & finger bone roll alignment when bind pose has arms and hands at a steep downward angle.
- Fix to partial material name matching errors from ActorCore and AccuRig.
- Export bake socket fix for Blender 3.4+.
- Shapekey locks will be disabled and all shapekeys reset to zero on character export.
### 1.5.3
- Fix to retarget baking in Blender 3.4 not baking pose bones to rigify armature action.
- Fix to Rigify motion export bone root name.
### 1.5.2
- Rigify IK-FK influence controls replicated in Rigging panel.
- Fix to material setup error caused by missing normal map data.
### 1.5.1
- Fix to Generic character export.
- Fix to Generic converted character export.
### 1.5.0
- Rigify export mesh and/or animation overhaul.
- Smoothing groups added to export file dialog options.
- Support for CC4 Plugin, facial expression and viseme data export.
- Fix to legacy hair detection & scalp detection.
- Very slight subsurface added to scalp to prevent the dark/blueish artifacts on skin.
- Fix to bump maps connecting to normal sockets.
- Eye limbus darkness recalculated.
- Initial attempt at exporting Blender 3.3 Curve hair on characters via Alembic export.
### 1.4.9
- Fix to embedded image correction and image filepath comparisons.
- Fix to basic material texture loading.
- Convert to Accessory function added to Object Management.
### 1.4.8
- Adding existing RL material to a new character import will copy the material data.
### 1.4.7
- Match existing materials (for AccuRig imports) button added to Create tab and Rigify info pane.
- Attempts to assign existing materials to the AccuRig import that match the original export to AccuRig.
- For a Blender > AccuRig / ActorCore > Blender round trip workflow.
- Eye occlusion (Eevee) color darkened.
- Hair shader (Eevee) Specular blend added (Base surface Specularity -> Anisotropic Specularity)
### 1.4.6
- Fix for palm control location with less than 5 fingers.
### 1.4.5
- Fix for ActorCore / AccuRig import detection.
### 1.4.4
- Missing bone chains from CC3 rig will hide corresponding control rig bones (e.g. missing fingers)
- Export texture baking no longer duplicates baking on same materials across different objects.
- Export fix to CC3/4 when material name starts with a digit.
- Fix to import/convert of multiple generic GLTF/GLB props or characters.
- Fix to exporting Roughness and Metallic texture strength (should always be 100).
### 1.4.3
- Export bake maps now correctly sets bit depth and no alpha channel. Opacity texture bakes should work now.
- Convert generic character from objects to props added.
- Auto converts generic imports where possible, when using the import character button.
- Texture and material de-duplication (optional)
- ActorCore rigify accessory fix.
- CC/iC Create tab and panels added.
- Physics & Object Management moved to CC/iC Create.
- Multi-res sculpt & bake helper tools panels added.
### 1.4.2
- Separate Iris & Sclera emission colors for parallax eye shader.
- Displacement calculation reworked as it behaves differently in Blender 3.2.1 (was causing triangulation artifacts)
### 1.4.1
- Fixes
- Export T-pose action created correctly when character bind pose is already a T-pose.
- Tongue subsurface scatter parameter fixes in UI.
- Hair material UI corrected in Blender 3.2.1 (caused by renamed icon enums)
- Accessory bones now copy to rigify rig.
### 1.4.0
- Physics JSON changes and additions export.
- Unity T-pose export fix.
- Body Collision Mesh fix.
- Unity export logic fixes.
- Resize weightmap function.
- Increment/decrement paint strength weightmap buttons.
- Image path writeback fix.
### 1.3.9
- Full Json data generation added.
- Rigify Toe IK Stretch Fix for Blender 3.1+
- Convert to Non-standard function.
- Convert from Generic character to Reallusion material based non-standard character.
- Export baking fixes and default PBR export support added.
### 1.3.8
- UI naming update.
- Repository rename to **cc_blender_tools** (title **CC/iC Blender Tools**), old repo links still apply.
- Initial support for exporting _any_ non-standard characters to CC4.
- Character should be aligned -Y forward, Z up for correct bone translation in CC4.
- Json data constructed on export to try and reconstruct materials using the **CC4 Blender Tools Plugin**.
- Materials must be based on Principled BSDF otherwise only texture name matching is possible.
- non-standard characters rigged with Auto-Rig Pro will (try to) invoke the ARP export operator (if installed)
- ARP export operator cleans up the rig and leaves only the relevant deformation bones.
- Import functions expanded to allow import from FBX, GLTF and VRM for non-standard characters.
- These are **not** considered to be CC/iC characters and have no material parameter, rigging or physics options.
### 1.3.7
- Iris Color and Iris Cloudy Color added.
- Tool bar tab renamed from **CC3** to **CC/iC**
- Some UI button name changes.
### 1.3.6
- Rigify
- Finger roll alignment fixed. All fingers now have exactly the same local bend axis.
- Disables all physics modifiers non-contributing armatures and meshes during retarget baking to speed it up a bit.
- Physics:
- Low poly (1/8th) Collision Body mesh created from decimating a copy of the Body mesh and removing the eyelashes.
- Hair would easily get trapped in the eyelashes and a lower poly collision mesh should speed up the cloth simulation.
- PhysX weight maps normalized, provides a more consistent and controllable simulation across different weight maps.
- Tweaked some cloth simulation parameters.
- Smart Hair meshes in particular should simulate better now.
- Unity:
- Added animation export options (Actions or Strips)
### 1.3.5
- Fix to shape-key action name matching.
### 1.3.4
- Rigify Retargeting:
- GameBase animation retargeting to CC Rigified Rig.
- (Experimental) Mixamo animation retargeting to CC Rigified Rig.
- Facial Expression Shape-key animation retargeting from source animation to CC character.
- Shape-key NLA baking added (optional).
- Materials:
- Diffuse Color, Hue, Saturation & Brightness parameters for Skin and Hair materials.
- Exporting:
- **Export to CC3** button renamed to **Export Morph Target** when editing OBJ imports with ObjKey.
- When **Export** button is disabled: The reason is displayed under the button.
- Export button Key check is overridable in the add-on preferences.
- Other Changes:
- Fix to ActorCore character (ActorCore website download) animation retargeting.
- Basic face rig generated from full face rig and has the same jaw & eye controls and parameters.
- Jaw pivot retargeting fixes.
- Palm bones no longer affect deformation or generate vertex weights.
- Crash fixes importing characters with very short names.
### 1.3.3
- CC/iC/ActorCore animation retargeting to CC Rigified Rig.
- Preview and Bake functions for retargeted animations.
- Arms, Legs, Heel & Height adjustments.
- Source armature and action selector, filtering by Armature name.
- Animation importer, import multiple animations in one go.
- Renames actions and armatures to match file names.
- Optionally removes meshes & materials.
- Bake control rig & retargeted animations to Unity.
- Export Rigified character to Unity.
- Basic face rig drivers for __Eye Look__ blend shapes. (If character has ExPlus blend shapes)
- GameBase to Rigify support.
### 1.3.2
- Face rig Automatic Weight failure detection and (some) auto-correction.
- Support for Voxel Heat Diffuse Skinning add-on for voxel weight mapping of the face rig.
### 1.3.1
- Optional two stage Rigify process to allow modifications to the meta-rig.
- Optional basic face rigging.
- Support for ActorCore character Rigging. (Only with basic face rigging)
- Support for G3 character generation, which includes Toon base characters. (i.e. the old CC3 base)
- Support for rigging G3Plus, G3, ActorCore characters exported from iClone. (iClone exports with no bone name prefix)
- Some Control Widget fixes and scaling.
- Better character Generation check on import for exports without JSON data.
### 1.3.0
- Rigify function added.
- Character objects are bound to the Rigify control rig.
- Vertex weights are remapped to Rigify deformation bones.
### 1.2.2
- Object Management panel
- All round-trip/Export object management functions moved to this panel.
- Including checks and clean up.
- Normalize weights function added.
- Bake/Combine Bump maps into Normal maps function added to Material Parameters panel.
- Highlight shift added to Eevee hair shader.
### 1.2.1
- Some fixes to exporting additional objects with character.
- Added a 'Check Export' function to identify potential problems with the export.
### 1.2.0
- Added initial export to Unity project function.
- Round-trip/Unity export additions and fixes.
- Added baking raw metallic and roughness values into textures, when no textures connected, when exporting.
- Modifiers removed from eye parts when exporting that prevented exporting blend shapes.
- Modifiers applied on FBX exports.
- Armature set to Pose mode on export, otherwise skeleton/bind-pose imports to CC3 incorrectly.
- Some file path fixes when baking new textures with exports.
- Fixed skin micro-smoothness calculation causing smoothness seams between head and body materials.
- Fixed UI updating wrong unmasked micro smoothness parameter.
- Blender 3.0+ subsurface scattering method set to Christensen-Burley, rather than Random-Walk, which does not work well with hair transparencies.
- Color mixers for actor core/color masked materials prevented from generating negative/zero color values which could affect diffuse lighting.
### 1.1.9
- Fixed PBR eye material crash.
### 1.1.8
- Texture Channel Mixer added, primarily for alteration of Actor Core characters with RGB and Color ID masks, but can be used with all CC3 based materials.
- Cycles hair anisotropy reworked.
- Cycles parameter tweaks:
- Brighter iris settings for SSR eyes.
- Hair subsurface reduced (May want to turn it off completely, there can be too many artifacts generated in the hair.)
- Skin subsurface slightly increased.
### 1.1.7
- Cornea material detection crash fix.
### 1.1.6
- Baking add-on compatibility update.
### 1.1.5
- Character and Object operators (**Character Settings** Panel):
- Add object to character, with parenting and armature modifier.
- Convert and add materials to character to use with material parameters and export write back.
- Transfer body vertex weights to object.
- Clean up object and material character data.
- Export back to CC3 will include Texture & Json data for any new objects added with these operators.
- Fix to Subsurface scattering for Blender 3.0 Cycles renders.
- Cycles adjustment settings added to preferences for user fine tuning of subsurface material parameters for Cycles.
### 1.1.4
- Added more support for exporting from characters with embedded textures.
### 1.1.3
- Baking fix for Blender versions 2.83 - 2.91
- (Experimental) Added operators to add new objects to the character data.
- See Character Settings panel with new object selected.
### 1.1.2
- Export accessory button no longer locked to character.
- Some import/export folder logic changed to try and cope with project folder & files being moved.
- Added custom texture node baking on export to CC3, including baking bump maps into normal maps on export.
- If nodes are used to modify (or replace) texture inputs to material shaders, those nodes can be baked into the texture channel on export. This assumes the mesh has a valid UV map.
- Bump maps can be baked into normal channels. Typically CC3 will only allow Normal maps OR bump maps for a material, not both, so an option has been added to combine them into just the normal map.
### 1.1.1
- Fix to crash from multiple character imports from iClone.
- Note: Exporting multiple characters in one Fbx from iClone to Blender is not fully supported.
- Characters should be exported individually.
- If multiple characters are detected then a warning pop-up will be displayed.
### 1.1.0
- Updated export function to generate compatible Fbx file for CC3 re-import with FbxKey and to write back json material parameter and texture information. To be used in tandem with [Blender Importer Plugin for CC3 3.44](https://github.com/soupday/CC3-Blender-Tools-Plugin) for full round-trip character editing in Blender.
- Import/Export Interface simplified.
- If character has Fbxkey then character is setup for editing. (i.e. Shapekeys locked to basis)
- Otherwise (character is posed or has animation) character is setup for rendering.
- Only Fbxkey character can be exported back to CC3.
- Optional Json and Texture write back for exports.
- Optional teeth rotation fix that affects some older generated characters when importing back into CC3.
- Bake on export function added to bake custom material nodes connected to master shader's texture map sockets into textures to include when re-importing back into CC3.
- Additional objects can be selected for exporting with the character, but must be properly parented and weighted with an armature modifier. Otherwise CC3 will ignore them.
- Some property and parameter fixes.
### 1.0.3
- First attempt at a single material parallax eye shader added. Which does not use SSR or transparency and thus can receive full shadows and subsurface scattering in Eevee.
### 1.0.2
- Fixed Eevee subsurface scattering settings:
- Reworked shaders to allow for direct application of subsurface radius to Principled BSDF nodes.
- Only the default values in the subsurface radius socket are used in Eevee rendering.
- As such, Eevee does not support inputs to subsurface radius and so shader and parameter code needed to be re-written to accommodate this.
- Cycles unaffected by this.
- Fixed node group upgrade code that incorrectly renamed existing node groups and did not properly replace old shader/node groups with new ones in existing blend files.
### 1.0.1
- Added render target preferences setting for Cycles and Eevee.
- Added cycles specific shaders for hair, tear-line and eye occlusion.
### 1.0.0
- Moved all shaders over to new shader model.
- Streamlined parameter and shader code to be data driven, rather than hard coded.
- Character, Object and Material parameters now stored independently for each character import.
- Json data parser to automatically set up all shader parameters.
### 0.7.4
- New eye shader model.
### 0.7.3
- New teeth and tongue shader model.
### 0.7.2
- New skin and head shader model.
### 0.7.1
- Back ported the more advanced Eye Occlusion shader from the Unity HDRP setup.
- Added displacement modifiers & parameters to Eye Occlusion and Tearline objects.
- Initial support for ActorCore models type C/D/D+.
### 0.6.3
- Fixed 'Export as accessory' correctly exporting as .obj when character was imported from an .obj file.
(And not exporting as .fbx with the wrong file extension)
### 0.6.2
- Lighting setups set Cycles transparent bounces set to 50 to accommodate Smart Hair mesh density.
- Lighting setups do not delete existing lights or camera, but they will hide them.
- Material setup now properly detects Game Base objects (i.e. Converted to Game Base in CC3 before exporting to Blender).
- Each material now maintains its own set of parameters.
- Updating material parameters in **linked** mode will change the same parameters on all materials of the same type.
- Updating parameters in **selected** only mode will only change the parameter for that one material.
### 0.5.2
- Applies IOR shader input setting when building materials.
- Exposed some build preferences in the Build Settings panel.
- Enabled SSR and refraction when importing with refractive eyes.
- Auto updater now targets Main branch for current build.
### 0.5.1
- Fixed problem appending duplicate displacement map images.
- Fixed not removing eye displacement modifiers on rebuild.
- Added eye occlusion hardness parameter.
### 0.5.0
- Refractive Eyes:
- Iris refractive transmission with depth control and pupil size parameters.
- Limbus parameters.
- IOR and refractive depth parameters.
- Blood vessel and iris bump normals.
- Option in preferences to generate old eyes instead.
- Skin roughness power parameter added.
### 0.4.3
- Corrected an issue where the opacity maps were ignored in favour of diffuse alpha channels.
- Added opacity parameters for hair, scalp and eyelashes.
- Added roughness and specular parameters for eyelashes.
- Fixed a crash calling the import operator from script.
- Added auto update scripts.
### 0.4.1
- Full smart hair support.
- Hair and scalp hints expanded to cover the smart hair system and moved to the preferences.
- Parameter changes update only that parameter in the imported or selected objects materials.
- Fake anisotropic highlights add to smart hair shader. (Can disable in the preferences.)
- Fake bump normals can be generated from the diffuse map if there is no normal or bump map present. (Can disable in the preferences.)
- Animation ranges only changed if physics enabled.
- Build settings and material parameters separated into their own interface panels.
- Build settings now applicable by material and the object and material build types as detected by the add-on are exposed and editable so you can fix them if it gets them wrong.
- Material parameters are context sensitive to the currently active object and material.
- Material parameters grouped into sections.
- Detects smart hair material or normal hair material and only shows relevant parameters.
- Option in preferences to gamma correct smart hair colours so they behave more like the colours in CC3.
### 0.3.0
- Fix to hair mesh detection with new smart hair system.
### 0.2.2 Alpha
- When no texture maps are present for an advanced node group, does not generate the node group.
- When exporting morph characters with .fbxkey or .objkey files, the key file is copied along with the export.
- Function added to reset preferences to default values.
- Alpha blend settings and back face culling settings can be applied to materials in the object now.
- Option to apply alpha blend settings to whole object(s) or just active material.
- Remembers the applied alpha blend settings and re-applies when rebuilding materials.
- Option to pick Scalp Material.
- Only scans once on import for hair object and scalp material, so it can be cleared if it gets it wrong and won't keep putting it back.
- FBX import keeps track of the objects as well as the armature in case the armature is replaced.
- Physics support added:
- Uses the physX weight maps to auto-generate vertex pin weights for cloth/hair physics (Optional)
- Automatically sets up cloth/hair physics modifiers (Optional)
- Physics cloth presets can be applied to the selected object(s) and are remembered with rebuilding materials.
- Weightmaps can be added/removed to the individual materials of the objects.
- Weight map painting added.
- Saving of modified weight maps and Deleting weight map functions added.
@@ -0,0 +1,297 @@
# Copyright (C) 2021 Victor Soupday
# This file is part of CC/iC Blender Tools <https://github.com/soupday/cc_blender_tools>
#
# CC/iC Blender Tools is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# CC/iC Blender Tools is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with CC/iC Blender Tools. If not, see <https://www.gnu.org/licenses/>.
# When the add-on is re-registered in the same Blender session (script reload
# or add-on update), "bpy" is already bound from the previous load, so every
# sub-module must be explicitly reloaded to pick up code changes.
if "bpy" in locals():
    import importlib
    for _submodule in (addon_updater_ops, preferences, vars, params, utils,
                       lib, cc, jsonutils, nodeutils, imageutils,
                       channel_mixer, materials, characters, hik, meshutils,
                       modifiers, shaders, basic, physics, bake, panels,
                       properties, scene, exporter, importer, geom, bones,
                       rigidbody, springbones, drivers, wrinkle, facerig_data,
                       facerig, rigify_mapping_data, rigging, rigutils,
                       sculpting, hair, colorspace, world, normal, link,
                       proportion, iconutils, rlx):
        importlib.reload(_submodule)
import bpy
from . import addon_updater_ops
from . import preferences
from . import vars
from . import params
from . import utils
from . import lib
from . import cc
from . import jsonutils
from . import nodeutils
from . import imageutils
from . import channel_mixer
from . import materials
from . import characters
from . import hik
from . import meshutils
from . import modifiers
from . import shaders
from . import basic
from . import physics
from . import bake
from . import panels
from . import properties
from . import scene
from . import exporter
from . import importer
from . import geom
from . import bones
from . import rigidbody
from . import springbones
from . import drivers
from . import wrinkle
from . import facerig_data
from . import facerig
from . import rigify_mapping_data
from . import rigging
from . import rigutils
from . import sculpting
from . import hair
from . import colorspace
from . import world
from . import normal
from . import link
from . import proportion
from . import iconutils
from . import rlx
# Add-on metadata read by Blender's add-on system; "blender" is the minimum
# supported Blender version.
bl_info = {
    "name": "CC/iC Tools",
    "author": "Victor Soupday",
    "version": (2, 3, 4),
    "blender": (3, 4, 1),
    "category": "Characters",
    "location": "3D View > Properties > CC/iC Pipeline",
    "description": "Automatic import and material setup of CC3/4-iClone7/8 characters.",
    "wiki_url": "https://soupday.github.io/cc_blender_tools/index.html",
    "tracker_url": "https://github.com/soupday/cc_blender_tools/issues",
}
# Expose the add-on version string to the rest of the code base.
vars.set_version_string(bl_info)
# All classes registered with Blender by register(): property groups first,
# then operators, then UI lists and panels.
classes = (
    preferences.CC3ToolsAddonPreferences,
    preferences.MATERIAL_UL_weightedmatslots,
    channel_mixer.CC3RGBMixer,
    channel_mixer.CC3IDMixer,
    channel_mixer.CC3MixerSettings,
    properties.CCICLinkProps,
    properties.CCICBakeCache,
    properties.CCICBakeMaterialSettings,
    properties.CCICBakeProps,
    properties.CC3ActionList,
    properties.CC3ArmatureList,
    properties.CCIC_UI_MixItem,
    properties.CCIC_UI_MixList,
    properties.CCICActionOptions,
    properties.CC3HeadParameters,
    properties.CC3SkinParameters,
    properties.CC3EyeParameters,
    properties.CC3EyeOcclusionParameters,
    properties.CC3TearlineParameters,
    properties.CC3TeethParameters,
    properties.CC3TongueParameters,
    properties.CC3HairParameters,
    properties.CC3PBRParameters,
    properties.CC3SSSParameters,
    properties.CC3BasicParameters,
    properties.CC3TextureMapping,
    properties.CC3EyeMaterialCache,
    properties.CC3EyeOcclusionMaterialCache,
    properties.CC3TearlineMaterialCache,
    properties.CC3TeethMaterialCache,
    properties.CC3TongueMaterialCache,
    properties.CC3HairMaterialCache,
    properties.CC3HeadMaterialCache,
    properties.CC3SkinMaterialCache,
    properties.CC3PBRMaterialCache,
    properties.CC3SSSMaterialCache,
    properties.CCICExpressionData,
    properties.CC3ObjectCache,
    properties.CCICActionStore,
    properties.CC3CharacterCache,
    properties.CC3ImportProps,
    importer.CC3Import,
    importer.CC3ImportAnimations,
    exporter.CC3Export,
    scene.CC3Scene,
    bake.CC3BakeOperator,
    rigging.CC3Rigifier,
    rigging.CC3RigifierModal,
    bake.CCICBakeSettings,
    bake.CCICBaker,
    bake.CCICJpegify,
    springbones.CC3OperatorSpringBones,
    physics.CC3OperatorPhysics,
    materials.CC3OperatorMaterial,
    characters.CC3OperatorCharacter,
    characters.CCICWeightTransferBlend,
    properties.CC3OperatorProperties,
    preferences.CC3OperatorPreferences,
    channel_mixer.CC3OperatorChannelMixer,
    characters.CC3OperatorTransferCharacterGeometry,
    characters.CC3OperatorTransferMeshGeometry,
    characters.CCICCharacterRename,
    characters.CCICCharacterConvertGeneric,
    sculpting.CC3OperatorSculpt,
    sculpting.CC3OperatorSculptExport,
    hair.CC3OperatorHair,
    hair.CC3ExportHair,
    link.CCICDataLink,
    link.CCICLinkConfirmDialog,
    link.CCICLinkTest,
    characters.CCICCharacterLink,
    proportion.CCICCharacterProportions,
    rigutils.CCICMotionSetRename,
    rigutils.CCICMotionSetInfo,
    rigutils.CCICRigUtils,
    rigutils.CCIC_ImportMixBones_UL_List,
    rigutils.CCIC_RigMixBones_UL_List,
    rigutils.CCICActionImportFunctions,
    rigutils.CCICActionImportOptions,
    facerig.CCICImportARKitCSV,
    panels.ARMATURE_UL_List,
    panels.ACTION_UL_List,
    panels.ACTION_SET_UL_List,
    panels.UNITY_ACTION_UL_List,
    # pipeline panels
    panels.CC3ToolsPipelineImportPanel,
    panels.CC3ToolsPipelineExportPanel,
    panels.CC3CharacterSettingsPanel,
    panels.CC3MaterialParametersPanel,
    panels.CC3RigifyPanel,
    panels.CCICBakePanel,
    panels.CC3PipelineScenePanel,
    # NLA panels
    panels.CCICNLASetsPanel,
    panels.CCICNLABakePanel,
    # create panels
    panels.CC3ToolsCreatePanel,
    panels.CC3ObjectManagementPanel,
    panels.CC3WeightPaintPanel,
    panels.CC3ToolsPhysicsPanel,
    panels.CC3SpringRigPanel,
    panels.CC3ToolsSculptingPanel,
    panels.CCICProportionPanel,
    panels.CC3HairPanel,
    # link panels
    panels.CCICDataLinkPanel,
    panels.CCICAnimationToolsPanel,
    panels.CCICLinkScenePanel,
    # control panels
    panels.CC3SpringControlPanel,
    # test panels
    panels.CC3ToolsUtilityPanel,
)
def register():
    """Register all add-on classes, scene properties, menu entries and handlers."""
    addon_updater_ops.register(bl_info)

    # Classes must be registered before the PointerProperties that reference them.
    for registrable in classes:
        bpy.utils.register_class(registrable)
    iconutils.register()

    # Scene-level property groups.
    scene_type = bpy.types.Scene
    scene_type.CC3ImportProps = bpy.props.PointerProperty(type=properties.CC3ImportProps)
    scene_type.CCICBakeProps = bpy.props.PointerProperty(type=properties.CCICBakeProps)
    scene_type.CCICLinkProps = bpy.props.PointerProperty(type=properties.CCICLinkProps)

    # File menu entries.
    bpy.types.TOPBAR_MT_file_import.append(importer.menu_func_import)
    bpy.types.TOPBAR_MT_file_import.append(importer.menu_func_import_animation)
    bpy.types.TOPBAR_MT_file_export.append(exporter.menu_func_export)

    # Data-link handlers; membership checks avoid double-appending on re-register.
    load_pre = bpy.app.handlers.load_pre
    load_post = bpy.app.handlers.load_post
    if link.disconnect not in load_pre:
        load_pre.append(link.disconnect)
    if link.reconnect not in load_post:
        load_post.append(link.reconnect)
    bpy.app.timers.register(link.reconnect, first_interval=0.5, persistent=False)
def unregister():
    """Undo everything register() set up, in the reverse sense."""
    link.disconnect()
    addon_updater_ops.unregister()

    # File menu entries.
    bpy.types.TOPBAR_MT_file_import.remove(importer.menu_func_import)
    bpy.types.TOPBAR_MT_file_import.remove(importer.menu_func_import_animation)
    bpy.types.TOPBAR_MT_file_export.remove(exporter.menu_func_export)

    for registrable in classes:
        bpy.utils.unregister_class(registrable)
    iconutils.unregister()

    # Scene-level property groups.
    del bpy.types.Scene.CC3ImportProps
    del bpy.types.Scene.CCICBakeProps
    del bpy.types.Scene.CCICLinkProps

    # Data-link handlers (only remove what is actually installed).
    load_pre = bpy.app.handlers.load_pre
    load_post = bpy.app.handlers.load_post
    if link.disconnect in load_pre:
        load_pre.remove(link.disconnect)
    if link.reconnect in load_post:
        load_post.remove(link.reconnect)
@@ -0,0 +1,500 @@
# Copyright (C) 2021 Victor Soupday
# This file is part of CC/iC Blender Tools <https://github.com/soupday/cc_blender_tools>
#
# CC/iC Blender Tools is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# CC/iC Blender Tools is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with CC/iC Blender Tools. If not, see <https://www.gnu.org/licenses/>.
import bpy
import os
from . import materials, nodeutils, imageutils, jsonutils, params, lib, utils, vars
def reset_shader(nodes, links, shader_label, shader_name):
    """Strip a material node tree down to one Principled BSDF wired to one
    Material Output, and return the BSDF node.

    Keeps the first Principled BSDF and first Material Output found (removing
    duplicates and every other node), creating them if missing. All links are
    cleared, emission is zeroed and the BSDF is reconnected to the output.
    """
    bsdf_id = "(" + str(shader_name) + "_BSDF)"
    bsdf_node: bpy.types.Node = None
    output_node: bpy.types.Node = None

    links.clear()

    # Iterate over a snapshot: removing nodes from the bpy collection while
    # iterating it directly can skip elements.
    for n in list(nodes):
        if n.type == "BSDF_PRINCIPLED":
            if not bsdf_node:
                utils.log_info("Keeping old BSDF: " + n.name)
                bsdf_node = n
            else:
                nodes.remove(n)
        elif n.type == "OUTPUT_MATERIAL":
            if output_node:
                nodes.remove(n)
            else:
                output_node = n
        else:
            nodes.remove(n)

    if not bsdf_node:
        bsdf_node = nodes.new("ShaderNodeBsdfPrincipled")
        bsdf_node.name = utils.unique_name(bsdf_id)
        bsdf_node.label = shader_label
        bsdf_node.width = 240
        utils.log_info("Creating new BSDF: " + bsdf_node.name)

    if not output_node:
        output_node = nodes.new("ShaderNodeOutputMaterial")

    bsdf_node.location = (0,0)
    output_node.location = (400, 0)

    # make sure a recycled BSDF does not glow
    emission_socket = nodeutils.input_socket(bsdf_node, "Emission")
    nodeutils.set_node_input_value(bsdf_node, emission_socket, (0,0,0))
    if utils.B400():
        # the separate "Emission Strength" socket is only handled on Blender 4.0+
        emission_strength_socket = nodeutils.input_socket(bsdf_node, "Emission Strength")
        nodeutils.set_node_input_value(bsdf_node, emission_strength_socket, 0)

    # connect the shader to the output
    nodeutils.link_nodes(links, bsdf_node, "BSDF", output_node, "Surface")

    return bsdf_node
def connect_tearline_material(obj, mat, mat_json, processed_images):
    """Set up the basic tearline material on `mat`.

    Resets the node tree to a single Principled BSDF with the white, fully
    metallic, semi-transparent tearline settings from the character's basic
    parameters. `mat_json` and `processed_images` are unused here but kept
    for signature parity with the other connect_*_material functions.
    """
    props = vars.props()
    chr_cache = props.get_character_cache(obj, mat)
    parameters = chr_cache.basic_parameters
    nodes = mat.node_tree.nodes
    links = mat.node_tree.links

    bsdf_node = reset_shader(nodes, links, "Tearline Shader", "basic_tearline")

    base_color_socket = nodeutils.input_socket(bsdf_node, "Base Color")
    metallic_socket = nodeutils.input_socket(bsdf_node, "Metallic")
    specular_socket = nodeutils.input_socket(bsdf_node, "Specular")
    roughness_socket = nodeutils.input_socket(bsdf_node, "Roughness")
    alpha_socket = nodeutils.input_socket(bsdf_node, "Alpha")

    nodeutils.set_node_input_value(bsdf_node, base_color_socket, (1.0, 1.0, 1.0, 1.0))
    nodeutils.set_node_input_value(bsdf_node, metallic_socket, 1.0)
    nodeutils.set_node_input_value(bsdf_node, specular_socket, 1.0)
    nodeutils.set_node_input_value(bsdf_node, roughness_socket, parameters.tearline_roughness)
    nodeutils.set_node_input_value(bsdf_node, alpha_socket, parameters.tearline_alpha)

    bsdf_node.name = utils.unique_name("eye_tearline_shader")

    materials.set_material_alpha(mat, "BLEND", shadows=False, refraction=True)
def connect_eye_occlusion_material(obj, mat, mat_json, processed_images):
    """Set up the basic eye occlusion material on `mat`.

    Resets the node tree to a single black, fully rough Principled BSDF and
    drives its alpha from the "eye_occlusion_mask" node group, using the
    character's basic occlusion strength/hardness parameters. `mat_json` and
    `processed_images` are unused here but kept for signature parity with the
    other connect_*_material functions.
    """
    props = vars.props()
    chr_cache = props.get_character_cache(obj, mat)
    parameters = chr_cache.basic_parameters
    nodes = mat.node_tree.nodes
    links = mat.node_tree.links

    bsdf_node = reset_shader(nodes, links, "Eye Occlusion Shader", "basic_eye_occlusion")
    bsdf_node.name = utils.unique_name("eye_occlusion_shader")

    base_color_socket = nodeutils.input_socket(bsdf_node, "Base Color")
    metallic_socket = nodeutils.input_socket(bsdf_node, "Metallic")
    specular_socket = nodeutils.input_socket(bsdf_node, "Specular")
    roughness_socket = nodeutils.input_socket(bsdf_node, "Roughness")
    alpha_socket = nodeutils.input_socket(bsdf_node, "Alpha")

    nodeutils.set_node_input_value(bsdf_node, base_color_socket, (0,0,0,1))
    nodeutils.set_node_input_value(bsdf_node, metallic_socket, 0.0)
    nodeutils.set_node_input_value(bsdf_node, specular_socket, 0.0)
    nodeutils.set_node_input_value(bsdf_node, roughness_socket, 1.0)

    nodeutils.reset_cursor()

    # groups
    group = lib.get_node_group("eye_occlusion_mask")
    occ_node = nodeutils.make_node_group_node(nodes, group, "Eye Occulsion Alpha", "eye_occlusion_mask")
    # values
    nodeutils.set_node_input_value(occ_node, "Strength", parameters.eye_occlusion)
    nodeutils.set_node_input_value(occ_node, "Hardness", parameters.eye_occlusion_power)
    # links
    nodeutils.link_nodes(links, occ_node, "Alpha", bsdf_node, alpha_socket)

    materials.set_material_alpha(mat, "BLEND", shadows=False)
def connect_basic_eye_material(obj, mat, mat_json, processed_images):
    """Build the basic (non-advanced) eye material on *mat*.

    Diffuse is routed through an HSV node so eye brightness can be adjusted,
    metallic/specular/roughness come from value nodes, an optional sclera
    normal map is attached, and a fixed clearcoat provides the wet look.
    The material is left fully opaque.
    """
    props = vars.props()
    chr_cache = props.get_character_cache(obj, mat)
    parameters = chr_cache.basic_parameters
    obj_cache = chr_cache.get_object_cache(obj)
    mat_cache = chr_cache.get_material_cache(mat)
    nodes = mat.node_tree.nodes
    links = mat.node_tree.links
    bsdf_node = reset_shader(nodes, links, "Eye Shader", "basic_eye")
    # sockets are resolved through nodeutils to cope with socket renames
    # between Blender versions
    base_color_socket = nodeutils.input_socket(bsdf_node, "Base Color")
    metallic_socket = nodeutils.input_socket(bsdf_node, "Metallic")
    specular_socket = nodeutils.input_socket(bsdf_node, "Specular")
    roughness_socket = nodeutils.input_socket(bsdf_node, "Roughness")
    alpha_socket = nodeutils.input_socket(bsdf_node, "Alpha")
    normal_socket = nodeutils.input_socket(bsdf_node, "Normal")
    clearcoat_socket = nodeutils.input_socket(bsdf_node, "Clearcoat")
    clearcoat_roughness_socket = nodeutils.input_socket(bsdf_node, "Clearcoat Roughness")
    # Base Color
    #
    nodeutils.reset_cursor()
    diffuse_image = find_material_image_basic(mat, "DIFFUSE", mat_json, processed_images)
    if diffuse_image is not None:
        diffuse_node = nodeutils.make_image_node(nodes, diffuse_image, "(DIFFUSE)")
        nodeutils.advance_cursor(1.0)
        hsv_node = nodeutils.make_shader_node(nodes, "ShaderNodeHueSaturation", 0.6)
        hsv_node.label = "HSV"
        hsv_node.name = utils.unique_name("eye_basic_hsv")
        # eye brightness is controlled through the HSV value channel
        nodeutils.set_node_input_value(hsv_node, "Value", parameters.eye_brightness)
        # links
        nodeutils.link_nodes(links, diffuse_node, "Color", hsv_node, "Color")
        nodeutils.link_nodes(links, hsv_node, "Color", bsdf_node, base_color_socket)
    # Metallic
    #
    nodeutils.reset_cursor()
    metallic_node = nodeutils.make_value_node(nodes, "Eye Metallic", "eye_metallic", 0.0)
    nodeutils.link_nodes(links, metallic_node, "Value", bsdf_node, metallic_socket)
    # Specular
    #
    nodeutils.reset_cursor()
    specular_node = nodeutils.make_value_node(nodes, "Eye Specular", "eye_specular", parameters.eye_specular)
    nodeutils.link_nodes(links, specular_node, "Value", bsdf_node, specular_socket)
    # Roughness
    #
    nodeutils.reset_cursor()
    roughness_node = nodeutils.make_value_node(nodes, "Eye Roughness", "eye_roughness", parameters.eye_roughness)
    nodeutils.link_nodes(links, roughness_node, "Value", bsdf_node, roughness_socket)
    # Alpha
    #
    nodeutils.set_node_input_value(bsdf_node, alpha_socket, 1.0)
    # Normal
    #
    nodeutils.reset_cursor()
    normal_image = find_material_image_basic(mat, "SCLERANORMAL", mat_json, processed_images)
    if normal_image is not None:
        strength_node = nodeutils.make_value_node(nodes, "Normal Strength", "eye_normal", parameters.eye_normal)
        normal_node = nodeutils.make_image_node(nodes, normal_image, "(SCLERANORMAL)")
        nodeutils.advance_cursor()
        normalmap_node = nodeutils.make_shader_node(nodes, "ShaderNodeNormalMap", 0.6)
        nodeutils.link_nodes(links, strength_node, "Value", normalmap_node, "Strength")
        nodeutils.link_nodes(links, normal_node, "Color", normalmap_node, "Color")
        nodeutils.link_nodes(links, normalmap_node, "Normal", bsdf_node, normal_socket)
    # Clearcoat: fixed wet-look coat over the whole eye
    #
    nodeutils.set_node_input_value(bsdf_node, clearcoat_socket, 1.0)
    nodeutils.set_node_input_value(bsdf_node, clearcoat_roughness_socket, 0.15)
    materials.set_material_alpha(mat, "OPAQUE")
    return
def connect_basic_material(obj, mat, mat_json, processed_images):
    """Build the generic basic (non-advanced) material on *mat*.

    Wires diffuse (with optional AO multiply), subsurface, metallic,
    specular (map, value and/or mask), remapped roughness, emission, alpha
    and normal/bump channels into a single principled shader, using the
    character cache's basic parameters for the adjustable strengths.
    """
    props = vars.props()
    chr_cache = props.get_character_cache(obj, mat)
    parameters = chr_cache.basic_parameters
    obj_cache = chr_cache.get_object_cache(obj)
    mat_cache = chr_cache.get_material_cache(mat)
    nodes = mat.node_tree.nodes
    links = mat.node_tree.links
    bsdf_node = reset_shader(nodes, links, "Basic Shader", "basic")
    # sockets are resolved through nodeutils to cope with socket renames
    # between Blender versions
    base_color_socket = nodeutils.input_socket(bsdf_node, "Base Color")
    metallic_socket = nodeutils.input_socket(bsdf_node, "Metallic")
    specular_socket = nodeutils.input_socket(bsdf_node, "Specular")
    roughness_socket = nodeutils.input_socket(bsdf_node, "Roughness")
    alpha_socket = nodeutils.input_socket(bsdf_node, "Alpha")
    emission_socket = nodeutils.input_socket(bsdf_node, "Emission")
    emission_strength_socket = nodeutils.input_socket(bsdf_node, "Emission Strength")
    normal_socket = nodeutils.input_socket(bsdf_node, "Normal")
    sss_socket = nodeutils.input_socket(bsdf_node, "Subsurface")
    # Base Color
    #
    nodeutils.reset_cursor()
    diffuse_image = find_material_image_basic(mat, "DIFFUSE", mat_json, processed_images)
    ao_image = find_material_image_basic(mat, "AO", mat_json, processed_images)
    diffuse_node = ao_node = None
    if (diffuse_image is not None):
        diffuse_node = nodeutils.make_image_node(nodes, diffuse_image, "(DIFFUSE)")
        if ao_image is not None:
            # AO strength parameter depends on the material category
            if mat_cache.is_skin() or mat_cache.is_nails():
                prop = "skin_ao"
                ao_strength = parameters.skin_ao
            elif mat_cache.is_hair():
                prop = "hair_ao"
                ao_strength = parameters.hair_ao
            else:
                prop = "default_ao"
                ao_strength = parameters.default_ao
            fac_node = nodeutils.make_value_node(nodes, "Ambient Occlusion Strength", prop, ao_strength)
            ao_node = nodeutils.make_image_node(nodes, ao_image, "ao_tex")
            nodeutils.advance_cursor(1.5)
            nodeutils.drop_cursor(0.75)
            # diffuse * AO, with the strength value as the mix factor
            mix_node = nodeutils.make_mixrgb_node(nodes, "MULTIPLY")
            mix_node.name = utils.unique_name("AO_Mix")
            mix_node.label = "AO Mix"
            nodeutils.link_nodes(links, diffuse_node, "Color", mix_node, "Color1")
            nodeutils.link_nodes(links, ao_node, "Color", mix_node, "Color2")
            nodeutils.link_nodes(links, fac_node, "Value", mix_node, "Fac")
            nodeutils.link_nodes(links, mix_node, "Color", bsdf_node, base_color_socket)
        else:
            nodeutils.link_nodes(links, diffuse_node, "Color", bsdf_node, base_color_socket)
    # SSS: only skin gets subsurface scattering
    #
    if mat_cache.is_skin():
        nodeutils.set_node_input_value(bsdf_node, sss_socket, 0.25)
    else:
        nodeutils.set_node_input_value(bsdf_node, sss_socket, 0)
    # Metallic
    #
    nodeutils.reset_cursor()
    metallic_image = find_material_image_basic(mat, "METALLIC", mat_json, processed_images)
    metallic_node = None
    if metallic_image is not None:
        metallic_node = nodeutils.make_image_node(nodes, metallic_image, "(METALLIC)")
        nodeutils.link_nodes(links, metallic_node, "Color", bsdf_node, metallic_socket)
    # Specular
    #
    nodeutils.reset_cursor()
    specular_image = find_material_image_basic(mat, "SPECULAR", mat_json, processed_images)
    mask_image = find_material_image_basic(mat, "SPECMASK", mat_json, processed_images)
    # per-category specular strength parameter; the else branch guarantees
    # prop is never left as "none"
    prop = "none"
    spec = 0.5
    if mat_cache.is_skin() or mat_cache.is_nails():
        prop = "skin_specular"
        spec = parameters.skin_specular
    elif mat_cache.is_hair():
        prop = "hair_specular"
        spec = parameters.hair_specular
    elif mat_cache.is_scalp() or mat_cache.is_eyelash():
        prop = "scalp_specular"
        spec = parameters.scalp_specular
    elif mat_cache.is_teeth():
        prop = "teeth_specular"
        spec = parameters.teeth_specular
    elif mat_cache.is_tongue():
        prop = "tongue_specular"
        spec = parameters.tongue_specular
    else:
        prop = "default_specular"
        spec = parameters.default_specular
    specular_node = mask_node = mult_node = None
    if specular_image is not None:
        specular_node = nodeutils.make_image_node(nodes, specular_image, "(SPECULAR)")
        nodeutils.link_nodes(links, specular_node, "Color", bsdf_node, specular_socket)
    # always make a specular value node for skin or if there is a mask (but no map)
    elif prop != "none":
        specular_node = nodeutils.make_value_node(nodes, "Specular Strength", prop, spec)
        nodeutils.link_nodes(links, specular_node, "Value", bsdf_node, specular_socket)
    if mask_image is not None:
        # multiply the specular source (map or value) by the mask
        mask_node = nodeutils.make_image_node(nodes, mask_image, "(SPECMASK)")
        nodeutils.advance_cursor()
        mult_node = nodeutils.make_math_node(nodes, "MULTIPLY")
        mult_node.name = utils.unique_name("(Specular_Mult)")
        mult_node.label = "Apply Specular Mask"
        if specular_node.type == "VALUE":
            nodeutils.link_nodes(links, specular_node, "Value", mult_node, 0)
        else:
            nodeutils.link_nodes(links, specular_node, "Color", mult_node, 0)
        nodeutils.link_nodes(links, mask_node, "Color", mult_node, 1)
        nodeutils.link_nodes(links, mult_node, "Value", bsdf_node, specular_socket)
    # Roughness
    #
    nodeutils.reset_cursor()
    roughness_image = find_material_image_basic(mat, "ROUGHNESS", mat_json, processed_images)
    roughness_node = None
    if roughness_image is not None:
        roughness_node = nodeutils.make_image_node(nodes, roughness_image, "(ROUGHNESS)")
        if mat_cache.is_skin():
            prop = "skin_roughness"
            roughness = parameters.skin_roughness
        elif mat_cache.is_teeth():
            prop = "teeth_roughness"
            roughness = parameters.teeth_roughness
        elif mat_cache.is_tongue():
            prop = "tongue_roughness"
            roughness = parameters.tongue_roughness
        else:
            prop = "none"
            roughness = 1
        if mat_cache.material_type.startswith("SKIN"):
            # skin: remap the roughness map so its minimum is the skin roughness
            nodeutils.advance_cursor()
            remap_node = nodeutils.make_shader_node(nodes, "ShaderNodeMapRange")
            remap_node.name = utils.unique_name(prop)
            nodeutils.set_node_input_value(remap_node, "To Min", roughness)
            nodeutils.link_nodes(links, roughness_node, "Color", remap_node, "Value")
            nodeutils.link_nodes(links, remap_node, "Result", bsdf_node, roughness_socket)
        elif mat_cache.material_type.startswith("TEETH") or mat_cache.material_type == "TONGUE":
            # teeth/tongue: scale the roughness map by the parameter
            nodeutils.advance_cursor()
            rmult_node = nodeutils.make_math_node(nodes, "MULTIPLY", 1, roughness)
            rmult_node.name = utils.unique_name(prop)
            rmult_node.label = "Roughness Remap"
            nodeutils.link_nodes(links, roughness_node, "Color", rmult_node, 0)
            nodeutils.link_nodes(links, rmult_node, "Value", bsdf_node, roughness_socket)
        else:
            nodeutils.link_nodes(links, roughness_node, "Color", bsdf_node, roughness_socket)
    # Emission
    #
    nodeutils.reset_cursor()
    emission_image = find_material_image_basic(mat,"EMISSION", mat_json, processed_images)
    emission_node = None
    if emission_image is not None:
        emission_node = nodeutils.make_image_node(nodes, emission_image, "(EMISSION)")
        nodeutils.link_nodes(links, emission_node, "Color", bsdf_node, emission_socket)
        emission_strength = jsonutils.get_texture_channel_strength(mat_json, "Glow", 0.0)
        nodeutils.set_node_input_value(bsdf_node, emission_strength_socket, emission_strength)
    # Alpha
    #
    nodeutils.reset_cursor()
    alpha_image = find_material_image_basic(mat, "ALPHA", mat_json, processed_images)
    alpha_node = None
    if alpha_image is not None:
        alpha_node = nodeutils.make_image_node(nodes, alpha_image, "(ALPHA)")
        dir, file = os.path.split(alpha_image.filepath)
        # if the alpha is packed in a diffuse/albedo texture, use its alpha
        # channel, otherwise treat the image color as the alpha value
        if "_diffuse" in file.lower() or "_albedo" in file.lower():
            nodeutils.link_nodes(links, alpha_node, "Alpha", bsdf_node, alpha_socket)
        else:
            nodeutils.link_nodes(links, alpha_node, "Color", bsdf_node, alpha_socket)
    elif diffuse_node:
        # fall back to the diffuse texture's alpha channel
        nodeutils.link_nodes(links, diffuse_node, "Alpha", bsdf_node, alpha_socket)
    # material alpha blend settings
    method = materials.determine_material_alpha(obj_cache, mat_cache, mat_json)
    materials.set_material_alpha(mat, method)
    # Normal
    #
    nodeutils.reset_cursor()
    normal_strength = jsonutils.get_texture_channel_strength(mat_json, "Normal", 1.0)
    normal_image = find_material_image_basic(mat, "NORMAL", mat_json, processed_images)
    bump_image = find_material_image_basic(mat,"BUMP", mat_json, processed_images)
    normal_node = bump_node = normalmap_node = bumpmap_node = None
    if normal_image is not None:
        normal_node = nodeutils.make_image_node(nodes, normal_image, "(NORMAL)")
        nodeutils.advance_cursor()
        normalmap_node = nodeutils.make_shader_node(nodes, "ShaderNodeNormalMap", 0.6)
        nodeutils.link_nodes(links, normal_node, "Color", normalmap_node, "Color")
        nodeutils.link_nodes(links, normalmap_node, "Normal", bsdf_node, normal_socket)
        nodeutils.set_node_input_value(normalmap_node, "Strength", normal_strength)
    if bump_image is not None:
        if mat_cache.is_hair() or mat_cache.is_eyelash() or mat_cache.is_scalp():
            prop = "hair_bump"
            bump_strength = parameters.hair_bump
        else:
            prop = "default_bump"
            bump_strength = parameters.default_bump
        # bump strength parameter is in millimetres; node distance is metres
        bump_strength_node = nodeutils.make_value_node(nodes, "Bump Strength", prop, bump_strength / 1000)
        bump_node = nodeutils.make_image_node(nodes, bump_image, "(BUMP)")
        nodeutils.advance_cursor()
        bumpmap_node = nodeutils.make_shader_node(nodes, "ShaderNodeBump", 0.7)
        nodeutils.advance_cursor()
        nodeutils.link_nodes(links, bump_strength_node, "Value", bumpmap_node, "Distance")
        nodeutils.link_nodes(links, bump_node, "Color", bumpmap_node, "Height")
        # chain the normal map through the bump node when both are present
        if normal_image is not None:
            nodeutils.link_nodes(links, normalmap_node, "Normal", bumpmap_node, "Normal")
        nodeutils.link_nodes(links, bumpmap_node, "Normal", bsdf_node, normal_socket)
def find_material_image_basic(mat, tex_type, mat_json, processed_images):
    """Locate the image for texture channel *tex_type* on *mat*, using the
    material's json data to resolve the texture info for that channel."""
    channel_id = imageutils.get_image_type_json_id(tex_type)
    channel_json = jsonutils.get_texture_info(mat_json, channel_id)
    return imageutils.find_material_image(mat, tex_type, processed_images, channel_json, mat_json)
def update_basic_material(mat, mat_cache, prop):
    """Push basic-parameter values into the shader nodes of *mat*.

    *prop* is a single parameter name, or "ALL" to refresh every parameter.
    Nodes are matched by name against the params.BASIC_PROPS table.
    """
    props = vars.props()
    chr_cache = props.get_character_cache(None, mat)
    parameters = chr_cache.basic_parameters
    # captured for the eval() below: the prop expressions reference `parameters`
    scope = locals()
    if mat is not None and mat.node_tree is not None:
        nodes = mat.node_tree.nodes
        for node in nodes:
            for prop_info in params.BASIC_PROPS:
                # prop_info layout: [direction, socket, node_name, prop_name,
                # default, (optional eval expression)]
                prop_name = prop_info[3]
                prop_node = prop_info[2]
                # an empty node name means the node is named after the property
                if prop_node == "":
                    prop_node = prop_name
                if prop_node in node.name and (prop == "ALL" or prop == prop_name):
                    prop_dir = prop_info[0]
                    prop_socket = prop_info[1]
                    try:
                        # use the custom expression if supplied, otherwise
                        # read the parameter attribute directly
                        if len(prop_info) > 5:
                            prop_eval = prop_info[5]
                        else:
                            prop_eval = "parameters." + prop_name
                        prop_value = eval(prop_eval, None, scope)
                        if prop_dir == "IN":
                            nodeutils.set_node_input_value(node, prop_socket, prop_value)
                        elif prop_dir == "OUT":
                            nodeutils.set_node_output_value(node, prop_socket, prop_value)
                    except Exception as e:
                        utils.log_error("update_basic_materials(): Unable to evaluate or set: " + prop_eval, e)
def init_basic_default(chr_cache):
    """Reset all basic material parameters on *chr_cache* to their defaults.

    Walks params.BASIC_PROPS (entry layout: [direction, socket, node_name,
    prop_name, default, ...]) and assigns each default to the matching
    attribute of chr_cache.basic_parameters.  ActorCore characters then get
    adjusted AO and specular defaults.
    """
    props = vars.props()
    parameters = chr_cache.basic_parameters
    for prop_info in params.BASIC_PROPS:
        prop_name = prop_info[3]
        prop_default = prop_info[4]
        try:
            # setattr replaces the original string-building + exec(), which
            # is both safer and works for non-numeric default values.
            setattr(parameters, prop_name, prop_default)
        except Exception as e:
            utils.log_error(f"init_basic_default(): Unable to set: {prop_name} = {prop_default}", e)
    if chr_cache.is_actor_core():
        chr_cache.basic_parameters.default_ao = 0.2
        chr_cache.basic_parameters.default_specular = 0.2
@@ -0,0 +1,133 @@
import bpy
from mathutils import Matrix, Vector
from . import bones, utils, vars
def set_pose_bone_world_transform(arm, pose_bone: bpy.types.PoseBone, world_transform: dict, local_transform: dict):
    """Apply a CC/iC world transform (locations in centimetres) to the pose matrix of *pose_bone*."""
    if not (arm and pose_bone and world_transform and local_transform):
        return
    # convert from centimetres to metres
    location = utils.array_to_vector(world_transform["location"]) * 0.01
    rotation = utils.array_to_quaternion(world_transform["rotation"])
    raw_scale = utils.array_to_vector(world_transform["scale"])
    # keep only the sign of the source scale, then apply the armature's own scale
    scale = Vector((utils.sign(raw_scale.x), utils.sign(raw_scale.y), utils.sign(raw_scale.z))) * arm.scale
    pose_bone.matrix = utils.make_transform_matrix(location, rotation, scale)
def set_mesh_world_transform(arm, mesh_obj: bpy.types.Object, world_transform: dict, local_transform: dict):
    """Apply a CC/iC world transform (locations in centimetres) to the world matrix of *mesh_obj*."""
    if not (arm and mesh_obj and world_transform and local_transform):
        return
    # convert from centimetres to metres
    location = utils.array_to_vector(world_transform["location"]) * 0.01
    rotation = utils.array_to_quaternion(world_transform["rotation"])
    raw_scale = utils.array_to_vector(world_transform["scale"])
    # keep only the sign of the source scale, then apply the armature's own scale
    scale = Vector((utils.sign(raw_scale.x), utils.sign(raw_scale.y), utils.sign(raw_scale.z))) * arm.scale
    mesh_obj.matrix_world = utils.make_transform_matrix(location, rotation, scale)
def bone_name_match(rl_name, blender_name):
    """Return True if the Reallusion bone name matches the (possibly .00x
    de-duplicated) Blender bone name, directly or via its export name."""
    if rl_name == "_Object_Pivot_Node_":
        rl_name = "CC_Base_Pivot"
    base_name = utils.strip_name(blender_name)
    return base_name in (rl_name, bones.rl_export_bone_name(rl_name))
def deduplicate_name(name, names: dict):
    """Return *name* made unique with a Blender-style ".NNN" suffix.

    *names* tracks how many times each base name has been seen and is
    updated in place; the first occurrence is returned unsuffixed.
    """
    seen = names.get(name, 0)
    names[name] = seen + 1
    if seen == 0:
        return name
    return f"{name}.{seen:03d}"
def match_id_tree(rl_tree, arm=None,
                  pose_bone: bpy.types.PoseBone=None,
                  mesh_obj: bpy.types.Object=None,
                  parent_bone: bpy.types.PoseBone=None,
                  id_map=None,
                  names=None,
                  pose=False):
    """If supplying an armature, match the bone tree to the armature and return a mapping (by id)
    to the armature bones. If no armature (i.e. for rigified avatars), then map the bones (by id)
    to unduplicated bone names

    Recursive: returns (id_tree, id_map).  When *pose* is True the world
    transforms from the RL tree are applied to the matched bones/meshes as
    the tree is walked.
    """
    # start the walk at the armature's root bone when no anchor is given
    if arm and not (pose_bone or mesh_obj):
        pose_bone = arm.pose.bones[0]
    if id_map is None:
        id_map = {}
    if names is None:
        names = {}
    # matched name: the real bone/mesh name, or a de-duplicated RL name
    # when there is no armature to match against
    name = pose_bone.name if pose_bone else mesh_obj.name if mesh_obj else deduplicate_name(rl_tree["name"], names)
    # id_map is a dict of bones by ID, mapping the source skin_bone name to the armature bone or mesh
    id_map[rl_tree["id"]] = {
        "source": rl_tree["name"],
        "name": name,
        "mesh": mesh_obj is not None,
    }
    # id tree
    id_tree = {
        "name": name,
        "id": rl_tree["id"],
        "source": rl_tree["name"],
        "children": []
    }
    world_transform_data = rl_tree.get("world_transform", None)
    local_transform_data = rl_tree.get("local_transform", None)
    if mesh_obj:
        id_tree["mesh"] = True
        if pose and world_transform_data:
            set_mesh_world_transform(arm, mesh_obj, world_transform_data, local_transform_data)
    else:
        if pose and world_transform_data:
            set_pose_bone_world_transform(arm, pose_bone, world_transform_data, local_transform_data)
    #utils.log_detail(f"Bone: {bone.name} / Tree: {rl_tree['name']} {rl_tree['id']}")
    for child_tree in rl_tree["children"]:
        child_name = child_tree["name"]
        #utils.log_detail(f"Trying: {child_name}")
        if arm:
            found = False
            # leaf children may be meshes parented to this bone
            if not found and not child_tree["children"]:
                for obj in arm.children:
                    if obj.parent and obj.parent_type == "BONE" and obj.parent_bone == pose_bone.name:
                        if bone_name_match(child_name, obj.name):
                            #utils.log_detail(f" - child_mesh: {obj.name} / child_tree: {child_name} - parented to: {obj.parent_bone}")
                            found = True
                            # note: child_tree is rebound to the recursion's
                            # result before being appended
                            child_tree = match_id_tree(child_tree, arm=arm, mesh_obj=obj, parent_bone=pose_bone, id_map=id_map, pose=pose)[0]
                            if child_tree:
                                id_tree["children"].append(child_tree)
                            break
            if not found:
                for child_bone in pose_bone.children:
                    if bone_name_match(child_name, child_bone.name):
                        #utils.log_detail(f" - child_bone: {child_bone.name} / child_tree: {child_name}")
                        found = True
                        child_tree = match_id_tree(child_tree, arm=arm, pose_bone=child_bone, id_map=id_map, pose=pose)[0]
                        if child_tree:
                            id_tree["children"].append(child_tree)
                        break
        else:
            # no armature: map by de-duplicated names only
            child_tree = match_id_tree(child_tree, id_map=id_map, names=names)[0]
            if child_tree:
                id_tree["children"].append(child_tree)
    return id_tree, id_map
def confirm_bone_order(bones, ids, id_map: dict):
    """Check that every entry of *id_map* appears in the skin bone names
    (*bones*) and ids (*ids*); logs any missing bones and returns True when
    all are present."""
    all_present = True
    for bone_id, bone_def in id_map.items():
        if bone_id not in ids or bone_def["source"] not in bones:
            utils.log_warn(f"bone {bone_def['source']} ({bone_id}) not found in skin bones!")
            all_present = False
    if all_present and len(ids) < len(id_map):
        utils.log_info("All bones present, but more bones found in id_tree!")
    elif all_present:
        utils.log_info("All bones present!")
    return all_present
def convert_id_tree(arm, id_tree_root):
    """Match *id_tree_root* against armature *arm*; returns (id_tree, id_map)
    or None when no root tree is supplied."""
    if id_tree_root:
        return match_id_tree(id_tree_root, arm=arm)
    return None
@@ -0,0 +1,17 @@
{
"last_check": "2025-12-30 14:52:43.839129",
"backup_date": "December-15-2025",
"update_ready": true,
"ignore": false,
"just_restored": false,
"just_updated": false,
"version_text": {
"link": "https://api.github.com/repos/soupday/cc_blender_tools/zipball/refs/tags/2_3_4_p0",
"version": [
2,
3,
4,
0
]
}
}
@@ -0,0 +1,468 @@
# Copyright (C) 2021 Victor Soupday
# This file is part of CC/iC Blender Tools <https://github.com/soupday/cc_blender_tools>
#
# CC/iC Blender Tools is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# CC/iC Blender Tools is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with CC/iC Blender Tools. If not, see <https://www.gnu.org/licenses/>.
import bpy
from . import nodeutils, utils, params, lib, vars
# Channel list for the mixer UI: string entries are section headers, list
# entries are [label, settings enable-property name, "<TYPE>_<CHANNEL>"].
MIXER_CHANNELS = [
    "RGB_HEADER",
    ["Red Channel (Skin)", "rgb_red_enabled", "RGB_RED"],
    ["Green Channel (Hair)", "rgb_green_enabled", "RGB_GREEN"],
    ["Blue Channel (Mouth)", "rgb_blue_enabled", "RGB_BLUE"],
    "ID_HEADER",
    ["Red Color", "id_red_enabled", "ID_RED"],
    ["Green Color", "id_green_enabled", "ID_GREEN"],
    ["Blue Color", "id_blue_enabled", "ID_BLUE"],
    ["Cyan Color", "id_cyan_enabled", "ID_CYAN"],
    ["Yellow Color", "id_yellow_enabled", "ID_YELLOW"],
    ["Magenta Color", "id_magenta_enabled", "ID_MAGENTA"],
]

# UI rows for a single mixer: ["PROP", label, property name] or
# ["HEADER", label, icon name].
MIXER_UI = [
    ["PROP", "Threshold", "threshold"],
    ["PROP", "Intensity", "intensity"],
    ["PROP", "Normal", "normal"],
    ["HEADER", "Base Color", "COLOR"],
    ["PROP", "Brightness", "color_brightness"],
    ["PROP", "Contrast", "color_contrast"],
    ["PROP", "Hue", "color_hue"],
    ["PROP", "Saturation", "color_saturation"],
    ["PROP", "Value", "color_value"],
    ["HEADER", "Metallic", "SURFACE_DATA"],
    ["PROP", "Brightness", "metallic_brightness"],
    ["PROP", "Contrast", "metallic_contrast"],
    ["HEADER", "Specular", "SURFACE_DATA"],
    ["PROP", "Brightness", "specular_brightness"],
    ["PROP", "Contrast", "specular_contrast"],
    ["HEADER", "Roughness", "SURFACE_DATA"],
    ["PROP", "Brightness", "roughness_brightness"],
    ["PROP", "Contrast", "roughness_contrast"],
    ["HEADER", "Emission", "LIGHT"],
    ["PROP", "Brightness", "emission_brightness"],
    ["PROP", "Contrast", "emission_contrast"],
]

# Shader channels passed through the daisy-chained mixer node groups.
MIXER_INPUTS = ["Base Color", "Metallic", "Specular", "Roughness", "Alpha", "Emission", "Normal"]

# Mixer node group input socket name -> mixer property name.
MIXER_PARAMS = [
    ["Mask Threshold", "threshold"],
    ["Intensity", "intensity"],
    ["Color Brightness", "color_brightness"],
    ["Color Contrast", "color_contrast"],
    ["Color Hue", "color_hue"],
    ["Color Saturation", "color_saturation"],
    ["Color Value", "color_value"],
    ["Metallic Brightness", "metallic_brightness"],
    ["Metallic Contrast", "metallic_contrast"],
    ["Specular Brightness", "specular_brightness"],
    ["Specular Contrast", "specular_contrast"],
    ["Roughness Brightness", "roughness_brightness"],
    ["Roughness Contrast", "roughness_contrast"],
    ["Emission Brightness", "emission_brightness"],
    ["Emission Contrast", "emission_contrast"],
    ["Normal Strength", "normal"],
]

# Mask colors (RGBA) keyed by "<TYPE>_<CHANNEL>" — one entry per mixer channel.
MIXER_MASKS = {
    "RGB_RED": (1,0,0,1),
    "RGB_GREEN": (0,1,0,1),
    "RGB_BLUE": (0,0,1,1),
    "ID_RED": (1,0,0,1),
    "ID_GREEN": (0,1,0,1),
    "ID_BLUE": (0,0,1,1),
    "ID_CYAN": (0,1,1,1),
    "ID_YELLOW": (1,1,0,1),
    "ID_MAGENTA": (1,0,1,1),
}
def update_mixer(mixer, context, field):
    """Re-apply the whole mixer state to its node group on the context material.

    Property update callback; *field* names the changed property but is
    informational only — all mixer values are pushed to the node.
    """
    props = vars.props()
    type_channel = f"{mixer.type}_{mixer.channel}"
    # find the current character and material in context
    chr_cache = props.get_context_character_cache(context)
    if not chr_cache:
        return
    mat = utils.get_context_material(context)
    if not (mat and mat.use_nodes):
        return
    node = nodeutils.find_node_by_type_and_keywords(mat.node_tree.nodes, "GROUP", type_channel)
    if node:
        apply_mixer(mixer, node)
def enable_disable_mixer_image(mixer_settings, context):
    """Rebuild the mixers on the context material when a mask image is (un)set."""
    props = vars.props()
    # find the current character and material in context
    chr_cache = props.get_context_character_cache(context)
    if not chr_cache:
        return
    mat = utils.get_context_material(context)
    if mat:
        rebuild_mixers(chr_cache, mat, mixer_settings)
def enable_disable_mixer(mixer_settings, context, type_channel):
    """Sync the mixer for *type_channel* ("RGB_RED", "ID_CYAN", ...) with its
    enabled flag, creating it on demand, then rebuild the material's mixers."""
    props = vars.props()
    # find an existing mixer
    mixer_type, mixer_channel = type_channel.split("_")
    mixer = mixer_settings.get_mixer(mixer_type, mixer_channel)
    # each channel has a matching "<type>_<channel>_enabled" bool property
    enabled = getattr(mixer_settings, f"{type_channel.lower()}_enabled", False)
    # add or remove the given mixer
    if mixer:
        mixer.enabled = enabled
    elif enabled:
        mixer = mixer_settings.add_mixer(mixer_type, mixer_channel)
    # find the current character and material in context
    chr_cache = props.get_context_character_cache(context)
    if chr_cache:
        mat = utils.get_context_material(context)
        if mat:
            rebuild_mixers(chr_cache, mat, mixer_settings)
def remove_mixer(chr_cache, mat, mixer_settings, type_channel):
    """Disable and delete the mixer for *type_channel*, then rebuild the
    material's mixer node chain."""
    mixer_type, mixer_channel = type_channel.split("_")
    # clear the matching "<type>_<channel>_enabled" flag — only the nine
    # known channels (the MIXER_MASKS keys) have one
    if type_channel in MIXER_MASKS:
        setattr(mixer_settings, f"{type_channel.lower()}_enabled", False)
    mixer_settings.remove_mixer(mixer_type, mixer_channel)
    rebuild_mixers(chr_cache, mat, mixer_settings)
def rebuild_mixers(chr_cache, context_mat, mixer_settings):
    """Synchronise the mixer node setup on *context_mat* with *mixer_settings*.

    Adds/removes the RGB and ID mask image nodes and one node group per
    enabled mixer channel, re-applies the mixer values, then re-chains
    everything into the shader via connect_mixers().
    """
    nodes = context_mat.node_tree.nodes
    links = context_mat.node_tree.links
    mixer_nodes = []
    # a mixer type is only active when its mask image has been assigned
    rgb_enabled = mixer_settings.rgb_image is not None
    id_enabled = mixer_settings.id_image is not None
    rgb_image_node = None
    id_image_node = None
    # create or remove the RGB mask image node to match the settings
    rgb_image_node = nodeutils.find_node_by_type_and_keywords(nodes, "TEX_IMAGE", vars.NODE_PREFIX, "MIXER_RGB_MASK")
    if rgb_enabled and not rgb_image_node:
        rgb_image_node = nodeutils.make_image_node(nodes, mixer_settings.rgb_image, "MIXER_RGB_MASK")
    elif not rgb_enabled and rgb_image_node:
        nodes.remove(rgb_image_node)
        rgb_image_node = None
    if rgb_image_node:
        rgb_image_node.location = (-100, -900)
    # same for the ID mask image node
    id_image_node = nodeutils.find_node_by_type_and_keywords(nodes, "TEX_IMAGE", vars.NODE_PREFIX, "MIXER_ID_MASK")
    if id_enabled and not id_image_node:
        id_image_node = nodeutils.make_image_node(nodes, mixer_settings.id_image, "MIXER_ID_MASK")
    elif not id_enabled and id_image_node:
        nodes.remove(id_image_node)
        id_image_node = None
    if id_image_node:
        id_image_node.location = (-100, -1200)
    for channel_ref in MIXER_CHANNELS:
        # list entries are channels; string entries are UI headers — skip them
        if type(channel_ref) == list:
            mixer_type_channel = channel_ref[2]
            mixer_type, mixer_channel = mixer_type_channel.split("_")
            show_mixer_type = False
            if mixer_type == "RGB":
                show_mixer_type = rgb_enabled
            elif mixer_type == "ID":
                show_mixer_type = id_enabled
            mixer = mixer_settings.get_mixer(mixer_type, mixer_channel)
            mixer_node = nodeutils.find_node_by_type_and_keywords(nodes, "GROUP", mixer_type_channel)
            if mixer:
                if show_mixer_type:
                    # add or remove the node to match the mixer's enabled state
                    if mixer.enabled and not mixer_node:
                        mixer_node = add_mixer_node(nodes, mixer_type, mixer_channel)
                    elif not mixer.enabled and mixer_node:
                        nodes.remove(mixer_node)
                        mixer_node = None
                else:
                    # mixer type inactive (no mask image): remove its node
                    if mixer_node:
                        nodes.remove(mixer_node)
                        mixer_node = None
            if mixer and mixer_node:
                apply_mixer(mixer, mixer_node)
                mixer_nodes.append(mixer_node)
    connect_mixers(chr_cache, context_mat, mixer_nodes, rgb_image_node, id_image_node, mixer_settings)
def apply_mixer(mixer, mixer_node):
    """Copy all mixer property values onto the node group's input sockets.

    MIXER_PARAMS maps each node input socket name to a mixer property name.
    Failures to read or set a single value are logged and skipped.
    """
    for socket, prop in MIXER_PARAMS:
        try:
            # getattr replaces the original eval(f"mixer.{prop}") — same
            # attribute lookup without building and executing code strings
            value = getattr(mixer, prop)
            nodeutils.set_node_input_value(mixer_node, socket, value)
        except Exception as e:
            # was a bare except that discarded the error; log it instead
            utils.log_error(f"Unable to evaluate: mixer.{prop}", e)
    # both mask color inputs are driven from the same mask color
    nodeutils.set_node_input_value(mixer_node, "Mask Color", mixer.mask)
    nodeutils.set_node_input_value(mixer_node, "Id Color", mixer.mask)
def add_mixer_node(nodes, remap_type, remap_channel):
    """Create a mixer node group instance for the given type ("RGB"/"ID") and
    channel; returns None when no matching node group library is found."""
    group_ids = {"RGB": "rl_rgb_mixer", "ID": "rl_id_mixer"}
    group_id = group_ids.get(remap_type)
    group = lib.get_node_group(group_id) if group_id else None
    if not group:
        return None
    label = f"Mixer {remap_type}/{remap_channel}"
    name = f"rl_mixer_{remap_type}_{remap_channel}"
    return nodeutils.make_node_group_node(nodes, group, label, name)
def connect_mixers(chr_cache, mat, mixer_nodes, rgb_image_node, id_image_node, mixer_config):
    """Daisy-chain the mixer nodes between the shader group and the BSDF.

    Each shader channel in MIXER_INPUTS is routed shader_node -> mixer ->
    ... -> bsdf_node, each mixer picks up its mask image input, and the
    nodes are laid out left-to-right.  Note: appends bsdf_node to the
    caller's *mixer_nodes* list.
    """
    nodes = mat.node_tree.nodes
    links = mat.node_tree.links
    mat_cache = chr_cache.get_material_cache(mat)
    shader = params.get_shader_name(mat_cache)
    bsdf_node, shader_node, mix_node = nodeutils.get_shader_nodes(mat, shader)
    output_node = nodeutils.find_node_by_type(nodes, "OUTPUT_MATERIAL")
    # daisy chain and position the mixers from the shader_node > mixers > bsdf_node
    mixer_nodes.append(bsdf_node)
    left_node = shader_node
    location = [200, -500]
    for mixer_node in mixer_nodes:
        # attach the appropriate mask image to each mixer node
        if "Mask Map" in mixer_node.inputs and rgb_image_node:
            nodeutils.link_nodes(links, rgb_image_node, "Color", mixer_node, "Mask Map")
        elif "Id Map" in mixer_node.inputs and id_image_node:
            nodeutils.link_nodes(links, id_image_node, "Color", mixer_node, "Id Map")
        mixer_node.location = location.copy()
        location[0] += 300
        right_node = mixer_node
        # pass every shader channel through this link of the chain
        for input in MIXER_INPUTS:
            nodeutils.link_nodes(links, left_node, input, right_node, input)
        left_node = mixer_node
    # final layout: lift the BSDF and push the output node to the right
    location[1] = 400
    bsdf_node.location = location
    location[0] += 700
    location[1] = -400
    output_node.location = location
class CC3OperatorChannelMixer(bpy.types.Operator):
    """Channel Mixer"""
    bl_idname = "cc3.mixer"
    bl_label = "Channel Mixer"
    bl_options = {"REGISTER", "UNDO"}

    # operator function selector (currently only "REMOVE")
    param: bpy.props.StringProperty(
        name = "param",
        default = ""
    )
    # target mixer channel, e.g. "RGB_RED" or "ID_CYAN"
    type_channel: bpy.props.StringProperty(
        name = "type_channel",
        default = ""
    )

    def execute(self, context):
        props = vars.props()
        # resolve the character and material under the current context
        chr_cache = props.get_context_character_cache(context)
        mat = utils.get_context_material(context)
        if not (chr_cache and mat):
            return {"FINISHED"}
        mat_cache = chr_cache.get_material_cache(mat)
        if mat_cache and self.param == "REMOVE":
            remove_mixer(chr_cache, mat, mat_cache.mixer_settings, self.type_channel)
        return {"FINISHED"}

    @classmethod
    def description(cls, context, properties):
        if properties.param == "REMOVE":
            return "Remove and reset mixer: " + properties.type_channel
        return ""
class CC3MixerBase:
    """Shared property set for the RGB and ID channel mixers.

    Every property update re-applies the full mixer state to its node group
    via update_mixer(); the field name passed to the callback is
    informational only.
    """
    #open_mouth: bpy.props.FloatProperty(default=0.0, min=0, max=1, update=open_mouth_update)
    #import_file: bpy.props.StringProperty(default="", subtype="FILE_PATH")
    enabled: bpy.props.BoolProperty(default=False)
    expanded: bpy.props.BoolProperty(default=True)
    intensity: bpy.props.FloatProperty(default=1.0, min=0, max=1, update=lambda s,c: update_mixer(s,c,"intensity"))
    # base color adjustments
    color_brightness: bpy.props.FloatProperty(default=0.0, min=-2, max=2, update=lambda s,c: update_mixer(s,c,"color_brightness"))
    color_contrast: bpy.props.FloatProperty(default=0.0, min=-2, max=2, update=lambda s,c: update_mixer(s,c,"color_contrast"))
    color_hue: bpy.props.FloatProperty(default=0.5, min=0, max=1, update=lambda s,c: update_mixer(s,c,"color_hue"))
    color_saturation: bpy.props.FloatProperty(default=1.0, min=0, max=4, update=lambda s,c: update_mixer(s,c,"color_saturation"))
    color_value: bpy.props.FloatProperty(default=1.0, min=0, max=4, update=lambda s,c: update_mixer(s,c,"color_value"))
    # per-channel brightness/contrast adjustments
    metallic_brightness: bpy.props.FloatProperty(default=0.0, min=-2, max=2, update=lambda s,c: update_mixer(s,c,"metallic_brightness"))
    metallic_contrast: bpy.props.FloatProperty(default=0.0, min=-2, max=2, update=lambda s,c: update_mixer(s,c,"metallic_contrast"))
    specular_brightness: bpy.props.FloatProperty(default=0.0, min=-2, max=2, update=lambda s,c: update_mixer(s,c,"specular_brightness"))
    specular_contrast: bpy.props.FloatProperty(default=0.0, min=-2, max=2, update=lambda s,c: update_mixer(s,c,"specular_contrast"))
    roughness_brightness: bpy.props.FloatProperty(default=0.0, min=-2, max=2, update=lambda s,c: update_mixer(s,c,"roughness_brightness"))
    roughness_contrast: bpy.props.FloatProperty(default=0.0, min=-2, max=2, update=lambda s,c: update_mixer(s,c,"roughness_contrast"))
    emission_brightness: bpy.props.FloatProperty(default=0.0, min=-2, max=2, update=lambda s,c: update_mixer(s,c,"emission_brightness"))
    emission_contrast: bpy.props.FloatProperty(default=0.0, min=-2, max=2, update=lambda s,c: update_mixer(s,c,"emission_contrast"))
    # NOTE(review): passes "intensity" as the field name — looks like a
    # copy/paste slip, but harmless since update_mixer() ignores the field.
    normal: bpy.props.FloatProperty(default=1.0, min=0, max=2, update=lambda s,c: update_mixer(s,c,"intensity"))
class CC3RGBMixer(bpy.types.PropertyGroup, CC3MixerBase):
    """Channel mixer driven by one channel of an RGB mask image."""
    # Mixer kind discriminator ("RGB" as opposed to "ID").
    type: bpy.props.StringProperty(default="RGB")
    # Which image channel supplies the mask weight.
    channel: bpy.props.EnumProperty(items=[
                        ("RED","Red","Use the red channel as the mask"),
                        ("GREEN","Green","Use the green channel as the mask"),
                        ("BLUE","Blue","Use the blue channel as the mask"),
                    ], default="RED")
    # RGBA mask color (assigned from MIXER_MASKS when the mixer is added).
    mask: bpy.props.FloatVectorProperty(default=(0,0,0,1), subtype="COLOR", size=4)
    # Mask cut-off; the update callback re-syncs the shader via update_mixer.
    threshold: bpy.props.FloatProperty(default=0.75, min=0, max=0.993, update=lambda s,c: update_mixer(s,c,"mask_threshold"))
class CC3IDMixer(bpy.types.PropertyGroup, CC3MixerBase):
    """Channel mixer driven by a solid color region of an ID mask image."""
    # Mixer kind discriminator ("ID" as opposed to "RGB").
    type: bpy.props.StringProperty(default="ID")
    # Which ID map color selects the masked region.
    channel: bpy.props.EnumProperty(items=[
                        ("RED","Red","Use the red color as the mask"),
                        ("GREEN","Green","Use the green color as the mask"),
                        ("BLUE","Blue","Use the blue color as the mask"),
                        ("CYAN","Cyan","Use the cyan color as the mask"),
                        ("YELLOW","Yellow","Use the yellow color as the mask"),
                        ("MAGENTA","Magenta","Use the magenta color as the mask"),
                    ], default="RED")
    # RGBA mask color (assigned from MIXER_MASKS when the mixer is added).
    mask: bpy.props.FloatVectorProperty(default=(0,0,0,1), subtype="COLOR", size=4)
    # Mask cut-off; the update callback re-syncs the shader via update_mixer.
    threshold: bpy.props.FloatProperty(default=0.6, min=0, max=0.993, update=lambda s,c: update_mixer(s,c,"mask_threshold"))
class CC3MixerSettings(bpy.types.PropertyGroup):
    """Collection of RGB and ID channel mixers, their mask images and enable flags."""
    rgb_mixers: bpy.props.CollectionProperty(type=CC3RGBMixer)
    id_mixers: bpy.props.CollectionProperty(type=CC3IDMixer)
    # Mask images; assigning one triggers enable_disable_mixer_image.
    rgb_image: bpy.props.PointerProperty(type=bpy.types.Image, update=enable_disable_mixer_image)
    id_image: bpy.props.PointerProperty(type=bpy.types.Image, update=enable_disable_mixer_image)
    # Per-channel enable toggles; each update callback adds/removes the mixer.
    rgb_red_enabled: bpy.props.BoolProperty(default=False, update=lambda s,c: enable_disable_mixer(s,c,"RGB_RED"))
    rgb_green_enabled: bpy.props.BoolProperty(default=False, update=lambda s,c: enable_disable_mixer(s,c,"RGB_GREEN"))
    rgb_blue_enabled: bpy.props.BoolProperty(default=False, update=lambda s,c: enable_disable_mixer(s,c,"RGB_BLUE"))
    id_red_enabled: bpy.props.BoolProperty(default=False, update=lambda s,c: enable_disable_mixer(s,c,"ID_RED"))
    id_green_enabled: bpy.props.BoolProperty(default=False, update=lambda s,c: enable_disable_mixer(s,c,"ID_GREEN"))
    id_blue_enabled: bpy.props.BoolProperty(default=False, update=lambda s,c: enable_disable_mixer(s,c,"ID_BLUE"))
    id_cyan_enabled: bpy.props.BoolProperty(default=False, update=lambda s,c: enable_disable_mixer(s,c,"ID_CYAN"))
    id_yellow_enabled: bpy.props.BoolProperty(default=False, update=lambda s,c: enable_disable_mixer(s,c,"ID_YELLOW"))
    id_magenta_enabled: bpy.props.BoolProperty(default=False, update=lambda s,c: enable_disable_mixer(s,c,"ID_MAGENTA"))
    # Set by invalidate(); gates validate(), delete() and clean_up().
    disabled: bpy.props.BoolProperty(default=False)

    def get_mixer(self, type, channel):
        """Return the mixer matching (type, channel), or None if not present."""
        if type == "RGB":
            for remap in self.rgb_mixers:
                if remap.channel == channel:
                    return remap
        else:
            for remap in self.id_mixers:
                if remap.channel == channel:
                    return remap
        return None

    def add_mixer(self, type, channel):
        """Get or create the mixer for (type, channel), enabled, and return it."""
        remap = self.get_mixer(type, channel)
        if remap is None:
            if type == "RGB":
                remap = self.rgb_mixers.add()
            else:
                remap = self.id_mixers.add()
            # Seed the mask color from the global lookup e.g. MIXER_MASKS["RGB_RED"].
            remap.mask = MIXER_MASKS[f"{type}_{channel}"]
            remap.channel = channel
            remap.enabled = True
            return remap
        else:
            remap.enabled = True
            return remap

    def disable_mixer(self, type, channel):
        """Disable the mixer for (type, channel) if it exists (no error otherwise)."""
        remap = self.get_mixer(type, channel)
        if remap:
            remap.enabled = False

    def remove_mixer(self, type, channel):
        """Remove the mixer for (type, channel). Returns True if one was removed."""
        if type == "RGB":
            for index in range(0, len(self.rgb_mixers)):
                remap = self.rgb_mixers[index]
                if remap.channel == channel:
                    self.rgb_mixers.remove(index)
                    # returning immediately keeps iteration safe after removal
                    return True
        else:
            for index in range(0, len(self.id_mixers)):
                remap = self.id_mixers[index]
                if remap.channel == channel:
                    self.id_mixers.remove(index)
                    return True
        return False

    def validate(self, report=None):
        """A mixer settings block is valid while it has not been invalidated."""
        return not self.disabled

    def invalidate(self):
        """Mark these settings for removal; delete()/clean_up() act on this flag."""
        utils.log_detail(f" - Invalidating Channel mixer:")
        self.disabled = True

    def delete(self):
        """Delete the mask images of an invalidated mixer from the blend data."""
        if self.disabled:
            if utils.image_exists(self.rgb_image):
                utils.log_detail(f" - Deleting mixer image: {self.rgb_image.name}")
                bpy.data.images.remove(self.rgb_image)
            if utils.image_exists(self.id_image):
                utils.log_detail(f" - Deleting mixer image: {self.id_image.name}")
                bpy.data.images.remove(self.id_image)

    def clean_up(self):
        """Clear all mixer collections of an invalidated mixer."""
        if self.disabled:
            utils.log_detail(f" - Cleaning up channel mixer:")
            self.rgb_mixers.clear()
            self.id_mixers.clear()
@@ -0,0 +1,182 @@
# Copyright (C) 2021 Victor Soupday
# This file is part of CC/iC Blender Tools <https://github.com/soupday/cc_blender_tools>
#
# CC/iC Blender Tools is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# CC/iC Blender Tools is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with CC/iC Blender Tools. If not, see <https://www.gnu.org/licenses/>.
import bpy
from . import utils, vars
# Cached EnumProperty item lists, populated lazily by fetch_all_color_spaces /
# fetch_data_color_spaces so the (expensive) RNA enumeration runs only once.
ALL_COLORSPACES = []
DATA_COLORSPACES = []
def is_aces():
    """Return True when the scene's display device is set to ACES."""
    scene = vars.get_context().scene
    return scene.display_settings.display_device == "ACES"
def try_set_color_space(image : bpy.types.Image, color_space_ref):
    """Assign a color space to an image, falling back through known aliases.

    Tries color_space_ref directly first, then a list of equivalent names
    (sRGB-like or data/raw-like depending on the request) to cope with OCIO
    configs (e.g. ACES) that rename the standard spaces.
    Returns True on success, False if no candidate name was accepted.
    """
    prefs = vars.prefs()
    try:
        image.colorspace_settings.name = color_space_ref
        return True
    except Exception:
        pass
    if color_space_ref == "sRGB" or color_space_ref == prefs.aces_srgb_override:
        candidate_spaces = ["sRGB", "srgb", "role_matte_paint", "Utility - Linear - sRGB"]
    else:
        # was misleadingly named rgb_color_spaces; these are data/raw spaces
        candidate_spaces = ["Non-Color", "non-color", "role_data", "Linear", "linear", "Utility - Raw",
                            "Generic Data", "generic data", "Linear BT.709", "Raw", "raw",
                            "Linear Tristimulus", "linear tristimulus"]
    for color_space in candidate_spaces:
        try:
            image.colorspace_settings.name = color_space
            return True
        except Exception:
            pass
    # Fix: report the requested space, not the last alias the loop tried.
    utils.log_error(f"Unable to set color space: {color_space_ref}")
    return False
def set_image_color_space(image : bpy.types.Image, ref_colorspace : str):
    """Apply ref_colorspace to an image, substituting the ACES overrides when active."""
    prefs = vars.prefs()
    if not is_aces():
        try_set_color_space(image, ref_colorspace)
        return
    if ref_colorspace == "Non-Color":
        try_set_color_space(image, prefs.aces_data_override)
    else:
        try_set_color_space(image, prefs.aces_srgb_override)
def try_set_view_transform(view_transform):
    """Set the scene view transform, falling back to "sRGB"; return success."""
    context = vars.get_context()
    for candidate in (view_transform, "sRGB"):
        try:
            context.scene.view_settings.view_transform = candidate
            return True
        except:
            pass
    return False
def try_set_look(look):
    """Set the scene view-settings look, falling back to "NONE"; return success."""
    context = vars.get_context()
    for candidate in (look, "NONE"):
        try:
            context.scene.view_settings.look = candidate
            return True
        except:
            pass
    return False
def set_view_settings(view_transform, look, exposure, gamma):
    """Apply view transform, look, exposure and gamma to the scene.

    Under an ACES OCIO config the arguments are ignored and a neutral
    sRGB / None / 0 / 1 setup is forced. For the AgX transform the look
    name is remapped to Blender's "AgX - ..." naming scheme.
    """
    # (removed an unused `prefs = vars.prefs()` local)
    context = vars.get_context()
    if is_aces():
        try_set_view_transform("sRGB")
        try_set_look("None")
        context.scene.view_settings.exposure = 0.0
        context.scene.view_settings.gamma = 1.0
    else:
        if view_transform == "AgX":
            # Blender prefixes AgX looks; "Medium Contrast" is the base look.
            if look == "Medium Contrast":
                look = "AgX - Base Contrast"
            elif look != "None":
                look = "AgX - " + look
        try_set_view_transform(view_transform)
        try_set_look(look)
        context.scene.view_settings.exposure = exposure
        context.scene.view_settings.gamma = gamma
def fetch_all_color_spaces(self, context):
    """EnumProperty items callback listing image color spaces (cached globally)."""
    global ALL_COLORSPACES
    if not ALL_COLORSPACES:
        enum_keys = bpy.types.Image.bl_rna.properties['colorspace_settings'].fixed_type.properties['name'].enum_items.keys()
        index = 0
        # Promote the ACES matte-paint role to the top as the sRGB default.
        if "role_matte_paint" in enum_keys:
            ALL_COLORSPACES.append(("role_matte_paint", "sRGB", "Default Aces Color (Utility - Linear - sRGB or role_matte_paint)", index))
            index += 1
        for key in enum_keys:
            # skip all-lowercase role aliases, keep the display names
            if key != key.lower():
                ALL_COLORSPACES.append((key, key, key, index))
                index += 1
    return ALL_COLORSPACES
def fetch_data_color_spaces(self, context):
    """EnumProperty items callback listing non-color/data color spaces (cached)."""
    global DATA_COLORSPACES
    if not DATA_COLORSPACES:
        enum_keys = bpy.types.Image.bl_rna.properties['colorspace_settings'].fixed_type.properties['name'].enum_items.keys()
        index = 0
        # Promote the ACES data role to the top as the Raw default.
        if "role_data" in enum_keys:
            DATA_COLORSPACES.append(("role_data", "Raw", "Default Aces Non-Color (Utility - Raw or role_data)", index))
            index += 1
        data_terms = ("data", "raw", "linear", "xyz", "non-color")
        for key in enum_keys:
            lowered = key.lower()
            # skip all-lowercase role aliases, keep only data-like display names
            if key != lowered and any(term in lowered for term in data_terms):
                DATA_COLORSPACES.append((key, key, key, index))
                index += 1
    return DATA_COLORSPACES
def set_sequencer_color_space(color_space):
    """Set the sequencer color space, translating names for ACES and Blender 4.x."""
    settings = vars.get_context().scene.sequencer_colorspace_settings
    if is_aces():
        settings.name = "Utility - Raw" if color_space == "Raw" else "Utility - Linear - sRGB"
    elif utils.B400() and color_space == "Raw":
        # Blender 4.0 renamed the raw data space to "Non-Color".
        settings.name = "Non-Color"
    else:
        settings.name = color_space
@@ -0,0 +1,905 @@
# Copyright (C) 2021 Victor Soupday
# This file is part of CC/iC Blender Tools <https://github.com/soupday/cc_blender_tools>
#
# CC/iC Blender Tools is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# CC/iC Blender Tools is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with CC/iC Blender Tools. If not, see <https://www.gnu.org/licenses/>.
import bpy
import math
import mathutils
from mathutils import Vector
import bmesh
from . import utils
# Code derived from: https://blenderartists.org/t/get-3d-location-of-mesh-surface-point-from-uv-parameter/649486/2
def get_triangulated_bmesh(mesh):
    """Build a triangulated BMesh copy of the mesh (must be in object mode).

    Accepts either a mesh datablock or an object (whose .data is used).
    """
    if isinstance(mesh, bpy.types.Object):
        mesh = mesh.data
    bm = bmesh.new()
    bm.from_mesh(mesh)
    # viewport seems to use fixed / clipping instead of beauty
    bmesh.ops.triangulate(bm, faces=bm.faces, quad_method="BEAUTY", ngon_method="BEAUTY")
    for table in (bm.faces, bm.edges, bm.verts):
        table.ensure_lookup_table()
    return bm
def get_bmesh(mesh):
    """Build a BMesh copy of the mesh with lookup tables ready (object mode).

    Accepts either a mesh datablock or an object (whose .data is used).
    """
    if isinstance(mesh, bpy.types.Object):
        mesh = mesh.data
    bm = bmesh.new()
    bm.from_mesh(mesh)
    for table in (bm.faces, bm.edges, bm.verts):
        table.ensure_lookup_table()
    return bm
def get_world_from_uv(obj, t_mesh, mat_slot, uv_target, threshold):
    """World-space position on obj at uv_target, or None on failure.

    Falls back to the nearest vertex when the uv lies outside the UV islands.
    """
    world = mesh_world_point_from_uv(obj, t_mesh, mat_slot, uv_target)
    if world is None:
        # outside the UV island(s): use the closest vertex in UV space instead
        world = nearest_vert_from_uv(obj, t_mesh, mat_slot, uv_target, threshold, world=True)
    if world is None:
        utils.log_error("Unable to locate uv target: " + str(uv_target))
    return world
def get_local_from_uv(obj, t_mesh, mat_slot, uv_target, threshold):
    """Local-space position on obj at uv_target, or None on failure.

    Falls back to the nearest vertex when the uv lies outside the UV islands.
    """
    local = mesh_local_point_from_uv(t_mesh, mat_slot, uv_target)
    if local is None:
        # outside the UV island(s): use the closest vertex in UV space instead
        local = nearest_vert_from_uv(obj, t_mesh, mat_slot, uv_target, threshold, world=False)
    if local is None:
        utils.log_error("Unable to locate uv target: " + str(uv_target))
    return local
def get_uv_from_world(obj, t_mesh, mat_slot, world_co, project=False):
    """UV coordinates on obj's material slot for a world-space point.

    Returns a zero vector (and logs an error) when the point cannot be
    mapped inside the UV islands.
    """
    uv = mesh_uv_from_world_point(obj, t_mesh, mat_slot, world_co, project=project)
    if uv is None:
        # fixed typo: was "Unable to local point inside UV islands."
        utils.log_error("Unable to locate point inside UV islands.")
        uv = mathutils.Vector((0,0,0))
    return uv
def get_uv_from_local(obj, t_mesh, mat_slot, local_co, project=False):
    """UV coordinates on obj's material slot for a local-space point.

    Returns a zero vector (and logs an error) when the point cannot be
    mapped inside the UV islands.
    """
    uv = mesh_uv_from_local_point(obj, t_mesh, mat_slot, local_co, project=project)
    if uv is None:
        # fixed typo: was "Unable to local point inside UV islands."
        utils.log_error("Unable to locate point inside UV islands.")
        uv = mathutils.Vector((0,0,0))
    return uv
def find_coord(obj, ul, uv, face):
    """World-space position of a uv inside a (triangulated) face.

    ul is the bmesh uv loop layer; uses barycentric interpolation over the
    face's three loops/verts.
    """
    u, v, w = [l[ul].uv.to_3d() for l in face.loops]
    x, y, z = [vert.co for vert in face.verts]
    co = mathutils.geometry.barycentric_transform(uv, u, v, w, x, y, z)
    # Fix: Blender 2.8+ matrix-vector multiplication uses '@', not '*'
    # ('*' raises a TypeError here) — matches mesh_world_point_from_uv.
    return obj.matrix_world @ co
def mesh_local_point_from_uv(b_mesh, mat_slot, uv):
    """Local-space point on the bmesh at the given uv (mat_slot -1 = any slot).

    Returns None when the uv lies outside every face of the slot.
    """
    uv_layer = b_mesh.loops.layers.uv[0]
    for face in b_mesh.faces:
        if mat_slot != -1 and face.material_index != mat_slot:
            continue
        u, v, w = [loop[uv_layer].uv.to_3d() for loop in face.loops]
        if mathutils.geometry.intersect_point_tri_2d(uv, u, v, w):
            x, y, z = [vert.co for vert in face.verts]
            return mathutils.geometry.barycentric_transform(uv, u, v, w, x, y, z)
    return None
def mesh_world_point_from_uv(obj, b_mesh, mat_slot, uv):
    """World-space point on the bmesh at the given uv (mat_slot -1 = any slot).

    Returns None when the uv lies outside every face of the slot.
    """
    uv_layer = b_mesh.loops.layers.uv[0]
    for face in b_mesh.faces:
        if mat_slot != -1 and face.material_index != mat_slot:
            continue
        u, v, w = [loop[uv_layer].uv.to_3d() for loop in face.loops]
        if mathutils.geometry.intersect_point_tri_2d(uv, u, v, w):
            x, y, z = [vert.co for vert in face.verts]
            local_co = mathutils.geometry.barycentric_transform(uv, u, v, w, x, y, z)
            return obj.matrix_world @ local_co
    return None
def mesh_uv_from_world_point(obj, b_mesh, mat_slot, co, project=False):
    """UV on the bmesh for a world-space point (converted to local space first)."""
    world_to_local = obj.matrix_world.inverted()
    return mesh_uv_from_local_point(obj, b_mesh, mat_slot, world_to_local @ co, project=project)
def mesh_uv_from_local_point(obj, b_mesh, mat_slot, co, project=False):
    """UV on the bmesh for a local-space point.

    With project=True the point is first projected onto the mesh surface.
    Returns the uv of the containing face when the point lies on (within
    0.01 of) a face of the slot, otherwise the best (flattest) candidate uv.
    """
    if project:
        co = obj.closest_point_on_mesh(co)[1]
    ul = b_mesh.loops.layers.uv[0]
    best_uv = None
    best_z = 1
    face : bmesh.types.BMFace
    for face in b_mesh.faces:
        # Fix: support the mat_slot == -1 wildcard like the sibling
        # mesh_local_point_from_uv / mesh_world_point_from_uv functions.
        if mat_slot == -1 or face.material_index == mat_slot:
            x, y, z = [vert.co for vert in face.verts]
            u, v, w = [l[ul].uv.to_3d() for l in face.loops]
            uv = mathutils.geometry.barycentric_transform(co, x, y, z, u, v, w)
            if mathutils.geometry.intersect_point_tri_2d(uv, u, v, w):
                d = abs(mathutils.geometry.distance_point_to_plane(co, x, face.normal))
                if mathutils.geometry.intersect_point_tri(co, x, y, z) and d < 0.01:
                    return uv
                # remember the candidate closest to the face plane (smallest |z|)
                if abs(uv.z) < best_z:
                    best_uv = uv
                    best_z = abs(uv.z)
    return best_uv
def nearest_vert_from_uv(obj, mesh, mat_slot, uv, thresh=0, world=True):
    """Position of the vertex whose uv is nearest to `uv`.

    Returns world coordinates when world=True, otherwise local coordinates.
    A vertex within the (scaled) threshold distance is returned immediately;
    otherwise the overall nearest vertex, or None when no face matches.
    """
    thresh = 2 * thresh * thresh
    ul = mesh.loops.layers.uv[0]
    near = None
    near_dist = math.inf
    for face in mesh.faces:
        # -1 matches any material slot, consistent with the uv/point getters
        if mat_slot == -1 or face.material_index == mat_slot:
            for i in range(0, len(face.loops)):
                l = face.loops[i]
                luv = l[ul].uv
                du = luv[0] - uv[0]
                dv = luv[1] - uv[1]
                dsq = du * du + dv * dv
                if dsq < thresh:
                    # Fix: honor the `world` flag here too — previously this
                    # early return always produced world coordinates, breaking
                    # callers such as get_local_from_uv(world=False).
                    co = face.verts[i].co
                    return obj.matrix_world @ co if world else co
                if dsq < near_dist:
                    near = face.verts[i]
                    near_dist = dsq
    if near:
        return obj.matrix_world @ near.co if world else near.co
    return None
def copy_vertex_positions_and_weights(src_obj : bpy.types.Object, dst_obj : bpy.types.Object):
    """Copy vertex positions and deform weights from src_obj to dst_obj by index.

    dst_obj's vertex groups are cleared and rebuilt to mirror src_obj's.
    Positions/weights are only copied when both meshes have the same vertex
    count. Note: a vertex's position is copied only if it carries a weight
    in at least one source group.
    """
    # map: source vertex-group index -> destination vertex-group index
    vg_indices = {}
    dst_obj.vertex_groups.clear()
    src_vg : bpy.types.VertexGroup
    for src_vg in src_obj.vertex_groups:
        dst_vg = dst_obj.vertex_groups.new(name=src_vg.name)
        vg_indices[src_vg.index] = dst_vg.index
    src_mesh : bpy.types.Mesh = src_obj.data
    dst_mesh : bpy.types.Mesh = dst_obj.data
    src_bm = bmesh.new()
    dst_bm = bmesh.new()
    src_bm.from_mesh(src_mesh)
    src_bm.faces.ensure_lookup_table()
    src_bm.verts.ensure_lookup_table()
    dst_bm.from_mesh(dst_mesh)
    dst_bm.faces.ensure_lookup_table()
    dst_bm.verts.ensure_lookup_table()
    matching_vert_count = len(src_bm.verts) == len(dst_bm.verts)
    if matching_vert_count:
        src_bm.verts.layers.deform.verify()
        dst_bm.verts.layers.deform.verify()
        src_dl = src_bm.verts.layers.deform.active
        dst_dl = dst_bm.verts.layers.deform.active
        for src_vert in src_bm.verts:
            i = src_vert.index
            dst_vert : bmesh.types.BMVert = dst_bm.verts[i]
            for src_vg_index in vg_indices:
                dst_vg_index = vg_indices[src_vg_index]
                # vertices not in this source group are skipped entirely
                if src_vg_index in src_vert[src_dl]:
                    dst_vert.co = src_vert.co
                    dst_vert[dst_dl][dst_vg_index] = src_vert[src_dl][src_vg_index]
        # write the modified bmesh back into the destination mesh datablock
        dst_bm.to_mesh(dst_mesh)
def copy_vert_positions_by_uv_id(src_obj, dst_obj, accuracy=5, vertex_group=None,
                                 threshold=0.004, shape_key_name=None, flatten_udim=False):
    """Copy vertex positions from src_obj to dst_obj by matching UV coordinates.

    Vertices are matched by a key of (uv rounded to `accuracy` decimals,
    material slot index), with material slots paired by identity or by
    stripped name. If vertex_group is given, only source vertices weighted
    above `threshold` in that group are copied. If shape_key_name is given,
    positions are written into that shape key (created if needed) instead of
    the base mesh. flatten_udim folds UVs back into the 0..1 U tile.
    Vertices with overlapping UVs are copied by index when vertex counts
    match (they cannot be disambiguated by UV alone).
    """
    mesh : bpy.types.Mesh = dst_obj.data
    if shape_key_name:
        # ensure a basis key exists, then the named key
        if not mesh.shape_keys:
            dst_obj.shape_key_add(name = "Basis")
        if shape_key_name not in mesh.shape_keys.key_blocks:
            shape_key = dst_obj.shape_key_add(name = shape_key_name)
            # Blender may rename on collision; use the actual name
            shape_key_name = shape_key.name
    src_mesh = src_obj.data
    dst_mesh = dst_obj.data
    src_bm = bmesh.new()
    dst_bm = bmesh.new()
    src_bm.from_mesh(src_mesh)
    src_bm.faces.ensure_lookup_table()
    src_bm.verts.ensure_lookup_table()
    dst_bm.from_mesh(dst_mesh)
    dst_bm.faces.ensure_lookup_table()
    dst_bm.verts.ensure_lookup_table()
    # src_map: (uv_id, dst_material_index) -> source vertex index
    src_map = {}
    # mat_map: source material slot -> matching destination material slot
    mat_map = {}
    # uv ids seen with more than one source vertex (ambiguous by UV)
    overlapping = {}
    matching_vert_count = len(src_bm.verts) == len(dst_bm.verts)
    for i, src_mat in enumerate(src_mesh.materials):
        for j, dst_mat in enumerate(dst_mesh.materials):
            if src_mat == dst_mat:
                mat_map[i] = j
            elif utils.strip_name(src_mat.name) == utils.strip_name(dst_mat.name):
                mat_map[i] = j
    if len(src_mesh.materials) == 0:
        # no materials: treat everything as slot 0 -> slot 0
        mat_map[0] = 0
    vg_index = -1
    if vertex_group and vertex_group in src_obj.vertex_groups:
        vg_index = src_obj.vertex_groups[vertex_group].index
    ul = src_bm.loops.layers.uv[0]
    src_bm.verts.layers.deform.verify()
    dl = src_bm.verts.layers.deform.active
    face : bmesh.types.BMFace
    loop : bmesh.types.BMLoop
    for face in src_bm.faces:
        if face.material_index in mat_map:
            dst_material_idx = mat_map[face.material_index]
            for loop in face.loops:
                if vg_index >= 0:
                    vert = src_bm.verts[loop.vert.index]
                    weight = vert[dl][vg_index]
                    if weight < threshold:
                        continue
                uv = loop[ul].uv.copy()
                # why flatten the udims?
                # because the in the sculpting tools, the separate sculpting meshes
                # must flatten the udims to bake the textures correctly
                if flatten_udim:
                    uv.x -= int(uv.x)
                uv_id = uv.to_tuple(accuracy), dst_material_idx
                if uv_id in src_map and src_map[uv_id] != loop.vert.index:
                    overlapping[uv_id] = True
                src_map[uv_id] = loop.vert.index
    ul = dst_bm.loops.layers.uv[0]
    sl = None
    if shape_key_name:
        sl = dst_bm.verts.layers.shape.get(shape_key_name)
    for face in dst_bm.faces:
        for loop in face.loops:
            uv = loop[ul].uv.copy()
            if flatten_udim:
                uv.x -= int(uv.x)
            uv_id = uv.to_tuple(accuracy), face.material_index
            # overlapping UV's can't be detected correctly so try to copy from just the index position
            if matching_vert_count and uv_id in overlapping:
                vert_index = loop.vert.index
                src_pos = src_bm.verts[vert_index].co
                if sl:
                    loop.vert[sl] = src_pos
                else:
                    loop.vert.co = src_pos
            elif uv_id in src_map:
                src_vert = src_map[uv_id]
                src_pos = src_bm.verts[src_vert].co
                if sl:
                    loop.vert[sl] = src_pos
                else:
                    loop.vert.co = src_pos
    # write the modified vertices back into the destination mesh
    dst_bm.to_mesh(dst_mesh)
def copy_vert_positions_by_index(src_obj, dst_obj, vertex_group = None, threshold = 0.004, shape_key_name = None):
    """Copy vertex positions from src_obj to dst_obj by vertex index.

    Does nothing unless both meshes have the same vertex count. If
    vertex_group is given, only source vertices weighted above `threshold`
    in that group are copied. If shape_key_name is given, positions are
    written into that shape key (created if needed) instead of the base mesh.
    """
    mesh : bpy.types.Mesh = dst_obj.data
    if shape_key_name:
        # ensure a basis key exists, then the named key
        if not mesh.shape_keys:
            dst_obj.shape_key_add(name = "Basis")
        if shape_key_name not in mesh.shape_keys.key_blocks:
            shape_key = dst_obj.shape_key_add(name = shape_key_name)
            # Blender may rename on collision; use the actual name
            shape_key_name = shape_key.name
    src_mesh = src_obj.data
    dst_mesh = dst_obj.data
    src_bm = bmesh.new()
    dst_bm = bmesh.new()
    src_bm.from_mesh(src_mesh)
    src_bm.faces.ensure_lookup_table()
    src_bm.verts.ensure_lookup_table()
    dst_bm.from_mesh(dst_mesh)
    dst_bm.faces.ensure_lookup_table()
    dst_bm.verts.ensure_lookup_table()
    # indices of source vertices that pass the weight filter
    src_verts = []
    matching_vert_count = len(src_bm.verts) == len(dst_bm.verts)
    if not matching_vert_count:
        return
    vg_index = -1
    if vertex_group and vertex_group in src_obj.vertex_groups:
        vg_index = src_obj.vertex_groups[vertex_group].index
    src_bm.verts.layers.deform.verify()
    dl = src_bm.verts.layers.deform.active
    loop : bmesh.types.BMLoop
    for vert in src_bm.verts:
        if vg_index >= 0:
            weight = vert[dl][vg_index]
            if weight < threshold:
                continue
        src_verts.append(vert.index)
    sl = None
    if shape_key_name:
        sl = dst_bm.verts.layers.shape.get(shape_key_name)
    for vert in dst_bm.verts:
        if vert.index in src_verts:
            src_pos = src_bm.verts[vert.index].co
            if sl:
                vert[sl] = src_pos
            else:
                vert.co = src_pos
    # write the modified vertices back into the destination mesh
    dst_bm.to_mesh(dst_mesh)
def map_image_to_vertex_weights(obj, mat, image, vertex_group, func):
    """Bake an image's first channel into a vertex group via each vertex's UV.

    For every vertex of faces using material `mat`, samples the image at the
    vertex's (UDIM-flattened) UV — index picks the first float of the pixel,
    i.e. the red channel — and stores func(pixel_value) as the vertex's
    weight in `vertex_group` (created if missing).
    """
    width = image.size[0]
    height = image.size[1]
    wmo = width - 1
    hmo = height - 1
    # half-texel offsets so sampling rounds to the nearest texel center
    uhw = 1 / (wmo * 2)
    vhw = 1 / (hmo * 2)
    # copy all pixels once; per-element access on image.pixels is slow
    pixels = image.pixels[:]
    if vertex_group in obj.vertex_groups:
        vg = obj.vertex_groups[vertex_group]
    else:
        vg = obj.vertex_groups.new(name=vertex_group)
    vg_index = vg.index
    # find the material slot index for mat (-1 matches no faces)
    mat_index = -1
    for i, slot in enumerate(obj.material_slots):
        if slot.material and slot.material == mat:
            mat_index = i
            break
    mesh = obj.data
    bm = bmesh.new()
    bm.from_mesh(mesh)
    bm.faces.ensure_lookup_table()
    bm.verts.ensure_lookup_table()
    ul = bm.loops.layers.uv[0]
    bm.verts.layers.deform.verify()
    dl = bm.verts.layers.deform.active
    for face in bm.faces:
        if face.material_index == mat_index:
            for loop in face.loops:
                uv = loop[ul].uv
                # fold UDIM tiles back into 0..1
                uv.x -= int(uv.x)
                uv.y -= int(uv.y)
                vert = bm.verts[loop.vert.index]
                x = int((uv.x + uhw) * wmo)
                y = int((uv.y + vhw) * hmo)
                # 4 floats per pixel (RGBA); offset 0 reads the red channel
                pixel_value = pixels[x * 4 + y * width * 4]
                weight = func(pixel_value)
                vert[dl][vg_index] = weight
    # write the weights back into the mesh datablock
    bm.to_mesh(mesh)
def add_vertex_groups_to_selected(obj: bpy.types.Object, vertex_groups, weight, remove_empty=True):
    """Assign `weight` to the selected vertices in each named vertex group.

    Groups are created if missing. Unselected vertices keep their existing
    weights. With remove_empty=True, groups whose total weight ends up
    (near) zero are removed from the object afterwards.
    """
    # get the vertex group indices
    vg_indices = []
    vg_map = {}
    for vgname in vertex_groups:
        if vgname in obj.vertex_groups:
            vg = obj.vertex_groups[vgname]
            vgi = vg.index
        else:
            vg = obj.vertex_groups.new(name=vgname)
            vgi = vg.index
        vg_indices.append(vgi)
        # track the running weight total per group to detect empty groups
        vg_map[vgi] = { "name": vgname, "sum": 0 }
    # get the bmesh
    mesh = obj.data
    bm = get_bmesh(mesh)
    bm.verts.layers.deform.verify()
    dl = bm.verts.layers.deform.active
    # set the weights for the vertex groups in each selected vertex to zero
    for vert in bm.verts:
        for vg_index in vg_indices:
            if vg_index in vert[dl]:
                if vert.select:
                    vert[dl][vg_index] = weight
                    vg_map[vg_index]["sum"] += weight
                else:
                    unselected_weight = vert[dl][vg_index]
                    vg_map[vg_index]["sum"] += unselected_weight
    # apply the changes
    bm.to_mesh(mesh)
    # remove empty groups
    if remove_empty:
        for vg_index in vg_map:
            if vg_map[vg_index]["sum"] < 0.0001:
                vg_name = vg_map[vg_index]["name"]
                vg = obj.vertex_groups[vg_name]
                utils.log_info(f"Removing empty vertex group: {vg_name} from: {obj.name}")
                obj.vertex_groups.remove(vg)
def clean_empty_vertex_groups(obj: bpy.types.Object, bm: bmesh.types.BMesh, exclude=None):
    """Remove vertex groups whose total weight across all verts is (near) zero.

    `bm` must be a bmesh of obj's mesh; groups named in `exclude` are kept
    regardless of weight.
    """
    bm.verts.ensure_lookup_table()
    bm.verts.layers.deform.verify()
    dl = bm.verts.layers.deform.active
    # vgwt: group name -> summed weight over all vertices
    vgwt = {}
    for vg in obj.vertex_groups:
        if exclude and vg.name in exclude:
            continue
        vgwt[vg.name] = 0.0
        for vert in bm.verts:
            if vg.index in vert[dl].keys():
                vgwt[vg.name] += vert[dl][vg.index]
    for vg_name, total_weight in vgwt.items():
        if total_weight < 0.0001:
            vg = obj.vertex_groups[vg_name]
            obj.vertex_groups.remove(vg)
def remove_vertex_groups_from_selected(obj, vertex_groups, remove_empty=True):
    """Zero the weights of the named vertex groups on all selected vertices.

    With remove_empty=True, groups left with (near) zero total weight on the
    unselected vertices are removed from the object afterwards.
    """
    # get the bmesh
    mesh = obj.data
    bm = get_bmesh(mesh)
    bm.verts.layers.deform.verify()
    dl = bm.verts.layers.deform.active
    # get the vertex group indices
    vg_indices = []
    vg_map = {}
    for i, vg in enumerate(obj.vertex_groups):
        if vg.name in vertex_groups:
            vg_indices.append(i)
            # track remaining (unselected) weight to detect emptied groups
            vg_map[i] = { "name": vg.name, "sum": 0 }
    # set the weights for the vertex groups in each selected vertex to zero
    for vert in bm.verts:
        for vg_index in vg_indices:
            if vg_index in vert[dl]:
                if vert.select:
                    vert[dl][vg_index] = 0.0
                else:
                    weight = vert[dl][vg_index]
                    vg_map[vg_index]["sum"] += weight
    # apply the changes
    bm.to_mesh(mesh)
    # remove empty groups
    if remove_empty:
        for vg_index in vg_map:
            if vg_map[vg_index]["sum"] < 0.0001:
                vg_name = vg_map[vg_index]["name"]
                vg = obj.vertex_groups[vg_name]
                utils.log_info(f"Removing empty vertex group: {vg_name} from: {obj.name}")
                obj.vertex_groups.remove(vg)
def parse_island_recursive(bm, face_index, faces_left, island, face_map, vert_map):
    """Recursive way to parse the UV islands.
    Can run out of recursion calls on large meshes.

    Moves face_index (and everything UV-connected to it) from faces_left
    into the island list.
    """
    if face_index not in faces_left:
        return
    faces_left.remove(face_index)
    island.append(face_index)
    for uv_id in face_map[face_index]:
        # follow every face sharing this uv vertex
        for connected_face in vert_map[uv_id] or ():
            parse_island_recursive(bm, connected_face, faces_left, island, face_map, vert_map)
def parse_island_non_recursive(bm, face_indices, faces_left, island, face_map, vert_map):
    """Non recursive way to parse UV islands.
    Connected faces expand the island each iteration.
    A Set of all currently considered faces is maintained each iteration.
    More memory intensive, but doesn't fail.

    Moves every face UV-connected to face_indices from faces_left into the
    island list. (Removed an unused `levels` iteration counter.)
    """
    frontier = face_indices
    while frontier:
        # absorb the current frontier into the island
        for face_index in frontier:
            faces_left.remove(face_index)
            island.append(face_index)
        # collect every face UV-connected to the frontier, not yet absorbed
        expansion = set()
        for face_index in frontier:
            for uv_id in face_map[face_index]:
                connected_faces = vert_map[uv_id]
                if connected_faces:
                    for cf_index in connected_faces:
                        if cf_index not in island:
                            expansion.add(cf_index)
        frontier = expansion
def get_uv_island_map(bm, uv_layer, island):
    """Fetch the UV coords of each vertex in the UV/Mesh island.
    Each island has a unique UV map so this must be called per island.
    uv_map = { vert_index: loop.uv, ... }
    """
    uv_map = {}
    layer = bm.loops.layers.uv[uv_layer]
    for face_index in island:
        for loop in bm.faces[face_index].loops:
            uv_map[loop.vert.index] = loop[layer].uv
    return uv_map
def get_uv_islands(bm, uv_layer, use_selected = True):
    """Return a list of faces in each distinct uv island.

    Faces are connected when they share a (uv, vertex) pair, so seams split
    islands even where the mesh is connected. Hidden faces are skipped;
    with use_selected=True only selected faces are considered.
    """
    # face_map: face index -> set of its (uv, vert) ids
    face_map = {}
    # vert_map: (uv, vert) id -> set of faces using it
    vert_map = {}
    uv_map = {}
    ul = bm.loops.layers.uv[uv_layer]
    if use_selected:
        faces = [f for f in bm.faces if f.select and not f.hide]
    else:
        faces = [f for f in bm.faces if not f.hide]
    for face in faces:
        for loop in face.loops:
            # round uv to 5 decimals so floating point noise doesn't split islands
            uv_id = loop[ul].uv.to_tuple(5), loop.vert.index
            uv_map[loop.vert.index] = loop[ul].uv
            if face.index not in face_map:
                face_map[face.index] = set()
            if uv_id not in vert_map:
                vert_map[uv_id] = set()
            face_map[face.index].add(uv_id)
            vert_map[uv_id].add(face.index)
    islands = []
    faces_left = set(face_map.keys())
    # repeatedly flood-fill from an arbitrary remaining face
    while len(faces_left) > 0:
        current_island = []
        face_index = list(faces_left)[0]
        face_indices = set()
        face_indices.add(face_index)
        parse_island_non_recursive(bm, face_indices, faces_left, current_island, face_map, vert_map)
        islands.append(current_island)
    return islands
def get_uv_aligned_edges(bm, island, card_dir, uv_map, get_non_aligned = False, dir_threshold = 0.9):
    """Edge indices of the island whose UV direction (anti-)aligns with card_dir.

    Alignment is |dot| >= dir_threshold of the normalized UV edge direction;
    get_non_aligned=True inverts the selection.
    """
    # collect every edge index used by the island's faces
    edge_indices = set()
    for face_index in island:
        for edge in bm.faces[face_index].edges:
            edge_indices.add(edge.index)
    aligned = set()
    for edge_index in edge_indices:
        edge = bm.edges[edge_index]
        uv0 = uv_map[edge.verts[0].index]
        uv1 = uv_map[edge.verts[1].index]
        direction = Vector(uv1) - Vector(uv0)
        direction.normalize()
        alignment = abs(card_dir.dot(direction))
        keep = (alignment < dir_threshold) if get_non_aligned else (alignment >= dir_threshold)
        if keep:
            aligned.add(edge_index)
    return aligned
def get_linked_edge_map(bm, edges):
    """Map each edge index in `edges` to the set of edges in `edges` sharing a vertex."""
    edge_map = {}
    for edge_index in edges:
        edge = bm.edges[edge_index]
        for vert in edge.verts:
            for linked_edge in vert.link_edges:
                if linked_edge != edge and linked_edge.index in edges:
                    edge_map.setdefault(edge_index, set()).add(linked_edge.index)
    return edge_map
def get_boundary_edges(bm, island):
    """Indices of the boundary edges of the faces in the island."""
    boundary = set()
    for face_index in island:
        for edge in bm.faces[face_index].edges:
            if edge.is_boundary:
                boundary.add(edge.index)
    return boundary
def count_adjacent_faces(face : bmesh.types.BMFace):
    """Number of faces that share an edge with this face (with multiplicity)."""
    return sum(1 for edge in face.edges
                 for linked in edge.link_faces
                 if linked != face)
def get_uv_bounds(uv_map):
    """Return (min, max) corners of the 2D box enclosing all UVs in uv_map.

    uv_map: {vert_index: uv_vector} as produced by get_uv_island_map.
    """
    # renamed locals from min/max so the builtins aren't shadowed
    lo = Vector((9999, 9999))
    hi = Vector((-9999, -9999))
    for uv in uv_map.values():
        if uv.x < lo.x: lo.x = uv.x
        if uv.x > hi.x: hi.x = uv.x
        if uv.y < lo.y: lo.y = uv.y
        if uv.y > hi.y: hi.y = uv.y
    return lo, hi
def is_island_grid(bm : bmesh.types.BMesh, island : list):
    """island: list of face indices

    Heuristically detect whether the island is a regular 1xN strip,
    2xN grid or NxM grid by histogramming face-adjacency counts.
    """
    histogram = {}
    for face_index in island:
        neighbours = count_adjacent_faces(bm.faces[face_index])
        histogram[neighbours] = histogram.get(neighbours, 0) + 1
    num_faces = len(island)
    counts = set(histogram)
    # 1 x N strip: two end faces with one neighbour, the rest with two
    if counts == {1, 2}:
        return histogram[1] == 2 and histogram[2] == num_faces - 2
    # 2 x N grid: four corner faces with two neighbours, the rest with three
    if counts == {2, 3}:
        return histogram[2] == 4 and histogram[3] == num_faces - 4
    # N x M grid: four corners, edge faces with three, interior with four
    if counts == {2, 3, 4}:
        return histogram[2] == 4 and histogram[3] + histogram[4] == num_faces - 4
    return False
def get_average_edge_length(obj):
    """Mean edge length of the mesh object, scaled by its world median scale.

    Returns 0.0 when obj is not an existing mesh object.
    """
    avg = 0.0
    if utils.object_exists_is_mesh(obj):
        bm = get_bmesh(obj.data)
        total = 0.0
        count = 0
        for edge in bm.edges:
            total += edge.calc_length()
            count += 1
        if count > 0:
            avg = total / count
        bm.free()
        avg *= obj.matrix_world.median_scale
    return avg
def get_area(obj):
    """Total surface area of the mesh object, scaled by world median scale squared.

    Returns 0.0 when obj is not an existing mesh object.
    """
    area = 0.0
    if utils.object_exists_is_mesh(obj):
        bm = get_bmesh(obj.data)
        total = 0.0
        for face in bm.faces:
            total += face.calc_area()
        bm.free()
        # area scales with the square of the linear scale factor
        area = total * pow(obj.matrix_world.median_scale, 2)
    return area
def intersects_projected_face(p: Vector, PMW, f: bmesh.types.BMFace, FMW):
    """Test whether point p (under transform PMW) projects onto face f (under FMW).

    Returns (u, v, w, d, dist) — barycentric coords of the projection, plane
    distance scale and point-to-center distance — or None when the point is
    behind the face or outside the extruded triangle. Also adds diagnostic
    geometry (triangles/edges) to the debug mesh on success.

    NOTE(review): the transforms are applied as `vector @ matrix` (row-vector
    convention) whereas the rest of this file uses `matrix @ vector` — and
    normals are transformed by the full matrix, not its inverse-transpose.
    Looks intentional for its diagnostic use, but verify before reuse.
    """
    # point and face center in the common (world) space
    PW = p @ PMW
    cw = f.calc_center_median() @ FMW
    pcw = PW - cw
    fnw = (f.normal @ FMW).normalized()
    # reject points behind the face
    if pcw.dot(fnw) < 0:
        return None
    d = pcw.length
    #d = mathutils.geometry.distance_point_to_plane(PW, cw, fnw)
    #if d < 0: return None
    dfn: Vector = fnw / d
    # triangle corners and corner normals in world space
    vw0 = f.verts[0].co @ FMW
    vw1 = f.verts[1].co @ FMW
    vw2 = f.verts[2].co @ FMW
    nw0 = (f.verts[0].normal @ FMW).normalized()
    nw1 = (f.verts[1].normal @ FMW).normalized()
    nw2 = (f.verts[2].normal @ FMW).normalized()
    dnw0 = dfn.dot(nw0)
    dnw1 = dfn.dot(nw1)
    dnw2 = dfn.dot(nw2)
    # extrude each corner along its normal towards the point's distance
    VW0 = vw0 + nw0 / dnw0
    VW1 = vw1 + nw1 / dnw1
    VW2 = vw2 + nw2 / dnw2
    u, v, w = barycentric_coords(PW, VW0, VW1, VW2)
    # outside the extruded triangle
    if u < 0 or u > 1 or v < 0 or v > 1 or w < 0 or w > 1:
        return None
    #diag_mesh_add_edge(cw, cw + fnw * 0.01)
    diag_mesh_add_tri(vw0, vw1, vw2)
    #diag_mesh_add_edge(f0, f0 + fnw0 * 0.01)
    #diag_mesh_add_edge(f1, f1 + fnw1 * 0.01)
    #diag_mesh_add_edge(f2, f2 + fnw2 * 0.01)
    diag_mesh_add_tri(VW0, VW1, VW2)
    diag_mesh_add_edge(vw0, VW0)
    diag_mesh_add_edge(vw1, VW1)
    diag_mesh_add_edge(vw2, VW2)
    diag_mesh_add_edge(cw, PW)
    return u, v, w, d, pcw.length
def barycentric_coords(p: "Vector", a: "Vector", b: "Vector", c: "Vector"):
    """Barycentric coordinates (u, v, w) of point p in triangle (a, b, c)."""
    ab = b - a
    ac = c - a
    ap = p - a
    d00 = ab.dot(ab)
    d01 = ab.dot(ac)
    d11 = ac.dot(ac)
    d20 = ap.dot(ab)
    d21 = ap.dot(ac)
    denom = d00 * d11 - d01 * d01
    v = (d11 * d20 - d01 * d21) / denom
    w = (d00 * d21 - d01 * d20) / denom
    return (1 - v - w, v, w)
def barycentric_weight(b_co, w0, w1, w2):
    """Blend three per-vertex weights by barycentric coordinates b_co = (u, v, w)."""
    u, v, w = b_co
    return u * w0 + v * w1 + w * w2
def map_body_weight_blends(body, obj, bm_obj: bmesh.types.BMesh):
    """For each vertex of obj, measure its distance above the body surface.

    Returns {vert_index: value} where value is the world-space distance to
    the closest point on the body when the vertex lies on the outside
    (along the surface normal), 0 when it lies inside/behind the surface,
    and -1 when closest_point_on_mesh fails.
    """
    weight_blends = {}
    v: bmesh.types.BMVert
    f: bmesh.types.BMFace
    BMW = body.matrix_world
    BMWI = BMW.inverted()
    OMW = obj.matrix_world
    # object local to body local matrix
    OLTBL = BMWI @ OMW
    for v in bm_obj.verts:
        #diag_mesh_create()
        #diag_to_bmesh()
        obj_world_co = OMW @ v.co
        body_local_co = OLTBL @ v.co
        # closest_point_on_mesh works in the body's local space
        success, closest_local_co, closest_local_no, closest_face_index = body.closest_point_on_mesh(body_local_co)
        if success:
            closest_world_co = BMW @ closest_local_co
            delta = obj_world_co - closest_world_co
            no = (BMW @ closest_local_no).normalized()
            if delta.dot(no) < 0:
                # vertex is behind the surface (inside the body)
                weight_blends[v.index] = 0
            else:
                weight_blends[v.index] = delta.length
            #diag_mesh_add_edge(closest_world_co, obj_world_co)
        else:
            #diag_mesh_add_vert(body_local_co)
            # sentinel: no surface point could be found for this vertex
            weight_blends[v.index] = -1
    #diag_from_bmesh()
    return weight_blends
def fetch_vertex_layer_weights(bm: bmesh.types.BMesh, layer_index):
    """Per-vertex weights for one vertex group read from the active deform layer.

    Returns {vert_index: weight}; vertices with no entry for layer_index
    weigh 0.0.
    """
    bm.verts.layers.deform.verify()
    dl = bm.verts.layers.deform.active
    weights = {}
    for vert in bm.verts:
        try:
            weights[vert.index] = vert[dl][layer_index]
        except Exception:
            # Narrowed from a bare except: a vertex not in the group raises
            # here (KeyError from the deform layer) — treat as zero weight.
            weights[vert.index] = 0.0
    return weights
# Diagnostic debug-mesh state: the object name, the cached object datablock,
# and the cached BMesh being built (flushed by diag_finish()).
DIAG_NAME = "DiagnosticMesh"
DIAG = None
DIAG_BM = None
def diag_mesh_create():
    """Find or create the diagnostic mesh object and cache it in DIAG."""
    global DIAG, DIAG_NAME
    if DIAG_NAME in bpy.data.objects:
        DIAG = bpy.data.objects[DIAG_NAME]
    else:
        # create a fresh empty mesh object at the origin and link it
        new_mesh = bpy.data.meshes.new(DIAG_NAME)
        DIAG = bpy.data.objects.new(DIAG_NAME, new_mesh)
        DIAG.location = [0,0,0]
        bpy.context.collection.objects.link(DIAG)
    DIAG.name = DIAG_NAME
    return DIAG
def diag_to_bmesh() -> bmesh.types.BMesh:
    """Return (creating if needed) the cached BMesh of the diagnostic object."""
    global DIAG_BM
    if not DIAG_BM:
        DIAG_BM = get_bmesh(diag_mesh_create().data)
    return DIAG_BM
def diag_finish():
    """Flush the diagnostic BMesh back into the diagnostic object's mesh data."""
    global DIAG, DIAG_BM
    if not (DIAG and DIAG_BM):
        return
    DIAG_BM.to_mesh(DIAG.data)
def diag_mesh_add_vert(p0: Vector):
    """Add a single diagnostic vertex at p0."""
    diag_to_bmesh().verts.new(p0)
def diag_mesh_add_edge(p0: Vector, p1: Vector):
    """Add a diagnostic edge between two points."""
    bm = diag_to_bmesh()
    bm.edges.new((bm.verts.new(p0), bm.verts.new(p1)))
def diag_mesh_add_tri(p0: Vector, p1: Vector, p2: Vector):
    """Add a diagnostic triangle face from three points."""
    bm = diag_to_bmesh()
    corners = (bm.verts.new(p0), bm.verts.new(p1), bm.verts.new(p2))
    bm.faces.new(corners)
@@ -0,0 +1,423 @@
import bpy
from mathutils import Euler
from . import bones, utils
def fix_armature(arm):
    """Align a VRM armature so the character faces -Y and restore default bone display.

    If the rig currently faces +Y (VRM1.0 style) it is rotated 180 degrees
    about Z and the rotation is applied to the object data.
    """
    import math
    if not arm:
        return
    utils.set_only_active_object(arm)
    facing = get_vrm_rig_facing(arm)
    utils.log_info(f"VRM Alignment: Forward = {facing}")
    if facing != "-Y":
        utils.log_info("Aligning armature: Forward = -Y")
        # rotate 180 degrees about Z (was a hand-truncated pi literal 3.1415926535897)
        utils.set_transform_rotation(arm, Euler((0, 0, math.pi)))
        bpy.ops.object.transform_apply(location=False, rotation=True, scale=False)
    restore_bone_display(arm)
def get_vrm_rig_facing(arm):
    """VRM1.0 aligns with forward +Y, VRM2.0 aligns with forward -Y.
    Infer which by comparing the left toe position to the root bone:
    toes lie in front of the root, so their Y offset gives the facing."""
    root_bone = bones.get_pose_bone(arm, "Root")
    toe_bone = bones.get_pose_bone(arm, "J_Bip_L_ToeBase")
    if not (root_bone and toe_bone):
        return "-Y"
    delta = arm.matrix_world @ toe_bone.head - arm.matrix_world @ root_bone.head
    return "Y" if delta.y > 0 else "-Y"
def restore_bone_display(arm):
    """Clear all custom bone shapes so bones render with the default display."""
    utils.object_mode_to(arm)
    for pb in arm.pose.bones:
        pb.custom_shape = None
def pack_rotation(name, rot):
    """Format a named quaternion as one HIK profile line: "name = x,y,z,w,\\n"."""
    components = "{},{},{},{}".format(rot.x, rot.y, rot.z, rot.w)
    return "{} = {},\n".format(name, components)
def pack_bone(arm, pose_bone: bpy.types.PoseBone):
    """Pack a pose bone's parent-relative rotation as a profile line.

    Root bones (no parent) use their armature-space rotation.
    (arm is unused but kept for call-site compatibility.)
    """
    parent = pose_bone.parent
    if parent:
        local = parent.matrix.inverted() @ pose_bone.matrix
    else:
        local = pose_bone.matrix
    return pack_rotation(pose_bone.name, local.to_quaternion())
def generate_hik_profile(arm, name, path, hik_template):
    """Write an HIK profile file for the armature.

    Fills the template's $BONE_LIST with the armature's own rotation, every
    pose bone's parent-relative rotation, and the rotations of child meshes.
    Returns True.
    """
    rotations = [pack_rotation(name, arm.rotation_quaternion)]
    rotations.extend(pack_bone(arm, pb) for pb in arm.pose.bones)
    rotations.extend(pack_rotation(child.name, child.rotation_quaternion)
                     for child in arm.children if child.type == "MESH")
    content = hik_template.replace("$BONE_LIST", "".join(rotations))
    with open(path, "w") as write_file:
        utils.log_info(f"Writing VRM HIK Profile: {path}")
        write_file.write(content)
    return True
# HIK profile template mapping Rigify metarig bone names to HIK bone names.
# $BONE_LIST is replaced by generate_hik_profile().
RIGIFY_METARIG_PROFILE_TEMPLATE = """
[BoneMapOption]
Prefix =
[BoneMap]
f_index_01_L = LeftHandIndex1
f_index_01_R = RightHandIndex1
f_index_02_L = LeftHandIndex2
f_index_02_R = RightHandIndex2
f_index_03_L = LeftHandIndex3
f_index_03_R = RightHandIndex3
f_middle_01_L = LeftHandMiddle1
f_middle_01_R = RightHandMiddle1
f_middle_02_L = LeftHandMiddle2
f_middle_02_R = RightHandMiddle2
f_middle_03_L = LeftHandMiddle3
f_middle_03_R = RightHandMiddle3
f_pinky_01_L = LeftHandPinky1
f_pinky_01_R = RightHandPinky1
f_pinky_02_L = LeftHandPinky2
f_pinky_02_R = RightHandPinky2
f_pinky_03_L = LeftHandPinky3
f_pinky_03_R = RightHandPinky3
f_ring_01_L = LeftHandRing1
f_ring_01_R = RightHandRing1
f_ring_02_L = LeftHandRing2
f_ring_02_R = RightHandRing2
f_ring_03_L = LeftHandRing3
f_ring_03_R = RightHandRing3
foot_L = LeftFoot
foot_R = RightFoot
forearm_L = LeftForeArm
forearm_L_001 = LeftForeArmRoll
forearm_R = RightForeArm
forearm_R_001 = RightForeArmRoll
hand_L = LeftHand
hand_R = RightHand
shin_L = LeftLeg
shin_L_001 = LeftLegRoll
shin_R = RightLeg
shin_R_001 = RightLegRoll
shoulder_L = LeftShoulder
shoulder_R = RightShoulder
spine = Hips
spine_001 = Spine
spine_002 = Spine1
spine_003 = Spine2
spine_004 = Neck
spine_005 = Neck1
spine_006 = Head
thigh_L = LeftUpLeg
thigh_L_001 = LeftUpLegRoll
thigh_R = RightUpLeg
thigh_R_001 = RightUpLegRoll
thumb_01_L = LeftHandThumb1
thumb_01_R = RightHandThumb1
thumb_02_L = LeftHandThumb2
thumb_02_R = RightHandThumb2
thumb_03_L = LeftHandThumb3
thumb_03_R = RightHandThumb3
upper_arm_L = LeftArm
upper_arm_L_001 = LeftArmRoll
upper_arm_R = RightArm
upper_arm_R_001 = RightArmRoll
[BoneRotate]
$BONE_LIST
[RootTransform]
Value = 1.,1.,1.,1.,0.,0.,0.,1.,0.,0.,0.,1.,0.,0.,0.,
[FacialMap_LEye]
Bone0 = eye_L
[FacialMap_REye]
Bone0 = eye_R
[BoneTypeOfBonesUnderHead]
jaw = Facial
tongue = Facial
tongue_001 = Unused
tongue_002 = Unused
teeth_T = Facial
teeth_B = Facial
eye_R = Facial
eye_L = Facial
"""
# HIK profile template mapping CC_Base_* bone names to HIK bone names.
RIGIFY_CC_BASE_PROFILE_TEMPLATE = """
[BoneMapOption]
Prefix =
[BoneMap]
CC_Base_Head = Head
CC_Base_Hip = Hips
CC_Base_L_Calf = LeftLeg
CC_Base_L_CalfTwist = LeftLegRoll
CC_Base_L_Clavicle = LeftShoulder
CC_Base_L_Foot = LeftFoot
CC_Base_L_Forearm = LeftForeArm
CC_Base_L_ForearmTwist = LeftForeArmRoll
CC_Base_L_Hand = LeftHand
CC_Base_L_Index1 = LeftHandIndex1
CC_Base_L_Index2 = LeftHandIndex2
CC_Base_L_Index3 = LeftHandIndex3
CC_Base_L_Mid1 = LeftHandMiddle1
CC_Base_L_Mid2 = LeftHandMiddle2
CC_Base_L_Mid3 = LeftHandMiddle3
CC_Base_L_Pinky1 = LeftHandPinky1
CC_Base_L_Pinky2 = LeftHandPinky2
CC_Base_L_Pinky3 = LeftHandPinky3
CC_Base_L_Ring1 = LeftHandRing1
CC_Base_L_Ring2 = LeftHandRing2
CC_Base_L_Ring3 = LeftHandRing3
CC_Base_L_Thigh = LeftUpLeg
CC_Base_L_ThighTwist = LeftUpLegRoll
CC_Base_L_Thumb1 = LeftHandThumb1
CC_Base_L_Thumb2 = LeftHandThumb2
CC_Base_L_Thumb3 = LeftHandThumb3
CC_Base_L_Upperarm = LeftArm
CC_Base_L_UpperarmTwist = LeftArmRoll
CC_Base_NeckTwist01 = Neck
CC_Base_NeckTwist02 = Neck1
CC_Base_R_Calf = RightLeg
CC_Base_R_CalfTwist = RightLegRoll
CC_Base_R_Clavicle = RightShoulder
CC_Base_R_Foot = RightFoot
CC_Base_R_Forearm = RightForeArm
CC_Base_R_ForearmTwist = RightForeArmRoll
CC_Base_R_Hand = RightHand
CC_Base_R_Index1 = RightHandIndex1
CC_Base_R_Index2 = RightHandIndex2
CC_Base_R_Index3 = RightHandIndex3
CC_Base_R_Mid1 = RightHandMiddle1
CC_Base_R_Mid2 = RightHandMiddle2
CC_Base_R_Mid3 = RightHandMiddle3
CC_Base_R_Pinky1 = RightHandPinky1
CC_Base_R_Pinky2 = RightHandPinky2
CC_Base_R_Pinky3 = RightHandPinky3
CC_Base_R_Ring1 = RightHandRing1
CC_Base_R_Ring2 = RightHandRing2
CC_Base_R_Ring3 = RightHandRing3
CC_Base_R_Thigh = RightUpLeg
CC_Base_R_ThighTwist = RightUpLegRoll
CC_Base_R_Thumb1 = RightHandThumb1
CC_Base_R_Thumb2 = RightHandThumb2
CC_Base_R_Thumb3 = RightHandThumb3
CC_Base_R_Upperarm = RightArm
CC_Base_R_UpperarmTwist = RightArmRoll
CC_Base_Spine01 = Spine1
CC_Base_Spine02 = Spine2
CC_Base_Waist = Spine
[BoneRotate]
$BONE_LIST
[RootTransform]
Value = 1.,1.,1.,1.,0.,0.,0.,1.,0.,0.,0.,1.,0.,0.,0.,
[FacialMap_LEye]
Bone0 = CC_Base_L_Eye
[FacialMap_REye]
Bone0 = CC_Base_R_Eye
[BoneTypeOfBonesUnderHead]
CC_Base_JawRoot = Facial
CC_Base_Tongue01 = Facial
CC_Base_Tongue02 = Unused
CC_Base_Tongue03 = Unused
CC_Base_Teeth01 = Facial
CC_Base_Teeth02 = Facial
CC_Base_L_Eye = Facial
CC_Base_R_Eye = Facial
"""
# HIK profile template mapping Rigify_* bone names to HIK bone names.
RIGIFY_BASE_PROFILE_TEMPLATE = """
[BoneMapOption]
Prefix =
[BoneMap]
Rigify_Head = Head
Rigify_Hip = Hips
Rigify_L_Calf = LeftLeg
Rigify_L_CalfTwist = LeftLegRoll
Rigify_L_Clavicle = LeftShoulder
Rigify_L_Foot = LeftFoot
Rigify_L_Forearm = LeftForeArm
Rigify_L_ForearmTwist = LeftForeArmRoll
Rigify_L_Hand = LeftHand
Rigify_L_Index1 = LeftHandIndex1
Rigify_L_Index2 = LeftHandIndex2
Rigify_L_Index3 = LeftHandIndex3
Rigify_L_Mid1 = LeftHandMiddle1
Rigify_L_Mid2 = LeftHandMiddle2
Rigify_L_Mid3 = LeftHandMiddle3
Rigify_L_Pinky1 = LeftHandPinky1
Rigify_L_Pinky2 = LeftHandPinky2
Rigify_L_Pinky3 = LeftHandPinky3
Rigify_L_Ring1 = LeftHandRing1
Rigify_L_Ring2 = LeftHandRing2
Rigify_L_Ring3 = LeftHandRing3
Rigify_L_Thigh = LeftUpLeg
Rigify_L_ThighTwist = LeftUpLegRoll
Rigify_L_Thumb1 = LeftHandThumb1
Rigify_L_Thumb2 = LeftHandThumb2
Rigify_L_Thumb3 = LeftHandThumb3
Rigify_L_Upperarm = LeftArm
Rigify_L_UpperarmTwist = LeftArmRoll
Rigify_NeckTwist01 = Neck
Rigify_NeckTwist02 = Neck1
Rigify_R_Calf = RightLeg
Rigify_R_CalfTwist = RightLegRoll
Rigify_R_Clavicle = RightShoulder
Rigify_R_Foot = RightFoot
Rigify_R_Forearm = RightForeArm
Rigify_R_ForearmTwist = RightForeArmRoll
Rigify_R_Hand = RightHand
Rigify_R_Index1 = RightHandIndex1
Rigify_R_Index2 = RightHandIndex2
Rigify_R_Index3 = RightHandIndex3
Rigify_R_Mid1 = RightHandMiddle1
Rigify_R_Mid2 = RightHandMiddle2
Rigify_R_Mid3 = RightHandMiddle3
Rigify_R_Pinky1 = RightHandPinky1
Rigify_R_Pinky2 = RightHandPinky2
Rigify_R_Pinky3 = RightHandPinky3
Rigify_R_Ring1 = RightHandRing1
Rigify_R_Ring2 = RightHandRing2
Rigify_R_Ring3 = RightHandRing3
Rigify_R_Thigh = RightUpLeg
Rigify_R_ThighTwist = RightUpLegRoll
Rigify_R_Thumb1 = RightHandThumb1
Rigify_R_Thumb2 = RightHandThumb2
Rigify_R_Thumb3 = RightHandThumb3
Rigify_R_Upperarm = RightArm
Rigify_R_UpperarmTwist = RightArmRoll
Rigify_Spine01 = Spine1
Rigify_Spine02 = Spine2
Rigify_Waist = Spine
[BoneRotate]
$BONE_LIST
[RootTransform]
Value = 1.,1.,1.,1.,0.,0.,0.,1.,0.,0.,0.,1.,0.,0.,0.,
[FacialMap_LEye]
Bone0 = Rigify_L_Eye
[FacialMap_REye]
Bone0 = Rigify_R_Eye
[BoneTypeOfBonesUnderHead]
Rigify_JawRoot = Facial
Rigify_Tongue01 = Facial
Rigify_Tongue02 = Unused
Rigify_Tongue03 = Unused
Rigify_Teeth01 = Facial
Rigify_Teeth02 = Facial
Rigify_L_Eye = Facial
Rigify_R_Eye = Facial
"""
# HIK profile template mapping VRM (J_Bip_*) bone names to HIK bone names.
# Note the different root transform scale (100) compared to the other templates.
VRM_HIK_PROFILE_TEMPLATE = """
[BoneMapOption]
Prefix =
[BoneMap]
J_Bip_C_Chest = Spine1
J_Bip_C_Head = Head
J_Bip_C_Hips = Hips
J_Bip_C_Neck = Neck
J_Bip_C_Spine = Spine
J_Bip_C_UpperChest = Spine2
J_Bip_L_Foot = LeftFoot
J_Bip_L_Hand = LeftHand
J_Bip_L_Index1 = LeftHandIndex1
J_Bip_L_Index2 = LeftHandIndex2
J_Bip_L_Index3 = LeftHandIndex3
J_Bip_L_Little1 = LeftHandPinky1
J_Bip_L_Little2 = LeftHandPinky2
J_Bip_L_Little3 = LeftHandPinky3
J_Bip_L_LowerArm = LeftForeArm
J_Bip_L_LowerLeg = LeftLeg
J_Bip_L_Middle1 = LeftHandMiddle1
J_Bip_L_Middle2 = LeftHandMiddle2
J_Bip_L_Middle3 = LeftHandMiddle3
J_Bip_L_Ring1 = LeftHandRing1
J_Bip_L_Ring2 = LeftHandRing2
J_Bip_L_Ring3 = LeftHandRing3
J_Bip_L_Shoulder = LeftShoulder
J_Bip_L_Thumb1 = LeftHandThumb1
J_Bip_L_Thumb2 = LeftHandThumb2
J_Bip_L_Thumb3 = LeftHandThumb3
J_Bip_L_UpperArm = LeftArm
J_Bip_L_UpperLeg = LeftUpLeg
J_Bip_R_Foot = RightFoot
J_Bip_R_Hand = RightHand
J_Bip_R_Index1 = RightHandIndex1
J_Bip_R_Index2 = RightHandIndex2
J_Bip_R_Index3 = RightHandIndex3
J_Bip_R_Little1 = RightHandPinky1
J_Bip_R_Little2 = RightHandPinky2
J_Bip_R_Little3 = RightHandPinky3
J_Bip_R_LowerArm = RightForeArm
J_Bip_R_LowerLeg = RightLeg
J_Bip_R_Middle1 = RightHandMiddle1
J_Bip_R_Middle2 = RightHandMiddle2
J_Bip_R_Middle3 = RightHandMiddle3
J_Bip_R_Ring1 = RightHandRing1
J_Bip_R_Ring2 = RightHandRing2
J_Bip_R_Ring3 = RightHandRing3
J_Bip_R_Shoulder = RightShoulder
J_Bip_R_Thumb1 = RightHandThumb1
J_Bip_R_Thumb2 = RightHandThumb2
J_Bip_R_Thumb3 = RightHandThumb3
J_Bip_R_UpperArm = RightArm
J_Bip_R_UpperLeg = RightUpLeg
[BoneRotate]
$BONE_LIST
[RootTransform]
Value = 1.,100.,100.,100.,0.,0.,0.,1.,0.,0.,0.,1.,0.,0.,0.,
[BoneTypeOfBonesUnderHead]
J_Adj_L_FaceEye = Facial
J_Adj_R_FaceEye = Facial
"""
@@ -0,0 +1,39 @@
# Copyright (C) 2021 Victor Soupday
# This file is part of CC/iC Blender Tools <https://github.com/soupday/cc_blender_tools>
#
# CC/iC Blender Tools is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# CC/iC Blender Tools is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with CC/iC Blender Tools. If not, see <https://www.gnu.org/licenses/>.
import bpy
import bpy.utils.previews
from . import utils
# Preview collection holding the addon's custom icons (created in register()).
ICONS: bpy.utils.previews.ImagePreviewCollection = None
# Loaded preview for the wrinkle-region background icon.
ICON_WRINKLE_REGIONS = None
def register():
    """Create the preview collection and load the addon's custom icons."""
    global ICONS
    global ICON_WRINKLE_REGIONS
    ICONS = bpy.utils.previews.new()
    icon_path = utils.get_resource_path("icons", "wrinkle_bg.png")
    ICON_WRINKLE_REGIONS = ICONS.load("wrinkle_bg", icon_path, "IMAGE")
def unregister():
    """Release the custom icon previews loaded in register().

    Uses bpy.utils.previews.remove() (the documented teardown) rather than
    clear(), which empties the collection but leaks its registration on
    addon reload; also guards against unregister() running before register().
    """
    global ICONS
    global ICON_WRINKLE_REGIONS
    if ICONS is not None:
        bpy.utils.previews.remove(ICONS)
        ICONS = None
    ICON_WRINKLE_REGIONS = None
@@ -0,0 +1,468 @@
# Copyright (C) 2021 Victor Soupday
# This file is part of CC/iC Blender Tools <https://github.com/soupday/cc_blender_tools>
#
# CC/iC Blender Tools is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# CC/iC Blender Tools is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with CC/iC Blender Tools. If not, see <https://www.gnu.org/licenses/>.
import os
import bpy
from . import colorspace, nodeutils, params, lib, utils, vars
# Map of Blender image file-format identifiers to their file extensions.
IMAGE_FORMATS = {
    "PNG": ".png",
    "JPEG": ".jpg",
    "BMP": ".bmp",
    "TARGA": ".tga",
    "JPEG2000": ".jp2",  # fix: was "jp2" — missing the leading dot
    "IRIS": ".rgb",
    "TARGA_RAW": ".tga",
    "CINEON": ".cin",
    "DPX": ".dpx",
    "OPEN_EXR_MULTILAYER": ".exr",
    "OPEN_EXR": ".exr",
    "HDR": ".hdr",
    "TIFF": ".tif",
    "WEBP": ".webp",
}
def check_max_size(image):
    """Clamp each image dimension to the user's max texture size preference.

    NOTE(review): width and height are clamped independently, so an oversized
    non-square image changes aspect ratio here — confirm intended
    (apply_max_size below is the aspect-preserving variant).
    """
    prefs = vars.prefs()
    width = image.size[0]
    height = image.size[1]
    if width > prefs.max_texture_size or height > prefs.max_texture_size:
        image.scale(min(width, prefs.max_texture_size), min(height, prefs.max_texture_size))
# load an image from a file, but try to find it in the existing images first
def load_image(filename, color_space, processed_images = None, reuse_existing = True):
    """Load an image file as a bpy image, reusing and de-duplicating where possible.

    If reuse_existing, an already-loaded image with the same absolute path is
    returned (reloaded from disk unless it has unsaved edits). processed_images
    is an optional accumulator of [md5, image] pairs used to collapse images
    that are byte-identical but stored at different paths. Returns the image
    or None on load failure.
    """
    i: bpy.types.Image = None
    # TODO: should the de-duplication only consider images brough in from the import.
    # (but then the rebuild won't work...)
    # or only consider images with the characters folder as a common path...
    if reuse_existing:
        for i in bpy.data.images:
            if i.type == "IMAGE" and i.filepath != "":
                if os.path.normpath(bpy.path.abspath(i.filepath)) == os.path.normpath(os.path.abspath(filename)):
                    utils.log_info("Using existing image: " + i.filepath)
                    found = False
                    image_md5 = None
                    image_path = bpy.path.abspath(i.filepath)
                    # collapse to a previously processed image with identical content
                    if processed_images is not None and os.path.exists(image_path):
                        image_md5 = utils.md5sum(image_path)
                        for p in processed_images:
                            if p[0] == image_md5:
                                utils.log_info("Skipping duplicate existing image, reusing: " + p[1].filepath)
                                i = p[1]
                                found = True
                    # 32/128-bit images carry alpha: force channel-packed alpha
                    if (i.depth == 32 or i.depth == 128) and i.alpha_mode != "CHANNEL_PACKED":
                        i.alpha_mode = "CHANNEL_PACKED"
                    if processed_images is not None and i and image_md5 and not found:
                        processed_images.append([image_md5, i])
                    # refresh from disk unless the in-memory copy has been edited
                    if not i.is_dirty:
                        utils.log_detail(f"Reloading image: {i.name}")
                        try:
                            i.reload()
                        except:
                            utils.log_detail(f"Unable to reload image: {i.name}")
                    else:
                        utils.log_info(f"Image {i.name} has been modified, keeping in-memory image.")
                    colorspace.set_image_color_space(i, color_space)
                    return i
    try:
        image_md5 = None
        # de-duplicate against already processed files before loading anew
        if processed_images is not None and os.path.exists(filename):
            image_md5 = utils.md5sum(filename)
            for p in processed_images:
                if p[0] == image_md5 and utils.image_exists(p[1]):
                    utils.log_info("Skipping duplicate image, reusing existing: " + p[1].filepath)
                    return p[1]
        utils.log_info("Loading new image: " + filename)
        image = bpy.data.images.load(filename)
        colorspace.set_image_color_space(image, color_space)
        if (image.depth == 32 or image.depth == 128):
            image.alpha_mode = "CHANNEL_PACKED"
        #check_max_size(image)
        if processed_images is not None and image and image_md5:
            processed_images.append([image_md5, image])
        return image
    except Exception as e:
        utils.log_error("Unable to load image: " + filename, e)
        return None
## Search the directory for an image filename that contains the search substring
def find_image_file(base_dir, dirs, mat, texture_type):
    """Search dirs for an image named <material>_<suffix> for the texture type.

    Directories that no longer exist are remapped relative to base_dir (handles
    moved projects). Returns the full file path, or None if not found.
    NOTE(review): if utils.local_repath() can return None, os.path.normpath
    would raise before the guard below — confirm its contract.
    """
    suffix_list = get_image_type_suffix_list(texture_type)
    material_name = utils.strip_name(mat.name).lower()
    last = ""  # last directory searched, so duplicate entries in dirs are skipped
    for dir in dirs:
        if dir:
            # if the texture folder does not exist, (e.g. files have been moved)
            # remap the relative path to the current blend file directory to try and find the images there
            if not os.path.exists(dir):
                dir = utils.local_repath(dir, base_dir)
            dir = os.path.normpath(dir)
            if dir and os.path.exists(dir):
                if last != dir:
                    last = dir
                    for suffix in suffix_list:
                        search = f"{material_name}_{suffix}"
                        file = find_file_by_name(dir, search)
                        if file:
                            return file
    return None
def find_file_by_name(search_dir, search):
    """Find the file by the name (without extension).

    The match is case-insensitive on the file's stem. Returns the full path,
    or None if the directory doesn't exist or no file matches.
    (Fixes: no longer shadows the builtin `dir`; drops a pointless
    os.path.split on the bare filenames returned by os.listdir.)
    """
    target = search.lower()
    if os.path.exists(search_dir):
        for f in os.listdir(search_dir):
            name, ext = os.path.splitext(f)
            if name.lower() == target:
                return os.path.join(search_dir, f)
    return None
def is_image_type_srgb(texture_type):
    """True if the texture type uses the sRGB color space (TEXTURE_TYPES column 2)."""
    return next((entry[2] for entry in params.TEXTURE_TYPES if entry[0] == texture_type), False)
def get_image_type_suffix_list(texture_type):
    """File-name suffixes for the texture type (TEXTURE_TYPES column 3), or []."""
    return next((entry[3] for entry in params.TEXTURE_TYPES if entry[0] == texture_type), [])
def get_image_type_json_id(texture_type):
    """Json identifier for the texture type (TEXTURE_TYPES column 1), or None."""
    return next((entry[1] for entry in params.TEXTURE_TYPES if entry[0] == texture_type), None)
def get_image_type_lib_name(texture_type):
    """Library image name for the texture type (TEXTURE_TYPES column 5, when present), or None."""
    return next((entry[5] for entry in params.TEXTURE_TYPES
                 if entry[0] == texture_type and len(entry) > 5), None)
def get_image_type_size_group(texture_type):
    """Max-size group for the texture type (TEXTURE_TYPES column 4), or None."""
    return next((entry[4] for entry in params.TEXTURE_TYPES if entry[0] == texture_type), None)
def is_library_tex(texture_type):
    """True if the texture type is sourced from the shared image library."""
    return bool(get_image_type_lib_name(texture_type))
def search_image_in_material_dirs(chr_cache, mat_cache, mat, texture_type):
    """Search the material's and the character's texture folders for the texture type's image file."""
    return find_image_file(chr_cache.get_import_dir(), [mat_cache.get_tex_dir(chr_cache), chr_cache.get_tex_dir()], mat, texture_type)
def get_max_sized_width_height(width, height, max_size):
    """Clamp (width, height) so neither exceeds max_size, preserving aspect ratio.

    Fix: the major axis was previously clamped *before* computing the scaled
    minor axis, so height*max_size/width evaluated with width already equal to
    max_size — leaving the minor axis unscaled and breaking the aspect ratio.
    """
    if width > max_size or height > max_size:
        if width > height:
            # compute the scaled minor axis before overwriting the major one
            height = int(height * max_size / width)
            width = max_size
        elif height > width:
            width = int(width * max_size / height)
            height = max_size
        else:
            width = max_size
            height = max_size
    return width, height
def apply_max_size(image: bpy.types.Image, texture_type):
    """Downscale image in place to the user's max texture size for its size group,
    preserving aspect ratio. No-op when max-size limiting is disabled.

    Fixes: height was read from image.size[0] (the width); the major axis was
    clamped before computing the scaled minor axis (breaking aspect ratio);
    size_group was not interpolated into the log message.
    """
    prefs = vars.prefs()
    if prefs.use_max_tex_size:
        size_group = get_image_type_size_group(texture_type)
        max_size = int(prefs.size_max_tex_default)
        if size_group == "DETAIL":
            max_size = int(prefs.size_max_tex_detail)
        elif size_group == "MINIMAL":
            max_size = int(prefs.size_max_tex_minimal)
        width = image.size[0]
        height = image.size[1]  # fix: was image.size[0]
        if width > max_size or height > max_size:
            if width > height:
                # fix: scale the minor axis before clamping the major one
                height = int(height * max_size / width)
                width = max_size
            elif height > width:
                width = int(width * max_size / height)
                height = max_size
            else:
                width = max_size
                height = max_size
            # fix: size_group was a literal "(size_group)" in the message
            utils.log_info(f"resizing image: {image.name} ({size_group}) to {width} x {height}")
            image.scale(width, height)
def find_material_image(mat, texture_type, processed_images = None, tex_json = None, mat_json = None):
    """Try to find the texture for a material input by searching for the material name
    appended with the possible suffixes e.g. Vest_diffuse or Hair_roughness

    Resolution order: cached temp weight map, shared library image, the path
    from the json data (absolute, import-relative, then blend-relative),
    embedded texture mappings, and — only when there is no json data — a
    search of the texture folders. Returns a bpy image or None.
    """
    props = vars.props()
    mat_cache = props.get_material_cache(mat)
    chr_cache = props.get_character_cache(None, mat)
    image_file = None
    color_space = "Non-Color"
    if is_image_type_srgb(texture_type):
        color_space = "sRGB"
    # temp weight maps in the cache override weight maps on disk
    if texture_type == "WEIGHTMAP" and mat_cache.temp_weight_map is not None:
        utils.log_info(f"Using material cache user weightmap: {mat_cache.temp_weight_map.name}")
        return mat_cache.temp_weight_map
    # try to find as library image
    lib_name = get_image_type_lib_name(texture_type)
    if lib_name:
        image = lib.get_image(lib_name)
        if image:
            # fix: set the color space only when an image was actually found
            # (previously called with image possibly None)
            colorspace.set_image_color_space(image, color_space)
            apply_max_size(image, texture_type)
            return image
    # try to find the image in the json data first:
    if tex_json:
        tex_path: str = utils.fix_texture_rel_path(tex_json["Texture Path"])
        is_tex_path_relative = not os.path.isabs(tex_path)
        if tex_path:
            if is_tex_path_relative:
                image_file = os.path.normpath(os.path.join(chr_cache.get_import_dir(), tex_path))
            else:
                image_file = os.path.normpath(tex_path)
            # try to load image path directly
            if os.path.exists(image_file):
                image = load_image(image_file, color_space, processed_images)
                if image:
                    apply_max_size(image, texture_type)
                    return image
            # try remapping the image path relative to the local directory
            if is_tex_path_relative:
                image_file = utils.local_path(tex_path)
                if image_file and os.path.exists(image_file):
                    image = load_image(image_file, color_space, processed_images)
                    if image:
                        apply_max_size(image, texture_type)
                        return image
            # try to find the image in the texture_mappings (all embedded images should be here)
            for tex_mapping in mat_cache.texture_mappings:
                if tex_mapping:
                    if texture_type == tex_mapping.texture_type:
                        if tex_mapping.image:
                            image = tex_mapping.image
                            apply_max_size(image, texture_type)
                            return image
            utils.log_error(f"{texture_type} - json image path not found: {tex_path}")
            return None
    # if there is a mat_json but no texture json, then there is no texture to use
    # (so don't look for one as it could find the wrong one i.e. fbm files with duplicated names)
    #elif mat_json
    #
    #    utils.log_warn(f"No {texture_type} json data found!")
    #    return None
    # with no Json data, try to locate the images in the texture folders:
    else:
        # try to find the image in the texture_mappings (all embedded images should be here)
        if mat_cache:
            for tex_mapping in mat_cache.texture_mappings:
                if tex_mapping:
                    if texture_type == tex_mapping.texture_type:
                        if tex_mapping.image:
                            utils.log_info(f"Using embedded image: {tex_mapping.image.name}")
                            image = tex_mapping.image
                            apply_max_size(image, texture_type)
                            return image
            image_file = search_image_in_material_dirs(chr_cache, mat_cache, mat, texture_type)
            if image_file:
                image = load_image(image_file, color_space, processed_images)
                if image:
                    apply_max_size(image, texture_type)
                    return image
            # then try to find the image in the texture_mappings (all embedded images should be here)
            for tex_mapping in mat_cache.texture_mappings:
                if tex_mapping:
                    if texture_type == tex_mapping.texture_type:
                        if tex_mapping.image:
                            image = tex_mapping.image
                            apply_max_size(image, texture_type)
                            return image
                        elif tex_mapping.texture_path is not None and tex_mapping.texture_path != "":
                            image = load_image(tex_mapping.texture_path, color_space, processed_images)
                            if image:
                                apply_max_size(image, texture_type)
                                return image
    return None
def get_material_tex_dir(chr_cache, obj, mat):
    """Returns the *relative* path to the texture folder for this material.

    FBX imports: textures/<char_id>/<object_name>/<mesh_name>/<material_name>
    or textures/<char_id>/<char_id>/<mesh_name>/<material_name>, falling back
    to the <char_id>.fbm folder. OBJ imports: the character id folder.
    Returns None for any other import type.
    (Fix: removed an unused `props = vars.props()` local.)
    """
    if chr_cache.is_import_type("FBX"):
        object_name = utils.strip_name(obj.name)
        mesh_name = utils.strip_name(obj.data.name)
        material_name = utils.strip_name(mat.name)
        # non .fbm textures are stored in two possible locations:
        #    /textures/character_name/object_name/mesh_name/material_name
        # or /textures/character_name/character_name/mesh_name/material_name
        rel_object = os.path.join("textures", chr_cache.get_character_id(), object_name, mesh_name, material_name)
        path_object = os.path.join(chr_cache.get_import_dir(), rel_object)
        rel_character = os.path.join("textures", chr_cache.get_character_id(), chr_cache.get_character_id(), mesh_name, material_name)
        path_character = os.path.join(chr_cache.get_import_dir(), rel_character)
        if os.path.exists(path_object):
            return rel_object
        elif os.path.exists(path_character):
            return rel_character
        else:
            return os.path.join(chr_cache.get_character_id() + ".fbm")
    elif chr_cache.is_import_type("OBJ"):
        return chr_cache.get_character_id()
    # unknown import type: no texture folder convention
    return None
def get_material_tex_dirs(chr_cache, obj, mat):
    """Return [character tex dir, absolute material tex dir] for this material.

    NOTE(review): get_material_tex_dir() can return None for unknown import
    types, which would make os.path.join raise here — confirm callers only
    pass FBX/OBJ imports.
    """
    mat_dir = os.path.normpath(os.path.join(chr_cache.get_import_dir(), get_material_tex_dir(chr_cache, obj, mat)))
    return [chr_cache.get_tex_dir(), mat_dir]
def find_texture_folder_in_objects(objects):
    """Return the folder of the first image texture found in the objects' materials, or None.

    Fixes: material slots can be empty (None) and TEX_IMAGE nodes can have no
    image assigned — both previously raised AttributeError.
    """
    for obj in objects:
        if obj.type == "MESH":
            for mat in obj.data.materials:
                if mat and mat.node_tree:
                    nodes = mat.node_tree.nodes
                    for node in nodes:
                        if node.type == "TEX_IMAGE":
                            image = node.image
                            if image and image.filepath:
                                file_path = bpy.path.abspath(image.filepath)
                                folder = os.path.dirname(file_path)
                                if folder:
                                    return folder
    return None
def get_custom_image(image_name, size, alpha=False, data=True, float=False, path="", unique=False):
    """Fetch or create a square custom image datablock of the given size.

    An existing image of the same name is reused if its size matches,
    otherwise it is deleted and recreated. With unique=True a uniquely
    named image is always created. If path is given the new image is
    pre-saved there so it can be reloaded later.
    """
    image = None
    if unique:
        image_name = utils.unique_image_name(image_name)
    elif image_name in bpy.data.images:
        image = bpy.data.images[image_name]
        if image.size[0] != size or image.size[1] != size:
            bpy.data.images.remove(image)
            image = None
            utils.log_info(f"Deleting Custom image: {image_name}, wrong size.")
        else:
            utils.log_info(f"Reusing Custom image: {image_name}")
    # or create the bake image
    if not image:
        utils.log_info(f"Creating new Custom image: {image_name} {size}x{size}")
        image = bpy.data.images.new(image_name, size, size, alpha=alpha, is_data=data, float_buffer=float)
        if float:
            image.use_half_precision = False
        if path:
            image.filepath_raw = path
            image.save()
    return image
def save_scene_image(image : bpy.types.Image, file_path, file_format = 'PNG', color_depth = '8'):
    """To reload properly, the image must be pre-saved with image.filepath_raw = ... and image.save()

    Saves via a throwaway scene so render image settings don't disturb the
    user's scene. Fix: the temporary scene is now removed even when
    save_render/reload raises (was leaked on exception).
    """
    scene = bpy.data.scenes.new("RL_Save_Image_Settings_Scene")
    try:
        settings = scene.render.image_settings
        settings.color_depth = color_depth
        settings.file_format = file_format
        # 24-bit images have no alpha channel
        settings.color_mode = 'RGB' if image.depth == 24 else 'RGBA'
        if not file_path and image.filepath:
            file_path = bpy.path.abspath(image.filepath)
        image.save_render(filepath = file_path, scene = scene)
        if image.filepath:
            image.reload()
    finally:
        bpy.data.scenes.remove(scene)
def make_new_image(name, width, height, format, dir, data, has_alpha, channel_packed):
    """Create a new image datablock and save it into dir using the given format."""
    img = bpy.data.images.new(name, width, height, alpha=has_alpha, is_data=data)
    # touch a pixel — presumably to force the pixel buffer to allocate; TODO confirm
    img.pixels[0] = 0
    if has_alpha:
        img.alpha_mode = "CHANNEL_PACKED" if channel_packed else "STRAIGHT"
    return save_image_to_format_dir(img, format, dir, name)
def save_image_to_format_dir(img, format, dir, name):
    """Set the image's file format and save it as dir/name+ext, creating dir if needed.

    Unknown formats fall back to PNG. Returns the image.
    """
    ext = IMAGE_FORMATS.get(format)
    if ext is None:
        format = "PNG"
        ext = ".png"
    img.file_format = format
    full_dir = os.path.normpath(dir)
    full_path = os.path.normpath(os.path.join(full_dir, name + ext))
    utils.log_info(f"  Path: {full_path}")
    os.makedirs(full_dir, exist_ok=True)
    img.filepath_raw = full_path
    img.save()
    return img
@@ -0,0 +1,700 @@
# Copyright (C) 2021 Victor Soupday
# This file is part of CC/iC Blender Tools <https://github.com/soupday/cc_blender_tools>
#
# CC/iC Blender Tools is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# CC/iC Blender Tools is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with CC/iC Blender Tools. If not, see <https://www.gnu.org/licenses/>.
import json
import os
import bpy
import copy
from . import utils
JSON_CACHE = {}
def get_json_cache_copy(fbx_path):
    """Return a deep copy of the cached json for fbx_path, or None if absent
    (a None may also be cached for files that had no json)."""
    cached = JSON_CACHE.get(fbx_path)
    if cached is not None:
        return copy.deepcopy(cached)
    return None
def get_json_path(fbx_path):
    """Derive the companion .json path for an fbx file path (same folder, same
    stem). Returns None for a falsy path."""
    if not fbx_path:
        return None
    folder = os.path.dirname(fbx_path)
    stem = os.path.splitext(os.path.basename(fbx_path))[0]
    return os.path.join(folder, stem + ".json")
def read_json(fbx_path, errors, no_local=False):
    """Read and parse the character json file that accompanies an fbx export.

    Results are cached in JSON_CACHE by fbx path (cache hits return a deep
    copy). Looks for <name>.json beside the fbx, falling back to the blend
    file folder. A "<name>.json_local" override (written by update/replace via
    the datalink) takes precedence unless no_local is set. Appends "NO_JSON",
    "CORRUPT" or "PATH_FAILED" to errors on failure.

    Fixes: file handles are now closed via `with` even when parsing raises;
    no_local previously had no effect; `if errors:` skipped appending to an
    *empty* errors list; bare except narrowed; `bytes` no longer shadows the
    builtin.
    """
    json_file_exists = False
    json_path = None
    json_cache = get_json_cache_copy(fbx_path)
    if json_cache:
        return json_cache
    try:
        fbx_file = os.path.basename(fbx_path)
        fbx_folder = os.path.dirname(fbx_path)
        fbx_name = os.path.splitext(fbx_file)[0]
        json_path = os.path.join(fbx_folder, fbx_name + ".json")
        # if the json doesn't exist in the expected path, look for it in the blend file path
        if not os.path.exists(json_path):
            json_path = utils.local_path(fbx_name + ".json")
        if json_path and os.path.exists(json_path):
            json_file_exists = True
            # json_local is a custom version of the json created by the update/replace operator
            # to incorporate new & replaced objects and materials though the datalink
            json_local_path = json_path + "_local"
            if not no_local and os.path.exists(json_local_path):
                json_path = json_local_path
            # json files outputted from Visual Studio projects start with a
            # byte order mark (3 bytes EF BB BF): skip it when present
            with open(json_path, "rb") as file_bytes:
                bom = file_bytes.read(3)
            start = 3 if bom == b"\xEF\xBB\xBF" else 0
            # read json text
            with open(json_path, "rt") as file:
                file.seek(start)
                text_data = file.read()
            json_data = json.loads(text_data)
            JSON_CACHE[fbx_path] = json_data
            utils.log_info("Json data successfully parsed: " + json_path)
            return json_data
        utils.log_info("No Json data to parse, using defaults...")
        JSON_CACHE[fbx_path] = None
        if errors is not None:
            errors.append("NO_JSON")
        return None
    except Exception:
        utils.log_warn("Failed to read Json data: " + str(json_path))
        if errors is not None:
            if json_file_exists:
                errors.append("CORRUPT")
            else:
                errors.append("PATH_FAILED")
        return None
def write_json(json_data, path, is_fbx_path=False, is_json_local=False, update_cache=False):
    """Serialize json_data to disk with 4-space indentation.

    If is_fbx_path, path is an fbx file whose companion .json path is derived
    (and the cache optionally refreshed). If is_json_local, "_local" is
    appended to the output path.
    """
    if is_fbx_path:
        if update_cache:
            JSON_CACHE[path] = json_data
        folder = os.path.dirname(path)
        name = os.path.splitext(os.path.basename(path))[0]
        path = os.path.join(folder, name + ".json")
    if is_json_local:
        path += "_local"
    with open(path, "w") as write_file:
        write_file.write(json.dumps(json_data, indent = 4))
def safe_name(o):
    """Lower-cased, suffix-stripped name of o (a plain string or a named object).

    Fix: uses isinstance instead of `type(o) is str` so str subclasses are
    treated as strings rather than falling through to the .name lookup.
    """
    if isinstance(o, str):
        return utils.strip_name(o).lower()
    return utils.strip_name(o.name).lower()
def get_all_object_keys(chr_json):
    """Return the mesh object keys from the character json, or [] if it is falsy."""
    if not chr_json:
        return []
    return chr_json["Meshes"].keys()
def get_all_material_keys(chr_json):
    """Return a flat list of all material keys across all meshes.

    Fix: returns [] for a falsy chr_json (previously returned None,
    inconsistent with get_all_object_keys and unsafe to iterate).
    """
    if not chr_json:
        return []
    keys = []
    for obj_json in chr_json["Meshes"].values():
        keys.extend(obj_json["Materials"].keys())
    return keys
def get_character_generation_json(chr_json, character_id):
    """Return the character's "Generation" string, or None if missing.

    Fix: narrowed a bare `except:` (which also swallowed KeyboardInterrupt).
    """
    try:
        return chr_json[character_id]["Object"][character_id]["Generation"]
    except (KeyError, TypeError):
        utils.log_warn("Failed to read character generation data!")
        return None
def set_character_generation_json(chr_json, character_id, generation):
    """Set the character's "Generation"; True on success, False if the json
    structure is missing. (Fix: narrowed a bare except.)"""
    try:
        chr_json[character_id]["Object"][character_id]["Generation"] = generation
        return True
    except (KeyError, TypeError):
        utils.log_warn(f"Failed to set character generation to: {generation}")
        return False
def get_character_root_json(json_data, character_id):
    """Return the character's "Object" root json, or None if missing.
    (Fix: narrowed a bare except.)"""
    if not json_data:
        return None
    try:
        return json_data[character_id]["Object"]
    except (KeyError, TypeError):
        utils.log_warn("Failed to get character root Json data!")
        return None
def get_character_json(json_data, character_id):
    """Return the character's object json, or None if missing.
    (Fix: narrowed a bare except.)"""
    if not json_data:
        return None
    try:
        chr_json = json_data[character_id]["Object"][character_id]
        utils.log_detail("Character Json data found for: " + character_id)
        return chr_json
    except Exception:
        utils.log_warn("Failed to get character Json data!")
        return None
def get_object_info_json(json_data, character_id):
    """Return the character's "Object_Info" json, or None if missing.
    (Fix: narrowed a bare except.)"""
    if not json_data:
        return None
    try:
        info_json = json_data[character_id]["Object_Info"]
        utils.log_detail("Character Object Info Json data found for: " + character_id)
        return info_json
    except Exception:
        utils.log_warn("Failed to get character object info Json data!")
        return None
def get_facial_profile_json(json_data, character_id):
    """Return the character's "Facial_Profile" json, or None if missing.
    (Fix: narrowed a bare except.)"""
    try:
        return json_data[character_id]["Facial_Profile"]
    except (KeyError, TypeError):
        return None
def get_facial_profile_categories_json(json_data, character_id):
    """Return the facial profile's "Categories" json, or None if missing.
    (Fix: narrowed a bare except.)"""
    try:
        return json_data[character_id]["Facial_Profile"]["Categories"]
    except (KeyError, TypeError):
        return None
def set_facial_profile_categories_json(json_data, character_id, categories_json):
    """Set the facial profile "Categories", creating the "Facial_Profile"
    section if needed; True on success, False if the character entry is
    missing. (Fix: narrowed a bare except.)"""
    try:
        if "Facial_Profile" not in json_data[character_id]:
            json_data[character_id]["Facial_Profile"] = {}
        json_data[character_id]["Facial_Profile"]["Categories"] = categories_json
        return True
    except (KeyError, TypeError):
        return False
def get_object_json(chr_json, obj):
    """Find the mesh json entry for obj by case-insensitive, suffix-stripped
    name match; None if not found. (Fix: narrowed a bare except.)"""
    if not chr_json:
        return None
    try:
        name = safe_name(obj)
        meshes_json = chr_json["Meshes"]
        for object_name in meshes_json.keys():
            if object_name.lower() == name:
                utils.log_detail("Object Json data found for: " + name)
                return meshes_json[object_name]
    except Exception:
        utils.log_warn("Failed to get object Json data!")
    return None
def get_object_json_key(chr_json, obj_json):
    """Return the mesh name whose json entry equals obj_json, or None."""
    if not chr_json or obj_json is None:
        return None
    for mesh_name, mesh_json in chr_json["Meshes"].items():
        if mesh_json == obj_json:
            return mesh_name
    return None
def get_physics_json(chr_json):
    """Return the character's "Physics" json block, or None."""
    if chr_json is None:
        return None
    try:
        return chr_json["Physics"]
    except:
        return None
def get_soft_physics_json(physics_json, obj, mat):
    """Find the soft physics material json for an object/material pair.

    Names are matched case-insensitively via safe_name()."""
    try:
        obj_name = safe_name(obj)
        mat_name = safe_name(mat)
        meshes_json = physics_json["Soft Physics"]["Meshes"]
        for object_name, mesh_json in meshes_json.items():
            if object_name.lower() != obj_name:
                continue
            for material_name, material_json in mesh_json["Materials"].items():
                if material_name.lower() == mat_name:
                    return material_json
            # first matching object decides; no material match means no json
            return None
    except:
        utils.log_warn("Failed to get soft physics material Json data!")
        return None
def get_physics_mesh_json(soft_physics_json, obj):
    """Find the physics mesh json entry matching the object's name."""
    if not soft_physics_json:
        return None
    try:
        name = safe_name(obj)
        for object_name, mesh_json in soft_physics_json.items():
            if object_name.lower() == name:
                utils.log_detail("Physics Object Json data found for: " + name)
                return mesh_json
    except:
        utils.log_warn("Failed to get physics object Json data!")
    return None
def get_physics_mesh_json_key(soft_physics_json, physics_mesh_json):
    """Return the name whose physics mesh json equals physics_mesh_json."""
    if not soft_physics_json or physics_mesh_json is None:
        return None
    for mesh_name, mesh_json in soft_physics_json.items():
        if mesh_json == physics_mesh_json:
            return mesh_name
    return None
def get_custom_shader(mat_json):
    """Return the material's shader name.

    Prefers "Custom Shader"/"Shader Name", falls back to "Material Type",
    and finally defaults to "Pbr"."""
    try:
        return mat_json["Custom Shader"]["Shader Name"]
    except:
        pass
    try:
        return mat_json["Material Type"]
    except:
        utils.log_warn("Failed to find material shader data!")
        return "Pbr"
def get_material_json(obj_json, material):
    """Find the "Materials" json entry matching the material's name."""
    if not obj_json:
        return None
    try:
        name = safe_name(material)
        for material_name, material_json in obj_json["Materials"].items():
            if material_name.lower() == name:
                utils.log_detail("Material Json data found for: " + name)
                return material_json
    except:
        utils.log_warn("Failed to get material Json data!")
    return None
def get_material_json_key(obj_json, mat_json):
    """Return the material name whose json entry equals mat_json, or None."""
    if not obj_json or mat_json is None:
        return None
    for material_name, material_json in obj_json["Materials"].items():
        if material_json == mat_json:
            return material_name
    return None
def get_physics_material_json(physics_mesh_json, material):
    """Find the physics "Materials" json entry matching the material name."""
    if not physics_mesh_json:
        return None
    try:
        name = safe_name(material)
        for material_name, material_json in physics_mesh_json["Materials"].items():
            if material_name.lower() == name:
                utils.log_detail("Physics Material Json data found for: " + name)
                return material_json
    except:
        utils.log_warn("Failed to get physics material Json data!")
    return None
def get_physics_material_json_key(physics_mesh_json, physics_mat_json):
    """Return the material name whose physics json equals physics_mat_json."""
    if not physics_mesh_json or physics_mat_json is None:
        return None
    for material_name, material_json in physics_mesh_json["Materials"].items():
        if material_json == physics_mat_json:
            return material_name
    return None
def get_texture_info(mat_json, texture_id):
    """Look up texture info by id: pbr, then shader, then wrinkle textures."""
    for getter in (get_pbr_texture_info,
                   get_shader_texture_info,
                   get_wrinkle_texture_info):
        tex_info = getter(mat_json, texture_id)
        if tex_info is not None:
            return tex_info
    return None
def get_texture_channel_strength(mat_json, texture_id, default_value=None):
    """Return the texture strength as a 0-1 fraction, or default_value."""
    tex_info = get_texture_info(mat_json, texture_id)
    if not tex_info or "Strength" not in tex_info:
        return default_value
    return tex_info["Strength"] / 100
def get_pbr_texture_info(mat_json, texture_id):
    """Return the pbr "Textures" entry for texture_id, or None."""
    if not mat_json:
        return None
    try:
        textures_json = mat_json["Textures"]
        return textures_json[texture_id]
    except:
        return None
def get_shader_texture_info(mat_json, texture_id):
    """Return the "Custom Shader"/"Image" entry for texture_id, or None."""
    if not mat_json:
        return None
    try:
        images_json = mat_json["Custom Shader"]["Image"]
        return images_json[texture_id]
    except:
        return None
def get_wrinkle_texture_info(mat_json, texture_id):
    """Return wrinkle texture info, checking "Wrinkle" then "Resource Textures"."""
    if not mat_json:
        return None
    for key_path in (("Wrinkle", "Textures"), ("Resource Textures",)):
        node = mat_json
        try:
            for key in key_path:
                node = node[key]
            return node[texture_id]
        except:
            continue
    return None
def get_material_json_var(mat_json, var_path: str):
    """Fetch a material json variable addressed as "Type/Name[/Sub]"."""
    paths = var_path.split('/')
    var_type, var_name = paths[0], paths[1]
    # Pbr vars take the whole path (may carry a third component)
    if var_type == "Pbr":
        return get_pbr_var(mat_json, var_name, paths)
    getters = {
        "Custom": get_shader_var,
        "Reflection": get_direct_shader_var,
        "SSS": get_sss_var,
    }
    # anything else (e.g. "Base") reads a top level material value
    getter = getters.get(var_type, get_material_var)
    return getter(mat_json, var_name)
def get_shader_var(mat_json, var_name):
    """Return a "Custom Shader" variable value, or None."""
    if not mat_json:
        return None
    try:
        variables_json = mat_json["Custom Shader"]["Variable"]
        return variables_json[var_name]
    except:
        return None
def get_direct_shader_var(mat_json, var_name):
    """Return a value stored directly under "Custom Shader", or None."""
    if not mat_json:
        return None
    try:
        shader_json = mat_json["Custom Shader"]
        return shader_json[var_name]
    except:
        return None
def get_pbr_var(mat_json, var_name, paths):
    """Return a pbr texture value.

    With a 3 part path, reads the named sub-value; "Displacement" with a 2
    part path returns Multiplier * Strength/100; otherwise Strength/100."""
    if not mat_json:
        return None
    try:
        tex_json = mat_json["Textures"][var_name]
        if len(paths) == 3:
            return tex_json.get(paths[2], 1.0)
        if len(paths) == 2 and var_name == "Displacement":
            multiplier = tex_json.get("Multiplier", 1.0)
            strength = tex_json.get("Strength", 100.0)
            return multiplier * strength / 100.0
        return tex_json.get("Strength", 100.0) / 100.0
    except:
        return None
def get_material_var(mat_json, var_name):
    """Return a top level material json value, or None."""
    if not mat_json:
        return None
    try:
        return mat_json[var_name]
    except:
        return None
def get_sss_var(mat_json, var_name):
    """Return a "Subsurface Scatter" json value, or None."""
    if not mat_json:
        return None
    try:
        sss_json = mat_json["Subsurface Scatter"]
        return sss_json[var_name]
    except:
        return None
def set_material_json_var(mat_json, var_path: str, value):
    """Write a material json variable addressed as "Type/Name[/Sub]"."""
    paths = var_path.split('/')
    var_type, var_name = paths[0], paths[1]
    # Pbr vars take the whole path (may carry a third component)
    if var_type == "Pbr":
        set_pbr_var(mat_json, var_name, paths, value)
        return
    setters = {
        "Custom": set_shader_var,
        "SSS": set_sss_var,
    }
    # anything else (e.g. "Base") writes a top level material value
    setter = setters.get(var_type, set_material_var)
    setter(mat_json, var_name, value)
def set_shader_var(mat_json, var_name, value):
    """Set a "Custom Shader" variable, ignoring missing json structure."""
    if not mat_json:
        return
    try:
        mat_json["Custom Shader"]["Variable"][var_name] = value
    except:
        pass
def set_pbr_var(mat_json, var_name, paths, value):
    """Write a pbr texture value; plain writes go to "Strength" scaled x100."""
    if not mat_json:
        return
    try:
        tex_json = mat_json["Textures"][var_name]
        if len(paths) == 3:
            tex_json[paths[2]] = value
            return
        # metallic and roughness don't have controllable strength settings, so always set to max
        if var_name in ("Metallic", "Roughness"):
            value = 1.0
        tex_json["Strength"] = value * 100.0
    except:
        pass
def set_material_var(mat_json, var_name, value):
if mat_json:
try:
mat_json[var_name] = value
except:
return
def set_sss_var(mat_json, var_name, value):
    """Set a "Subsurface Scatter" json value, ignoring missing structure."""
    if not mat_json:
        return
    try:
        mat_json["Subsurface Scatter"][var_name] = value
    except:
        pass
def convert_to_color(json_var):
    """Convert a 0-255 RGB(A) json list to 0-1 floats, mutating it in place.

    A 3 component list gains an alpha of 1. Non-list values pass through."""
    if type(json_var) is list:
        for i, channel in enumerate(json_var):
            json_var[i] = channel / 255.0
        if len(json_var) == 3:
            json_var.append(1)
    return json_var
def convert_from_color(color):
    """Convert a 0-1 color to 0-255 integer RGB; returns white on failure."""
    try:
        return [int(color[i] * 255.0) for i in range(3)]
    except:
        return [255, 255, 255]
def get_shader_var_color(mat_json, var_name):
    """Return a "Custom Shader" variable converted to a 0-1 color, or None."""
    if not mat_json:
        return None
    try:
        json_color = mat_json["Custom Shader"]["Variable"][var_name]
        # converts 0-255 json channels to 0-1 floats (mutates the json list)
        return convert_to_color(json_color)
    except:
        return None
def get_json(json_data, path: str, default=None):
    """Walk a "key/key/..." path through nested json; default on any miss."""
    if not json_data:
        return default
    node = json_data
    for key in path.split("/"):
        if key not in node:
            return default
        node = node[key]
    return node
def set_json(json_data, path: str, value):
    """Set the value at a "key/key/..." path.

    Returns True when set, False when the path does not already exist."""
    if not json_data:
        return False
    keys = path.split("/")
    node = json_data
    for key in keys[:-1]:
        if key not in node:
            return False
        node = node[key]
    if keys[-1] not in node:
        return False
    node[keys[-1]] = value
    return True
def generate_character_base_json_data(name):
    """Create a minimal character json structure for a character called name.

    Mirrors the layout of an export json: { name: { "Object": { name: ... } } }
    with an empty "Meshes" dict and blank "Generation".
    """
    json_data = {
        name: {
            "Version": "1.10.1822.1",
            "Scene": {
                # NOTE(review): "Name": True is a bool where a string looks
                # expected — confirm against a real export json
                "Name": True,
                "SupportShaderSelect": True
            },
            "Object": {
                name: {
                    "Generation": "",
                    "Meshes": {
                    },
                },
            },
        }
    }
    return json_data
def add_json_path(json_data, path):
    """Ensure every dict along "a/b/c" exists; return the innermost node."""
    node = json_data
    for key in path.split("/"):
        node = node.setdefault(key, {})
    return node
def rename_json_key(json_data, old_name, new_name):
    """Rename a key in place; returns True if old_name was present."""
    if old_name not in json_data:
        return False
    json_data[new_name] = json_data.pop(old_name)
    return True
def add_physics_json(json_data, character_id, collider_source_json=None, collider_source_id=None):
    """Ensure the soft physics json structure exists for the character.

    Optionally deep-copies "Collision Shapes" from another character's json.
    Returns (physics_meshes_json, colliders_json)."""
    base = f"{character_id}/Object/{character_id}/Physics/Soft Physics"
    phys_meshes = add_json_path(json_data, f"{base}/Meshes")
    colliders = add_json_path(json_data, f"{base}/Collision Shapes")
    if collider_source_json and collider_source_id:
        source_path = f"{collider_source_id}/Object/{collider_source_id}/Physics/Collision Shapes"
        collider_source = get_json(collider_source_json, source_path)
        colliders = copy.deepcopy(collider_source)
        set_json(json_data, f"{base}/Collision Shapes", colliders)
    return phys_meshes, colliders
def get_character_meshes_json(json_data, character_id):
    """Return (meshes json, soft physics meshes json) for the character.

    Either element is None when that part of the json is missing."""
    def _lookup(keys):
        # walk the key chain, None on any failure
        node = json_data
        try:
            for key in keys:
                node = node[key]
            return node
        except:
            return None
    base = (character_id, "Object", character_id)
    meshes_json = _lookup(base + ("Meshes",))
    phys_meshes_json = _lookup(base + ("Physics", "Soft Physics", "Meshes"))
    return meshes_json, phys_meshes_json
def get_physics_collision_shapes_json(json_data, character_id) -> dict:
    """Return the character's physics "Collision Shapes" json, or None."""
    try:
        chr_json = json_data[character_id]["Object"][character_id]
        return chr_json["Physics"]["Collision Shapes"]
    except:
        return None
def remap_mesh_json_tex_paths(obj_json, phys_json, from_dir, to_dir):
    """Rewrite the texture paths in a mesh json (and its physics weight maps)
    so paths relative to from_dir become relative to to_dir.

    Paths are normalized to forward slashes. Mutates the json in place."""
    def _remap(tex_path):
        # re-anchor the relative path, then normalize separators to "/"
        # BUG FIX: was .replace(r"\\", "/") which is a two-backslash literal
        # and never matched single backslashes from os.path on Windows
        full_path = os.path.normpath(os.path.join(from_dir, tex_path))
        return os.path.relpath(full_path, to_dir).replace("\\", "/")

    if obj_json and "Materials" in obj_json:
        for mat_json in obj_json["Materials"].values():
            if "Textures" in mat_json:
                for tex_info in mat_json["Textures"].values():
                    if tex_info["Texture Path"]:
                        tex_info["Texture Path"] = _remap(tex_info["Texture Path"])
            if "Custom Shader" in mat_json:
                for tex_info in mat_json["Custom Shader"]["Image"].values():
                    if tex_info["Texture Path"]:
                        tex_info["Texture Path"] = _remap(tex_info["Texture Path"])

    if phys_json and "Materials" in phys_json:
        for mat_json in phys_json["Materials"].values():
            tex_path = mat_json.get("Weight Map Path")
            if tex_path:
                # BUG FIX: previously wrote to a stale tex_info from the loops
                # above (or raised NameError), leaving the weight map unmapped
                mat_json["Weight Map Path"] = _remap(tex_path)
def get_meshes_images(meshes_json, filter=None):
    """Collect the set of normalized texture paths used by the meshes.

    filter, when given, restricts collection to those mesh names."""
    images = set()
    for mesh_name, mesh_json in meshes_json.items():
        if filter and mesh_name not in filter:
            continue
        for mat_json in mesh_json["Materials"].values():
            # gather both pbr textures and custom shader images
            tex_groups = []
            if "Textures" in mat_json:
                tex_groups.append(mat_json["Textures"])
            if "Custom Shader" in mat_json:
                tex_groups.append(mat_json["Custom Shader"]["Image"])
            for group in tex_groups:
                for tex_info in group.values():
                    tex_path = tex_info["Texture Path"]
                    if tex_path:
                        images.add(os.path.normpath(tex_path))
    return images
def get_displacement_data(mat_json):
    """Return displacement settings from the material json.

    Returns (texture_path, strength 0-1, tessellation level, multiplier,
    gray-scale base value), with safe defaults for missing values."""
    disp = "Textures/Displacement"
    texture_path = get_json(mat_json, f"{disp}/Texture Path", "")
    strength = get_json(mat_json, f"{disp}/Strength", 0.0) / 100.0
    level = int(get_json(mat_json, f"{disp}/Tessellation Level", 0))
    multiplier = get_json(mat_json, f"{disp}/Multiplier", 1.0)
    base = get_json(mat_json, f"{disp}/Gray-scale Base Value", 0.0)
    return texture_path, strength, level, multiplier, base
# ---------------------------------------------------------------------------
# (file boundary: leftover diff hunk marker "@@ -0,0 +1,172 @@" removed)
# ---------------------------------------------------------------------------
# Copyright (C) 2021 Victor Soupday
# This file is part of CC/iC Blender Tools <https://github.com/soupday/cc_blender_tools>
#
# CC/iC Blender Tools is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# CC/iC Blender Tools is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with CC/iC Blender Tools. If not, see <https://www.gnu.org/licenses/>.
import bpy, os
from . import utils, vars
def get_object(object_names,
               lib_tag="RL_Library_Object",
               allow_duplicates=True,
               names=None):
    """Fetch objects from the bundled _LIB341.blend library file.

    object_names: a single name or list of names to append.
    lib_tag: custom property used to tag/recognise library objects.
    allow_duplicates: when False, reuse matching tagged objects already in the file.
    names: optional replacement name(s) for the appended objects.
    Returns a single object when object_names was a string with names given,
    otherwise a list. Raises ValueError if any object could not be appended.
    """
    single = False
    if type(object_names) is str:
        object_names = [ object_names ]
        if names:
            names = [ names ]
            single = True
    appended_objects = [None]*len(object_names)
    found = 0
    if not allow_duplicates:
        # reuse existing tagged library objects of the current add-on version
        # NOTE(review): when names is None, names[i] below will raise —
        # confirm callers always pass names with allow_duplicates=False
        for obj in bpy.data.objects:
            for i, object_name in enumerate(object_names):
                if ((obj.name.startswith(object_name) or
                     (obj.name.startswith(names[i]))) and
                    utils.prop(obj, lib_tag) and
                    is_version(obj)):
                    appended_objects[i] = obj
                    found += 1
    # append only the objects not already found
    files = [ {"name": object_name } for i, object_name in enumerate(object_names) if appended_objects[i] is None ]
    if files:
        path = os.path.dirname(os.path.realpath(__file__))
        filename = "_LIB341.blend"
        datablock = "Object"
        file = os.path.join(path, filename)
        if os.path.exists(file):
            # snapshot existing objects so newly appended ones can be identified
            objects = utils.get_set(bpy.data.objects)
            bpy.ops.wm.append(directory=os.path.join(path, filename, datablock),
                              files=files,
                              set_fake=True,
                              link=False)
            new = utils.get_set_new(bpy.data.objects, objects)
            for i, object_name in enumerate(object_names):
                if appended_objects[i] is None:
                    for obj in new:
                        if utils.strip_name(obj.name) == object_name and lib_tag not in obj:
                            # tag with the library marker and add-on version
                            obj[lib_tag] = True
                            obj["RL_Addon_Version"] = vars.VERSION_STRING
                            if names:
                                obj.name = names[i]
                                try:
                                    obj.data.name = names[i]
                                except: ...
                            utils.log_info(f"Appended Library Object: {path} / {object_name} > {obj.name}")
                            appended_objects[i] = obj
                            found += 1
    if found < len(object_names):
        raise ValueError(f"Unable to append all Library Objects: {object_names} from {path}")
    if single:
        return appended_objects[0]
    else:
        return appended_objects
def get_image(image_name, lib_tag="RL_Library_Image"):
    """Fetch an image from the bundled _LIB341.blend library, packed.

    Reuses an already appended image tagged with lib_tag at the current
    add-on version, otherwise appends it from the library file.
    Raises ValueError when the image cannot be appended.
    """
    # reuse an existing tagged library image of the current add-on version
    for img in bpy.data.images:
        if (img.name.startswith(image_name) and
            utils.prop(img, lib_tag) and
            is_version(img)):
            if not img.packed_file:
                img.pack()
            return img
    path = os.path.dirname(os.path.realpath(__file__))
    filename = "_LIB341.blend"
    datablock = "Image"
    file = os.path.join(path, filename)
    appended_image = None
    if os.path.exists(file):
        # snapshot existing images so the newly appended one can be identified
        images = utils.get_set(bpy.data.images)
        bpy.ops.wm.append(directory=os.path.join(path, filename, datablock),
                          filename=image_name,
                          set_fake=True,
                          link=False)
        new = utils.get_set_new(bpy.data.images, images)
        for img in new:
            if utils.strip_name(img.name) == image_name and lib_tag not in img:
                # tag with the library marker and add-on version
                img[lib_tag] = True
                img["RL_Addon_Version"] = vars.VERSION_STRING
                utils.log_info(f"Appended Library Image: {path} / {image_name} > {img.name}")
                appended_image = img
    if not appended_image:
        raise ValueError(f"Unable to append Library Image: {image_name} from {path}")
    else:
        # pack so the image survives without the library file on disk
        if not appended_image.packed_file:
            appended_image.pack()
    return appended_image
def get_node_group(group_name, lib_tag="RL_Node_Group"):
    """Fetch a node group from the bundled _LIB341.blend library.

    Reuses an already appended node group tagged with lib_tag at the current
    add-on version, otherwise appends it from the library file.
    Raises ValueError when the node group cannot be appended.
    """
    # reuse an existing tagged library node group of the current add-on version
    for node_tree in bpy.data.node_groups:
        if (node_tree.name.startswith(group_name) and
            utils.prop(node_tree, lib_tag) and
            is_version(node_tree)):
            return node_tree
    path = os.path.dirname(os.path.realpath(__file__))
    filename = "_LIB341.blend"
    datablock = "NodeTree"
    file = os.path.join(path, filename)
    appended_group = None
    if os.path.exists(file):
        # snapshot existing groups so the newly appended one can be identified
        node_groups = utils.get_set(bpy.data.node_groups)
        bpy.ops.wm.append(directory=os.path.join(path, filename, datablock),
                          filename=group_name,
                          set_fake=True,
                          link=False)
        new = utils.get_set_new(bpy.data.node_groups, node_groups)
        for node_tree in new:
            if utils.strip_name(node_tree.name) == group_name and lib_tag not in node_tree:
                # tag with the library marker and add-on version
                node_tree[lib_tag] = True
                node_tree["RL_Addon_Version"] = vars.VERSION_STRING
                utils.log_info(f"Appended Library Node Group: {path} / {group_name} > {node_tree.name}")
                appended_group = node_tree
    if not appended_group:
        # BUG FIX: message previously said "Library Image" for a node group
        raise ValueError(f"Unable to append Library Node Group: {group_name} from {path}")
    return appended_group
def check_node_groups():
    """Ensure every known library node group is loaded into the blend file."""
    for group_name in vars.NODE_GROUPS:
        get_node_group(group_name)
def remove_all_groups():
    """Remove every add-on generated node group from the blend file.

    Matches groups by the add-on name prefix or the "RL_Node_Group" tag."""
    # BUG FIX: snapshot first — removing from a bpy collection while
    # iterating it can skip entries or invalidate the iterator
    for group in list(bpy.data.node_groups):
        if vars.NODE_PREFIX in group.name or "RL_Node_Group" in group:
            bpy.data.node_groups.remove(group)
def rebuild_node_groups():
    """Delete all add-on node groups, then re-append them from the library."""
    remove_all_groups()
    check_node_groups()
def is_version(obj):
    """True if the datablock matches the current add-on version.

    Checked either by the version embedded in its name or by the
    "RL_Addon_Version" custom property."""
    if vars.VERSION_STRING in obj.name:
        return True
    return utils.prop(obj, "RL_Addon_Version") == vars.VERSION_STRING
# ---------------------------------------------------------------------------
# (file boundary: leftover diff hunk marker "@@ -0,0 +1,704 @@" removed)
# ---------------------------------------------------------------------------
# Copyright (C) 2021 Victor Soupday
# This file is part of CC/iC Blender Tools <https://github.com/soupday/cc_blender_tools>
#
# CC/iC Blender Tools is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# CC/iC Blender Tools is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with CC/iC Blender Tools. If not, see <https://www.gnu.org/licenses/>.
import math
import bpy, bmesh, mathutils
from . import materials, geom, jsonutils, utils, vars
def add_vertex_group(obj, name):
    """Return the named vertex group on obj, creating it if absent."""
    if name in obj.vertex_groups:
        return obj.vertex_groups[name]
    return obj.vertex_groups.new(name=name)
def remove_vertex_group(obj : bpy.types.Object, name):
    """Delete the named vertex group from obj if it exists."""
    if name not in obj.vertex_groups:
        return
    obj.vertex_groups.remove(obj.vertex_groups[name])
def get_vertex_group(obj, names) -> bpy.types.VertexGroup:
    """Return the first vertex group matching a name (or list of names)."""
    name_list = [ names ] if type(names) is str else names
    for group_name in name_list:
        if group_name in obj.vertex_groups:
            return obj.vertex_groups[group_name]
    return None
def clear_vertex_group(obj, vertex_group: bpy.types.VertexGroup):
    """Remove every vertex of the mesh from the given vertex group."""
    all_indices = [v.index for v in obj.data.vertices]
    vertex_group.remove(all_indices)
def set_vertex_group(obj, vertex_group, value):
    """Add weight value to every vertex in the group (group may be a name)."""
    if type(vertex_group) is str:
        try:
            vertex_group = obj.vertex_groups[vertex_group]
        except:
            vertex_group = None
    if not vertex_group:
        return
    indices = [v.index for v in obj.data.vertices]
    vertex_group.add(indices, value, 'ADD')
def count_vertex_group(obj, vertex_group: bpy.types.VertexGroup):
    """Count how many vertex weight entries reference the vertex group.

    The group may be given by name (or list of candidate names)."""
    if type(vertex_group) is str or type(vertex_group) is list:
        vertex_group = get_vertex_group(obj, vertex_group)
    if not vertex_group:
        return 0
    vg_idx = vertex_group.index
    return sum(1 for vert in obj.data.vertices
                 for g in vert.groups if g.group == vg_idx)
def total_vertex_group_weight(obj, vertex_group: bpy.types.VertexGroup):
    """Sum all vertex weights assigned to the vertex group.

    The group may be given by name (or list of candidate names)."""
    if type(vertex_group) is str or type(vertex_group) is list:
        vertex_group = get_vertex_group(obj, vertex_group)
    if not vertex_group:
        return 0.0
    vg_idx = vertex_group.index
    return sum(g.weight for vert in obj.data.vertices
                        for g in vert.groups if g.group == vg_idx)
def generate_eye_occlusion_vertex_groups(obj, mat_left, mat_right):
    """Create the eye occlusion vertex groups for both eyes.

    For each eye material, the inner/outer/top/bottom group weights come
    directly from the vertex UVs (u, 1-u, v, 1-v) and the "all" group is
    weighted 1.0 for every vertex of that material.
    """
    # left eye groups
    vertex_group_inner_l = add_vertex_group(obj, vars.OCCLUSION_GROUP_INNER + "_L")
    vertex_group_outer_l = add_vertex_group(obj, vars.OCCLUSION_GROUP_OUTER + "_L")
    vertex_group_top_l = add_vertex_group(obj, vars.OCCLUSION_GROUP_TOP + "_L")
    vertex_group_bottom_l = add_vertex_group(obj, vars.OCCLUSION_GROUP_BOTTOM + "_L")
    vertex_group_all_l = add_vertex_group(obj, vars.OCCLUSION_GROUP_ALL + "_L")
    # right eye groups
    vertex_group_inner_r = add_vertex_group(obj, vars.OCCLUSION_GROUP_INNER + "_R")
    vertex_group_outer_r = add_vertex_group(obj, vars.OCCLUSION_GROUP_OUTER + "_R")
    vertex_group_top_r = add_vertex_group(obj, vars.OCCLUSION_GROUP_TOP + "_R")
    vertex_group_bottom_r = add_vertex_group(obj, vars.OCCLUSION_GROUP_BOTTOM + "_R")
    vertex_group_all_r = add_vertex_group(obj, vars.OCCLUSION_GROUP_ALL + "_R")

    mesh = obj.data
    ul = mesh.uv_layers[0]
    # reusable single-element index list for the VertexGroup.add() calls
    index = [0]
    for poly in mesh.polygons:
        for loop_index in poly.loop_indices:
            loop_entry = mesh.loops[loop_index]
            vertex = mesh.vertices[loop_entry.vertex_index]
            uv = ul.data[loop_entry.index].uv
            index[0] = vertex.index
            # weight by UV position, per eye material
            slot = obj.material_slots[poly.material_index]
            if slot.material == mat_left:
                vertex_group_inner_l.add(index, uv.x, 'REPLACE')
                vertex_group_outer_l.add(index, 1.0 - uv.x, 'REPLACE')
                vertex_group_top_l.add(index, uv.y, 'REPLACE')
                vertex_group_bottom_l.add(index, 1.0 - uv.y, 'REPLACE')
                vertex_group_all_l.add([vertex.index], 1.0, 'REPLACE')
            elif slot.material == mat_right:
                vertex_group_inner_r.add(index, uv.x, 'REPLACE')
                vertex_group_outer_r.add(index, 1.0 - uv.x, 'REPLACE')
                vertex_group_top_r.add(index, uv.y, 'REPLACE')
                vertex_group_bottom_r.add(index, 1.0 - uv.y, 'REPLACE')
                vertex_group_all_r.add([vertex.index], 1.0, 'REPLACE')
def generate_tearline_vertex_groups(obj, mat, is_left=True, is_plus=False):
    """Create the tearline inner/all vertex groups for one eye.

    Weights are derived from the vertex UVs. In the "plus" layout each eye
    occupies an opposite UV corner; otherwise the tearline is a band
    centered on u = 0.5.
    """
    suffix = "_L" if is_left else "_R"
    vertex_group_inner = add_vertex_group(obj, vars.TEARLINE_GROUP_INNER + suffix)
    vertex_group_all = add_vertex_group(obj, vars.TEARLINE_GROUP_ALL + suffix)

    mesh = obj.data
    ul = mesh.uv_layers[0]
    for poly in mesh.polygons:
        slot = obj.material_slots[poly.material_index]
        if slot.material == mat:
            for loop_index in poly.loop_indices:
                loop_entry = mesh.loops[loop_index]
                vertex = mesh.vertices[loop_entry.vertex_index]
                uv = ul.data[loop_entry.index].uv
                if is_plus:
                    # each eye in an opposite corner of the UV space
                    if is_left:
                        weight = utils.smoothstep(0.3, 0.0, uv.x) * (1.0 if uv.y < 0.5 else 0.0)
                    else:
                        weight = utils.smoothstep(0.7, 1.0, uv.x) * (1.0 if uv.y > 0.5 else 0.0)
                else:
                    # band centered on u = 0.5
                    weight = 1.0 - utils.smoothstep(0, 0.1, abs(uv.x - 0.5))
                vertex_group_inner.add([vertex.index], weight, 'REPLACE')
                vertex_group_all.add([vertex.index], 1.0, 'REPLACE')
def rebuild_eye_vertex_groups(chr_cache):
    """Regenerate eye displacement vertex groups for all active eye objects."""
    if not chr_cache:
        return
    for obj in chr_cache.get_cache_objects():
        obj_cache = chr_cache.get_object_cache(obj)
        if not (obj and obj_cache and obj_cache.is_eye() and not obj_cache.disabled):
            continue
        mat_left, mat_right = materials.get_left_right_eye_materials(obj)
        cache_left = chr_cache.get_material_cache(mat_left)
        cache_right = chr_cache.get_material_cache(mat_right)
        if cache_left and cache_right:
            # re-create the eye displacement groups
            generate_eye_vertex_groups(obj, mat_left, mat_right, cache_left, cache_right)
def generate_eye_vertex_groups(obj, mat_left, mat_right, cache_left, cache_right):
    """Build the left/right eye displacement vertex groups.

    Each vertex is weighted by its radial distance in UV space from the
    eye center (0.5, 0.5), falling from 1.0 at the center to 0.0 at the
    sclera-scaled iris radius."""
    prefs = vars.prefs()
    vertex_group_l = add_vertex_group(obj, prefs.eye_displacement_group + "_L")
    vertex_group_r = add_vertex_group(obj, prefs.eye_displacement_group + "_R")

    def _radial_weight(mat_cache, radial):
        # an iris_radius of 0.16 maps to a UV radius of 0.128, scaled by sclera scale
        radius = (mat_cache.parameters.eye_sclera_scale *
                  (mat_cache.parameters.eye_iris_radius / 0.16) * 0.128)
        return utils.saturate(utils.remap(0, radius, 1.0, 0.0, radial))

    mesh = obj.data
    ul = mesh.uv_layers[0]
    for poly in mesh.polygons:
        for loop_index in poly.loop_indices:
            loop_entry = mesh.loops[loop_index]
            vertex = mesh.vertices[loop_entry.vertex_index]
            uv = ul.data[loop_entry.index].uv
            x = uv.x - 0.5
            y = uv.y - 0.5
            radial = math.sqrt(x * x + y * y)
            slot = obj.material_slots[poly.material_index]
            if slot.material == mat_left:
                weight = _radial_weight(cache_left, radial)
                vertex_group_l.add([vertex.index], weight, 'REPLACE')
            elif slot.material == mat_right:
                # BUG FIX: the right eye previously read eye_iris_scale where
                # the left eye read eye_sclera_scale (copy-paste asymmetry)
                weight = _radial_weight(cache_right, radial)
                vertex_group_r.add([vertex.index], weight, 'REPLACE')
def get_material_vertex_indices(obj, mat):
    """Return the unique vertex indices used by faces of the given material.

    Indices are returned in first-seen order."""
    vert_indices = []
    # membership test on a set: avoids the original O(n^2) list scan
    seen = set()
    mesh = obj.data
    for poly in mesh.polygons:
        poly_mat = obj.material_slots[poly.material_index].material
        if poly_mat == mat:
            for vert_index in poly.vertices:
                if vert_index not in seen:
                    seen.add(vert_index)
                    vert_indices.append(vert_index)
    return vert_indices
def get_material_vertices(obj, mat):
    """Mesh Edit Mode.

    Return the unique vertices used by faces of the given material, in
    first-seen order."""
    verts = []
    # BUG FIX: the original tested `vert_index not in verts`, comparing an
    # int index against vertex objects — the check never matched, so
    # duplicate vertices were appended; track seen indices instead
    seen = set()
    mesh = obj.data
    for poly in mesh.polygons:
        poly_mat = obj.material_slots[poly.material_index].material
        if poly_mat == mat:
            for vert_index in poly.vertices:
                if vert_index not in seen:
                    seen.add(vert_index)
                    verts.append(mesh.vertices[vert_index])
    return verts
def select_material_faces(obj, mat, select = True, deselect_first = False, include_edges = True, include_vertices = True):
    """Set the selection state of all faces using the given material.

    Optionally deselects elements first and propagates the selection to the
    faces' edges and vertices.
    NOTE(review): with deselect_first, an edge/vertex shared between a
    matching poly (processed earlier) and a non-matching poly will end up
    deselected — confirm this ordering is intended.
    NOTE(review): poly.edge_keys holds vertex index pairs; indexing
    mesh.edges with those values looks suspect — confirm.
    """
    mesh : bpy.types.Mesh = obj.data
    poly : bpy.types.MeshPolygon
    for poly in mesh.polygons:
        poly_mat = obj.material_slots[poly.material_index].material
        if deselect_first:
            poly.select = False
        if poly_mat == mat:
            poly.select = select
        if include_edges:
            for edge_key in poly.edge_keys:
                for edge_index in edge_key:
                    edge = mesh.edges[edge_index]
                    if deselect_first:
                        edge.select = False
                    if poly_mat == mat:
                        edge.select = select
        if include_vertices:
            for vertex_index in poly.vertices:
                vertex = mesh.vertices[vertex_index]
                if deselect_first:
                    vertex.select = False
                if poly_mat == mat:
                    vertex.select = select
def remove_material_verts(obj, mat):
    """Delete from the mesh every vertex used by faces of the given material."""
    mesh = obj.data
    utils.clear_selected_objects()
    # clear any existing edit-mode selection first
    if utils.edit_mode_to(obj):
        bpy.ops.mesh.select_all(action="DESELECT")
    # flag the material's vertices in object mode (selection flags on mesh
    # data are only writable outside edit mode)
    if utils.object_mode_to(obj):
        for vert in mesh.vertices:
            vert.select = False
        for poly in mesh.polygons:
            poly_mat = obj.material_slots[poly.material_index].material
            if poly_mat == mat:
                for vert_index in poly.vertices:
                    mesh.vertices[vert_index].select = True
    # back to edit mode to delete the selected vertices
    if utils.edit_mode_to(obj):
        bpy.ops.mesh.delete(type='VERT')
    utils.object_mode_to(obj)
def find_shape_key(obj : bpy.types.Object, shape_key_name) -> bpy.types.ShapeKey:
    """Return the named shape key on the object's mesh, or None."""
    try:
        key_blocks = obj.data.shape_keys.key_blocks
        return key_blocks[shape_key_name]
    except:
        return None
def objects_have_shape_key(objects, shape_key_name):
    """True if any of the objects has the named shape key."""
    return any(find_shape_key(obj, shape_key_name) is not None
               for obj in objects)
def get_viseme_profile(objects):
    """Determine which viseme name set the objects' shape keys belong to."""
    for name_set in (vars.CC4_VISEME_NAMES, vars.DIRECT_VISEME_NAMES):
        for key_name in name_set:
            if objects_have_shape_key(objects, key_name):
                return name_set
    # there is some overlap between CC4 facial expression names and CC3 viseme names
    # so consider CC3 visemes last
    return vars.CC3_VISEME_NAMES
def get_facial_profile(objects):
    """Classify the expression and viseme shape key profiles of the objects.

    Returns (expressionProfile, visemeProfile): expression is one of
    "MH", "TRA", "EXT", "STD" or "UNKNOWN"; viseme is one of "PAIRS4",
    "PAIRS3", "DIRECT" or "UNKNOWN" — decided by which marker shape keys
    are present on the mesh objects.
    """
    expressionProfile = "UNKNOWN"
    visemeProfile = "UNKNOWN"
    for obj in objects:
        if obj.type != "MESH": continue
        # MetaHuman-style expression markers
        if (find_shape_key(obj, "Mouth_Funnel_UL") or
            find_shape_key(obj, "Mouth_Funnel_UR") or
            find_shape_key(obj, "Eye_Look_Up_L") or
            find_shape_key(obj, "Eye_Look_Up_R") or
            find_shape_key(obj, "Jaw_Clench_L") or
            find_shape_key(obj, "Jaw_Clench_R")):
            expressionProfile = "MH"
        # TRA expression markers
        # NOTE(review): "Move_Jaw_Down" is checked three times here —
        # presumably two of these should be different key names; confirm
        if (find_shape_key(obj, "Move_Jaw_Down") or
            find_shape_key(obj, "Turn_Jaw_Down") or
            find_shape_key(obj, "Move_Jaw_Down") or
            find_shape_key(obj, "Move_Jaw_Down")):
            expressionProfile = "TRA"
        # ARKit-style markers also indicate TRA (but don't override MH/EXT)
        if (find_shape_key(obj, "A01_Brow_Inner_Up") or
            find_shape_key(obj, "A06_Eye_Look_Up_Left") or
            find_shape_key(obj, "A15_Eye_Blink_Right") or
            find_shape_key(obj, "A25_Jaw_Open") or
            find_shape_key(obj, "A37_Mouth_Close")):
            if (expressionProfile == "UNKNOWN" or
                expressionProfile == "STD"):
                expressionProfile = "TRA"
        # extended profile markers (don't override MH/TRA)
        if (find_shape_key(obj, "Ear_Up_L") or
            find_shape_key(obj, "Ear_Up_R") or
            find_shape_key(obj, "Eyelash_Upper_Up_L") or
            find_shape_key(obj, "Eyelash_Upper_Up_R") or
            find_shape_key(obj, "Mouth_Pucker_Up_R") or
            find_shape_key(obj, "Mouth_Funnel_Up_R")):
            if (expressionProfile == "UNKNOWN" or
                expressionProfile == "STD"):
                expressionProfile = "EXT"
        # standard profile markers (weakest signal)
        if (find_shape_key(obj, "Mouth_L") or
            find_shape_key(obj, "Mouth_R") or
            find_shape_key(obj, "Mouth_Pucker") or
            find_shape_key(obj, "Mouth_Funnel") or
            find_shape_key(obj, "Eye_L_Look_L") or
            find_shape_key(obj, "Eye_R_Look_R")):
            if expressionProfile == "UNKNOWN":
                expressionProfile = "STD"
        # viseme profiles: "V_" prefixed pairs
        if (find_shape_key(obj, "V_Open") or
            find_shape_key(obj, "V_Tight") or
            find_shape_key(obj, "V_Tongue_up") or
            find_shape_key(obj, "V_Tongue_Raise")):
            visemeProfile = "PAIRS4"
        # un-prefixed pairs
        # NOTE(review): this only downgrades PAIRS4/DIRECT to PAIRS3 and
        # never sets PAIRS3 from UNKNOWN — confirm intended
        if (find_shape_key(obj, "Open") or
            find_shape_key(obj, "Tight") or
            find_shape_key(obj, "Tongue_up") or
            find_shape_key(obj, "Tongue_Raise")):
            if (visemeProfile == "PAIRS4" or
                visemeProfile == "DIRECT"):
                visemeProfile = "PAIRS3"
        # direct viseme names
        if (find_shape_key(obj, "AE") or
            find_shape_key(obj, "EE") or
            find_shape_key(obj, "Er") or
            find_shape_key(obj, "Oh")):
            if visemeProfile == "UNKNOWN":
                visemeProfile = "DIRECT"
    return expressionProfile, visemeProfile
def set_facial_profile(objects, facial_profile, viseme_profile):
    """Tag mesh objects with expression/viseme profile custom properties.

    "rl_facial_profile" / "rl_viseme_profile" are set only when the object
    carries at least one recognisable facial / viseme shape key and the
    given profile is a real one (not "NONE"/"UNKNOWN").
    """
    for obj in objects:
        if obj.type != "MESH": continue
        if facial_profile != "NONE" and facial_profile != "UNKNOWN":
            # any marker shape key from any known expression profile
            # NOTE(review): "Move_Jaw_Down" is checked three times — confirm
            if (find_shape_key(obj, "Move_Jaw_Down") or
                find_shape_key(obj, "Turn_Jaw_Down") or
                find_shape_key(obj, "Move_Jaw_Down") or
                find_shape_key(obj, "Move_Jaw_Down") or
                find_shape_key(obj, "A01_Brow_Inner_Up") or
                find_shape_key(obj, "A06_Eye_Look_Up_Left") or
                find_shape_key(obj, "A15_Eye_Blink_Right") or
                find_shape_key(obj, "A25_Jaw_Open") or
                find_shape_key(obj, "A37_Mouth_Close") or
                find_shape_key(obj, "Ear_Up_L") or
                find_shape_key(obj, "Ear_Up_R") or
                find_shape_key(obj, "Eyelash_Upper_Up_L") or
                find_shape_key(obj, "Eyelash_Upper_Up_R") or
                find_shape_key(obj, "Eye_L_Look_L") or
                find_shape_key(obj, "Eye_R_Look_R") or
                find_shape_key(obj, "Mouth_L") or
                find_shape_key(obj, "Mouth_R") or
                find_shape_key(obj, "Eye_Wide_L") or
                find_shape_key(obj, "Eye_Wide_R") or
                find_shape_key(obj, "Mouth_Smile") or
                find_shape_key(obj, "Eye_Blink")):
                utils.set_prop(obj, "rl_facial_profile", facial_profile)
        if viseme_profile != "NONE" and viseme_profile != "UNKNOWN":
            # any marker shape key from any known viseme profile
            if (find_shape_key(obj, "V_Open") or
                find_shape_key(obj, "V_Tight") or
                find_shape_key(obj, "V_Tongue_up") or
                find_shape_key(obj, "V_Tongue_Raise") or
                find_shape_key(obj, "Open") or
                find_shape_key(obj, "Tight") or
                find_shape_key(obj, "Tongue_up") or
                find_shape_key(obj, "Tongue_Raise") or
                find_shape_key(obj, "AE") or
                find_shape_key(obj, "EE") or
                find_shape_key(obj, "Er") or
                find_shape_key(obj, "Oh")):
                utils.set_prop(obj, "rl_viseme_profile", viseme_profile)
def set_shading(obj, smooth=True):
    """Set smooth (or flat) shading on every face of a mesh object."""
    if not utils.object_exists_is_mesh(obj):
        return
    for poly in obj.data.polygons:
        poly.use_smooth = smooth
    obj.data.update()
def get_child_objects_with_vertex_groups(parent, group_names, objects = None):
    """Recursively collect parent and descendants that own any of the
    named vertex groups."""
    if objects is None:
        objects = []
    if any(vg.name in group_names for vg in parent.vertex_groups):
        objects.append(parent)
    for child in parent.children:
        get_child_objects_with_vertex_groups(child, group_names, objects)
    return objects
def has_vertex_color_data(obj):
    """True if the object's active vertex color layer has any non-zero channel."""
    if not (obj and obj.type == "MESH"):
        return False
    vcols = obj.data.vertex_colors
    if not (vcols and vcols.active):
        return False
    for vcol_data in vcols.active.data:
        color = vcol_data.color
        if any(color[i] > 0.0 for i in range(4)):
            return True
    return False
def count_selected_vertices(obj):
    """Count selected vertices, reading from the edit-mode bmesh if active."""
    if bpy.context.mode == 'EDIT_MESH':
        bm = bmesh.from_edit_mesh(obj.data)
        return sum(1 for v in bm.verts if v.select)
    return sum(1 for v in obj.data.vertices if v.select)
def separate_mesh_by_material_slots(obj: bpy.types.Object, slot_indices: list):
    """Split all faces using the given material slots into a new object.

    Returns the separated object, or None when nothing was separated."""
    if obj:
        if slot_indices:
            if utils.edit_mode_to(obj, only_this=True):
                bpy.ops.mesh.select_all(action='DESELECT')
                # select the faces of every requested material slot
                for slot_index in slot_indices:
                    if len(obj.material_slots) > slot_index:
                        bpy.context.object.active_material_index = slot_index
                        bpy.ops.object.material_slot_select()
                # only separate when a proper subset of the mesh is selected
                count = count_selected_vertices(obj)
                if count > 0 and count < len(obj.data.vertices):
                    bpy.ops.mesh.separate(type="SELECTED")
            if utils.object_mode():
                # the separated mesh is the newly selected object that isn't obj
                for o in bpy.context.selected_objects:
                    if o != obj:
                        return o
    return None
def separate_mesh_material_type(chr_cache, obj: bpy.types.Object, material_type: str):
    """Split faces of the given cached material type into a new object.

    Returns the separated object, or None when there was nothing to split."""
    if not (chr_cache and obj):
        return None
    slot_indices = []
    if utils.object_exists_is_mesh(obj):
        for slot in obj.material_slots:
            mat = slot.material
            if utils.material_exists(mat):
                mat_cache = chr_cache.get_material_cache(mat)
                if mat_cache and mat_cache.material_type == material_type:
                    slot_indices.append(slot.slot_index)
    if slot_indices:
        return separate_mesh_by_material_slots(obj, slot_indices)
    return None
def get_head_material_and_json(chr_cache, chr_json):
    """Find the character's head skin material and its original json data.

    Returns a (head_mat, head_mat_json) tuple; either element may be None
    when the material or its json data cannot be found.
    """
    head_mat = None
    head_mat_cache = None
    head_mat_json = None
    # find the head material in the character
    for mat_cache in chr_cache.head_material_cache:
        mat = mat_cache.material
        if mat_cache.material_type == "SKIN_HEAD" and utils.material_exists(mat):
            head_mat = mat
            head_mat_cache = mat_cache
    # fix: without this guard, head_mat.name below raises AttributeError
    # when no head material exists
    if head_mat is None:
        return None, None
    # find the head material json, from it's original json object
    # the head material may have been split from the original body mesh,
    # so we look in all the meshes for the head material
    for obj in chr_cache.get_cache_objects():
        obj_cache = chr_cache.get_object_cache(obj)
        # fix: obj_cache can be None for untracked objects
        if obj_cache and obj.type == "MESH":
            if head_mat.name in obj.data.materials:
                mat_json = jsonutils.get_json(chr_json, f"Meshes/{obj_cache.source_name}/Materials/{head_mat_cache.source_name}")
                if mat_json and jsonutils.get_json(mat_json, "Custom Shader/Shader Name") == "RLHead":
                    head_mat_json = mat_json
                    break
    return head_mat, head_mat_json
def get_head_body_object_quick(chr_cache):
    """Fast path for finding the head/body mesh: prefer a body object already
    tagged with a truthy "wrinkle_source" custom property, otherwise fall
    back to the full get_head_body_object() search."""
    if not chr_cache:
        return None
    for body in chr_cache.get_objects_of_type("BODY"):
        if "wrinkle_source" in body and body["wrinkle_source"]:
            return body
    return get_head_body_object(chr_cache)
def get_eye_object(chr_cache):
    """Return the character's EYE objects, or None if there is no character."""
    # TODO merged expressions and morphs....
    return chr_cache.get_objects_of_type("EYE") if chr_cache else None
def get_tongue_object(chr_cache):
    """Return the character's TONGUE objects, or None if there is no character."""
    # TODO merged expressions and morphs....
    return chr_cache.get_objects_of_type("TONGUE") if chr_cache else None
def get_head_body_object(chr_cache):
    """Determine which mesh object carries the character's head.

    Scores every candidate body mesh by its total vertex-group weight to the
    head bones and picks the highest; falls back to the imported source body.
    The chosen object is tagged with a "wrinkle_source" custom property so
    get_head_body_object_quick() can find it cheaply next time.
    Returns None when there is no character.
    """
    if not chr_cache: return None
    body_cache = chr_cache.get_body_cache()
    arm = chr_cache.get_armature()
    # collect all possible body objects together
    head_bones = [ "CC_Base_Head", "head", "spine.006" ]
    body_objects = {}
    # fix: guard against a character with no armature (arm.children raised)
    if arm:
        if body_cache:
            body_id = body_cache.object_id
            for child in arm.children:
                if utils.get_rl_object_id(child) == body_id and child not in body_objects:
                    body_objects[child] = total_vertex_group_weight(child, head_bones)
        else:
            for child in arm.children:
                if child not in body_objects:
                    body_objects[child] = total_vertex_group_weight(child, head_bones)
    # try to find which one contains the head (contains the most weight to head bone)
    weight = -1
    body = None
    if body_objects:
        for obj in body_objects:
            # clear any stale tag; the winner is re-tagged below
            try:
                del obj["wrinkle_source"]
            except: ...
            if body_objects[obj] > weight:
                weight = body_objects[obj]
                body = obj
    # fall back to the imported source body if nothing works
    if not body:
        body = chr_cache.get_body()
    if body:
        try:
            body["wrinkle_source"] = True
        except: ...
    return body
# Cached eyelash-to-head surface mapping keyed by lash UV coordinate;
# written by store_lash_data() and consumed by restore_lash_data().
LASH_DATA = None

def store_lash_data(chr_cache):
    """Record where each eyelash vertex sits relative to the head surface.

    Duplicates the head/body mesh, splits off the head-skin and eyelash
    materials, and for every unique lash vertex stores
    (lash_uv, head_uv, distance, direction) in the global LASH_DATA dict,
    keyed by the lash UV coordinate rounded to 5 decimal places.
    The temporary split objects are deleted afterwards.
    """
    global LASH_DATA
    # copy body
    body_obj = utils.duplicate_object(get_head_body_object(chr_cache))
    head_obj = separate_mesh_material_type(chr_cache, body_obj, "SKIN_HEAD")
    lash_obj = separate_mesh_material_type(chr_cache, body_obj, "EYELASH")
    utils.log_always(f"HEAD: {head_obj.name}")
    utils.log_always(f"LASH: {lash_obj.name}")
    utils.delete_object(body_obj)
    mesh = lash_obj.data
    head_mesh = head_obj.data
    ul = mesh.uv_layers[0]
    verts_done = set()
    verts = {}
    i = 0
    for poly in mesh.polygons:
        for loop_index in poly.loop_indices:
            loop_entry = mesh.loops[loop_index]
            if loop_entry.vertex_index not in verts_done:
                i += 1
                verts_done.add(loop_entry.vertex_index)
                vertex = mesh.vertices[loop_entry.vertex_index]
                lash_co = vertex.co
                lash_uv = ul.data[loop_entry.index].uv
                # UV rounded to 5 decimals is used as a stable vertex key
                uv_id = lash_uv.to_tuple(5)
                success, closest_local_co, closest_local_no, closest_face_index = head_obj.closest_point_on_mesh(lash_co)
                head_co = closest_local_co
                # offset of the lash vertex from its nearest head surface point
                dir: mathutils.Vector = (lash_co - head_co)
                head_dist = dir.length
                head_dir = dir.normalized()
                head_uv = uv_from_point(head_mesh, head_co, closest_face_index)
                verts[uv_id] = (lash_uv.copy(), head_uv.copy(), head_dist, head_dir)
    utils.delete_object(lash_obj)
    utils.delete_object(head_obj)
    LASH_DATA = verts
def restore_lash_data(chr_cache):
    """Re-seat eyelash vertices on the (possibly modified) head surface.

    For every lash vertex previously recorded by store_lash_data(), looks up
    the head surface position at the stored head UV and moves the vertex
    along the stored direction by the stored distance.
    """
    diag = geom.diag_mesh_create()
    global LASH_DATA
    body_obj = get_head_body_object(chr_cache)
    lash_index = materials.get_material_slot_by_type(chr_cache, body_obj, "EYELASH")
    head_index = materials.get_material_slot_by_type(chr_cache, body_obj, "SKIN_HEAD")
    body_tm = geom.get_triangulated_bmesh(body_obj)
    mesh: bpy.types.Mesh = body_obj.data
    ul = mesh.uv_layers[0]
    verts_done = set()
    poly: bpy.types.MeshPolygon
    vertex: bpy.types.MeshVertex
    for poly in mesh.polygons:
        if poly.material_index == lash_index:
            for loop_index in poly.loop_indices:
                loop_entry = mesh.loops[loop_index]
                if loop_entry.vertex_index not in verts_done:
                    verts_done.add(loop_entry.vertex_index)
                    vertex = mesh.vertices[loop_entry.vertex_index]
                    lash_uv = ul.data[loop_entry.index].uv.copy()
                    # same 5-decimal UV key used when the data was stored
                    uv_id = lash_uv.to_tuple(5)
                    if uv_id in LASH_DATA:
                        old_lash_uv, head_uv, head_dist, head_dir = LASH_DATA[uv_id]
                        head_co = geom.get_local_from_uv(body_obj, body_tm, head_index, head_uv.to_3d(), 0.001)
                        # diagnostic visualisation only
                        geom.diag_mesh_add_vert(head_co)
                        target_co = head_co + (head_dir * head_dist)
                        vertex.co = target_co.copy()
    geom.diag_finish()
    mesh.update()
def uv_from_point(mesh: bpy.types.Mesh, co, face_index):
    """Map a local-space point on the given face to its UV coordinate.

    Fan-triangulates the polygon and barycentrically transforms *co* into UV
    space; if the transformed point does not land inside any triangle, the UV
    of the face vertex nearest to *co* is returned instead.
    """
    uv_layer = mesh.uv_layers[0]
    poly: bpy.types.MeshPolygon = mesh.polygons[face_index]
    loop_ids = poly.loop_indices
    vert_count = len(loop_ids)
    first_loop = mesh.loops[loop_ids[0]]
    co0 = mesh.vertices[first_loop.vertex_index].co
    uv0 = uv_layer.data[first_loop.index].uv.to_3d()
    # fan triangulation around the polygon's first vertex
    for t in range(1, vert_count - 1):
        loop_a = mesh.loops[loop_ids[t]]
        co_a = mesh.vertices[loop_a.vertex_index].co
        uv_a = uv_layer.data[loop_a.index].uv.to_3d()
        loop_b = mesh.loops[loop_ids[t + 1]]
        co_b = mesh.vertices[loop_b.vertex_index].co
        uv_b = uv_layer.data[loop_b.index].uv.to_3d()
        mapped = mathutils.geometry.barycentric_transform(co, co0, co_a, co_b, uv0, uv_a, uv_b)
        if mathutils.geometry.intersect_point_tri_2d(mapped, uv0, uv_a, uv_b):
            return mathutils.Vector((mapped.x, mapped.y))
    # otherwise return the uv coords of the face vertex nearest to the co
    best_dist = (co0 - co).length
    best_uv = uv_layer.data[first_loop.index].uv
    for idx in range(1, vert_count):
        loop_i = mesh.loops[loop_ids[idx]]
        dist = (mesh.vertices[loop_i.vertex_index].co - co).length
        if dist < best_dist:
            best_dist = dist
            best_uv = uv_layer.data[loop_i.index].uv
    return best_uv
@@ -0,0 +1,512 @@
# Copyright (C) 2021 Victor Soupday
# This file is part of CC/iC Blender Tools <https://github.com/soupday/cc_blender_tools>
#
# CC/iC Blender Tools is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# CC/iC Blender Tools is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with CC/iC Blender Tools. If not, see <https://www.gnu.org/licenses/>.
import bpy
from . import geom, materials, meshutils, utils, vars
# Modifier type identifier and canonical modifier name used for the
# multi-resolution sculpt modifier managed by this addon.
MOD_MULTIRES = "MULTIRES"
MOD_MULTIRES_NAME = "Multi_Res_Sculpt"
def get_object_modifier(obj, type, name = "", before_type=None):
    """Return the first modifier of the given type on *obj*.

    When *name* is given, the modifier must also carry the addon NODE_PREFIX
    and contain *name*. Searching stops (returning None) as soon as a
    modifier of *before_type* is encountered.
    """
    if obj is None:
        return None
    for mod in obj.modifiers:
        if before_type and mod.type == before_type:
            return None
        if not name:
            if mod.type == type:
                return mod
        elif mod.type == type and mod.name.startswith(vars.NODE_PREFIX) and name in mod.name:
            return mod
    return None
def remove_object_modifiers(obj, modifier_type=None, modifier_name="", except_mods: list = None):
    """Remove all modifiers of *obj* matching the optional type/name filters.

    When *modifier_name* is given, only prefixed modifiers containing that
    name are removed. Modifiers listed in *except_mods* (matched by name)
    are always kept.
    """
    if obj is None:
        return
    keep_names = {mod.name for mod in except_mods} if except_mods else set()
    doomed = []
    for mod in obj.modifiers:
        if mod.name in keep_names:
            continue
        type_matches = not modifier_type or mod.type == modifier_type
        if not modifier_name:
            if type_matches:
                doomed.append(mod)
        elif type_matches and mod.name.startswith(vars.NODE_PREFIX) and modifier_name in mod.name:
            doomed.append(mod)
    for mod in doomed:
        obj.modifiers.remove(mod)
# Modifier order
#
def move_mod_last(obj, mod):
    """Move *mod* to the bottom of the object's modifier stack using the
    move-down operator. Returns False normally (also on error); True only
    when the iteration safety cap is hit."""
    try:
        if bpy.context.view_layer.objects.active is not obj:
            obj.select_set(True)
            bpy.context.view_layer.objects.active = obj
        num_mods = len(obj.modifiers)
        if mod is not None:
            # cap iterations in case the operator fails to move the modifier
            guard = num_mods + 1
            while obj.modifiers.find(mod.name) < num_mods - 1:
                bpy.ops.object.modifier_move_down(modifier=mod.name)
                if guard == 0:
                    return True
                guard -= 1
    except Exception as e:
        utils.log_error("Unable to move to last, modifier: " + mod.name, e)
    return False
def move_mod_first(obj, mod):
    """Move *mod* to the top of the object's modifier stack using the
    move-up operator. Returns False normally (also on error); True only
    when the iteration safety cap is hit."""
    try:
        if bpy.context.view_layer.objects.active is not obj:
            obj.select_set(True)
            bpy.context.view_layer.objects.active = obj
        if mod is not None:
            # cap iterations in case the operator fails to move the modifier
            guard = len(obj.modifiers) + 1
            while obj.modifiers.find(mod.name) > 0:
                bpy.ops.object.modifier_move_up(modifier=mod.name)
                if guard == 0:
                    return True
                guard -= 1
    except Exception as e:
        utils.log_error("Unable to move to first, modifier: " + mod.name, e)
    return False
def get_armature_modifier(obj, create=False, armature=None):
    """Return the object's armature modifier.

    Creates one when *create* is True and none exists; when *armature* is
    given, binds the modifier to that armature object.
    """
    if obj is None:
        return None
    mod = next((m for m in obj.modifiers if m.type == "ARMATURE"), None)
    if create and mod is None:
        mod = obj.modifiers.new(name="Armature", type="ARMATURE")
    if mod and armature:
        mod.object = armature
    return mod
# Physics modifiers
#
def get_cloth_physics_mod(obj):
    """Return the object's cloth physics modifier, or None."""
    if obj is None:
        return None
    return next((m for m in obj.modifiers if m.type == "CLOTH"), None)
def get_collision_physics_mod(obj):
    """Return the object's collision physics modifier, or None."""
    if obj is None:
        return None
    return next((m for m in obj.modifiers if m.type == "COLLISION"), None)
def has_cloth_weight_map_mods(obj):
    """True if *obj* carries any addon-prefixed vertex weight edit/mix
    modifiers (i.e. cloth weight maps are attached)."""
    if obj is None:
        return False
    weight_types = ("VERTEX_WEIGHT_EDIT", "VERTEX_WEIGHT_MIX")
    return any(mod.type in weight_types and vars.NODE_PREFIX in mod.name
               for mod in obj.modifiers)
def get_material_weight_map_mods(obj, mat):
    """Return the (edit_mod, mix_mod) weight-map modifiers attached for the
    given material, either of which may be None."""
    edit_mod = None
    mix_mod = None
    if obj is not None and mat is not None:
        safe_name = utils.safe_export_name(mat.name)
        edit_tag = vars.NODE_PREFIX + safe_name + "_WeightEdit"
        mix_tag = vars.NODE_PREFIX + safe_name + "_WeightMix"
        for mod in obj.modifiers:
            if mod.type == "VERTEX_WEIGHT_EDIT" and edit_tag in mod.name:
                edit_mod = mod
            elif mod.type == "VERTEX_WEIGHT_MIX" and mix_tag in mod.name:
                mix_mod = mod
    return edit_mod, mix_mod
# Displacement mods
#
def init_displacement_mod(obj, mod, group_name, direction, strength):
    """Configure a displacement modifier for local-space displacement along
    *direction* with the given *strength*, optionally limited to a vertex group."""
    if not (mod and obj):
        return
    if group_name is not None:
        mod.vertex_group = group_name
    mod.mid_level = 0
    mod.strength = strength
    mod.direction = direction
    mod.space = "LOCAL"
def fix_eye_mod_order(obj):
    """Move the eye weight-edit, displace and UV warp modifiers to the top
    of the modifier stack (weight-edit first, then displace, then UV warp).
    """
    edit_mod = get_object_modifier(obj, "VERTEX_WEIGHT_EDIT", "Eye_WeightEdit")
    displace_mod = get_object_modifier(obj, "DISPLACE", "Eye_Displace")
    warp_mod = get_object_modifier(obj, "UV_WARP", "Eye_UV_Warp")
    # fix: move_mod_first(obj, mod) takes the object as well as the modifier;
    # the previous single-argument calls raised TypeError
    move_mod_first(obj, warp_mod)
    move_mod_first(obj, displace_mod)
    move_mod_first(obj, edit_mod)
def remove_eye_modifiers(obj):
    """Remove all addon-prefixed eye displace / UV warp / weight-edit
    modifiers from a mesh object."""
    if obj and obj.type == "MESH":
        # fix: collect matches first — removing from obj.modifiers while
        # iterating it skips entries
        doomed = [mod for mod in obj.modifiers
                  if vars.NODE_PREFIX in mod.name and
                     mod.type in ("DISPLACE", "UV_WARP", "VERTEX_WEIGHT_EDIT")]
        for mod in doomed:
            obj.modifiers.remove(mod)
def add_eye_modifiers(obj):
    """(Re)build the displacement and UV warp modifiers that produce the
    parallax iris effect on the eye object, one pair per eye, driven by the
    material cache parameters. Existing eye modifiers are removed first and
    the new ones are moved to the top of the stack.
    """
    props = vars.props()
    prefs = vars.prefs()
    # fetch the eye materials (not the cornea materials)
    mat_left, mat_right = materials.get_left_right_eye_materials(obj)
    cache_left = props.get_material_cache(mat_left)
    cache_right = props.get_material_cache(mat_right)
    # Create the eye displacement group
    meshutils.generate_eye_vertex_groups(obj, mat_left, mat_right, cache_left, cache_right)
    remove_eye_modifiers(obj)
    if cache_left and cache_left.material_type == "EYE_LEFT":
        displace_mod_l = obj.modifiers.new(utils.unique_name("Eye_Displace_L"), "DISPLACE")
        warp_mod_l = obj.modifiers.new(utils.unique_name("Eye_UV_Warp_L"), "UV_WARP")
        # displacement depth from the iris depth parameter
        init_displacement_mod(obj, displace_mod_l, prefs.eye_displacement_group + "_L", "Y", 1.5 * cache_left.parameters.eye_iris_depth)
        warp_mod_l.center = (0.5, 0.5)
        warp_mod_l.axis_u = "X"
        warp_mod_l.axis_v = "Y"
        warp_mod_l.vertex_group = prefs.eye_displacement_group + "_L"
        # inverse pupil scale warps the UVs to dilate/contract the pupil
        warp_mod_l.scale = (1.0 / cache_left.parameters.eye_pupil_scale, 1.0 / cache_left.parameters.eye_pupil_scale)
        move_mod_first(obj, warp_mod_l)
        move_mod_first(obj, displace_mod_l)
    if cache_right and cache_right.material_type == "EYE_RIGHT":
        displace_mod_r = obj.modifiers.new(utils.unique_name("Eye_Displace_R"), "DISPLACE")
        warp_mod_r = obj.modifiers.new(utils.unique_name("Eye_UV_Warp_R"), "UV_WARP")
        init_displacement_mod(obj, displace_mod_r, prefs.eye_displacement_group + "_R", "Y", 1.5 * cache_right.parameters.eye_iris_depth)
        warp_mod_r.center = (0.5, 0.5)
        warp_mod_r.axis_u = "X"
        warp_mod_r.axis_v = "Y"
        warp_mod_r.vertex_group = prefs.eye_displacement_group + "_R"
        warp_mod_r.scale = (1.0 / cache_right.parameters.eye_pupil_scale, 1.0 / cache_right.parameters.eye_pupil_scale)
        move_mod_first(obj, warp_mod_r)
        move_mod_first(obj, displace_mod_r)
    utils.log_info("Eye Displacement modifiers applied to: " + obj.name)
def add_eye_occlusion_modifiers(obj):
    """(Re)build the displacement modifiers shaping the eye occlusion mesh.

    One displacement modifier per occlusion region (inner/outer/top/bottom
    and an overall one) per side, each driven by its vertex group and the
    corresponding material cache parameter, moved to the top of the stack.
    """
    props = vars.props()
    prefs = vars.prefs()
    mat_left, mat_right = materials.get_left_right_materials(obj)
    cache_left = props.get_material_cache(mat_left)
    cache_right = props.get_material_cache(mat_right)
    # generate the vertex groups for occlusion displacement
    meshutils.generate_eye_occlusion_vertex_groups(obj, mat_left, mat_right)
    remove_eye_modifiers(obj)
    if cache_left and cache_left.material_type == "OCCLUSION_LEFT":
        # re-create create the displacement modifiers
        displace_mod_inner_l = obj.modifiers.new(utils.unique_name("Occlusion_Displace_Inner_L"), "DISPLACE")
        displace_mod_outer_l = obj.modifiers.new(utils.unique_name("Occlusion_Displace_Outer_L"), "DISPLACE")
        displace_mod_top_l = obj.modifiers.new(utils.unique_name("Occlusion_Displace_Top_L"), "DISPLACE")
        displace_mod_bottom_l = obj.modifiers.new(utils.unique_name("Occlusion_Displace_Bottom_L"), "DISPLACE")
        displace_mod_all_l = obj.modifiers.new(utils.unique_name("Occlusion_Displace_All_L"), "DISPLACE")
        # initialise displacement mods
        init_displacement_mod(obj, displace_mod_inner_l, vars.OCCLUSION_GROUP_INNER + "_L", "NORMAL", cache_left.parameters.eye_occlusion_inner)
        init_displacement_mod(obj, displace_mod_outer_l, vars.OCCLUSION_GROUP_OUTER + "_L", "NORMAL", cache_left.parameters.eye_occlusion_outer)
        init_displacement_mod(obj, displace_mod_top_l, vars.OCCLUSION_GROUP_TOP + "_L", "NORMAL", cache_left.parameters.eye_occlusion_top)
        init_displacement_mod(obj, displace_mod_bottom_l, vars.OCCLUSION_GROUP_BOTTOM + "_L", "NORMAL", cache_left.parameters.eye_occlusion_bottom)
        init_displacement_mod(obj, displace_mod_all_l, vars.OCCLUSION_GROUP_ALL + "_L", "NORMAL", cache_left.parameters.eye_occlusion_displace)
        # fix mod order
        move_mod_first(obj, displace_mod_inner_l)
        move_mod_first(obj, displace_mod_outer_l)
        move_mod_first(obj, displace_mod_top_l)
        move_mod_first(obj, displace_mod_bottom_l)
        move_mod_first(obj, displace_mod_all_l)
    if cache_right and cache_right.material_type == "OCCLUSION_RIGHT":
        # re-create create the displacement modifiers
        displace_mod_inner_r = obj.modifiers.new(utils.unique_name("Occlusion_Displace_Inner_R"), "DISPLACE")
        displace_mod_outer_r = obj.modifiers.new(utils.unique_name("Occlusion_Displace_Outer_R"), "DISPLACE")
        displace_mod_top_r = obj.modifiers.new(utils.unique_name("Occlusion_Displace_Top_R"), "DISPLACE")
        displace_mod_bottom_r = obj.modifiers.new(utils.unique_name("Occlusion_Displace_Bottom_R"), "DISPLACE")
        displace_mod_all_r = obj.modifiers.new(utils.unique_name("Occlusion_Displace_All_R"), "DISPLACE")
        # initialise displacement mods
        init_displacement_mod(obj, displace_mod_inner_r, vars.OCCLUSION_GROUP_INNER + "_R", "NORMAL", cache_right.parameters.eye_occlusion_inner)
        init_displacement_mod(obj, displace_mod_outer_r, vars.OCCLUSION_GROUP_OUTER + "_R", "NORMAL", cache_right.parameters.eye_occlusion_outer)
        init_displacement_mod(obj, displace_mod_top_r, vars.OCCLUSION_GROUP_TOP + "_R", "NORMAL", cache_right.parameters.eye_occlusion_top)
        init_displacement_mod(obj, displace_mod_bottom_r, vars.OCCLUSION_GROUP_BOTTOM + "_R", "NORMAL", cache_right.parameters.eye_occlusion_bottom)
        init_displacement_mod(obj, displace_mod_all_r, vars.OCCLUSION_GROUP_ALL + "_R", "NORMAL", cache_right.parameters.eye_occlusion_displace)
        # fix mod order
        move_mod_first(obj, displace_mod_inner_r)
        move_mod_first(obj, displace_mod_outer_r)
        move_mod_first(obj, displace_mod_top_r)
        move_mod_first(obj, displace_mod_bottom_r)
        move_mod_first(obj, displace_mod_all_r)
    utils.log_info("Eye Occlusion Displacement modifiers applied to: " + obj.name)
def add_tearline_modifiers(obj):
    """(Re)build the displacement modifiers shaping the tearline mesh.

    Creates inner and overall displacement modifiers per side (negative
    strength pulls the tearline inward along Y), driven by the material
    cache parameters, and moves them to the top of the modifier stack.
    """
    props = vars.props()
    prefs = vars.prefs()
    mat_left, mat_right = materials.get_left_right_materials(obj)
    cache_left = props.get_material_cache(mat_left)
    cache_right = props.get_material_cache(mat_right)
    remove_eye_modifiers(obj)
    if cache_left and cache_left.is_tearline():
        is_plus = cache_left.material_type == "TEARLINE_PLUS_LEFT"
        # generate the vertex groups for tearline displacement
        meshutils.generate_tearline_vertex_groups(obj, mat_left, True, is_plus)
        # re-create create the displacement modifiers
        displace_mod_inner_l = obj.modifiers.new(utils.unique_name("Tearline_Displace_Inner_L"), "DISPLACE")
        displace_mod_all_l = obj.modifiers.new(utils.unique_name("Tearline_Displace_All_L"), "DISPLACE")
        # initialise displacement mods
        init_displacement_mod(obj, displace_mod_inner_l, vars.TEARLINE_GROUP_INNER + "_L", "Y", -cache_left.parameters.tearline_inner)
        init_displacement_mod(obj, displace_mod_all_l, vars.TEARLINE_GROUP_ALL + "_L", "Y", -cache_left.parameters.tearline_displace)
        # fix mod order
        move_mod_first(obj, displace_mod_inner_l)
        move_mod_first(obj, displace_mod_all_l)
    if cache_right and cache_right.is_tearline():
        is_plus = cache_right.material_type == "TEARLINE_PLUS_RIGHT"
        # generate the vertex groups for tearline displacement
        meshutils.generate_tearline_vertex_groups(obj, mat_right, False, is_plus)
        # re-create create the displacement modifiers
        displace_mod_inner_r = obj.modifiers.new(utils.unique_name("Tearline_Displace_Inner_R"), "DISPLACE")
        displace_mod_all_r = obj.modifiers.new(utils.unique_name("Tearline_Displace_All_R"), "DISPLACE")
        # initialise displacement mods
        init_displacement_mod(obj, displace_mod_inner_r, vars.TEARLINE_GROUP_INNER + "_R", "Y", -cache_right.parameters.tearline_inner)
        init_displacement_mod(obj, displace_mod_all_r, vars.TEARLINE_GROUP_ALL + "_R", "Y", -cache_right.parameters.tearline_displace)
        # fix mod order
        move_mod_first(obj, displace_mod_inner_r)
        move_mod_first(obj, displace_mod_all_r)
    utils.log_info("Tearline Displacement modifiers applied to: " + obj.name)
def add_decimate_modifier(obj, ratio, name):
    """Get or create a named collapse-decimate modifier on *obj* and set its
    ratio. Returns the modifier."""
    mod: bpy.types.DecimateModifier = get_object_modifier(obj, "DECIMATE", name)
    if mod is None:
        mod = obj.modifiers.new(utils.unique_name(name), "DECIMATE")
    mod.decimate_type = 'COLLAPSE'
    mod.ratio = ratio
    return mod
def add_subdivision(obj: bpy.types.Object, level, name, max_render=3, max_view=1):
    """Get or create a named Catmull-Clark subdivision modifier and configure
    it, clamping render/viewport levels to their respective maximums."""
    mod: bpy.types.SubsurfModifier = get_object_modifier(obj, "SUBSURF", name)
    if mod is None:
        mod = obj.modifiers.new(utils.unique_name(name), "SUBSURF")
    mod.render_levels = min(max_render, level)
    mod.levels = min(max_view, level)
    mod.subdivision_type = "CATMULL_CLARK"
    mod.show_only_control_edges = True
    mod.uv_smooth = 'PRESERVE_BOUNDARIES'
    mod.boundary_smooth = 'PRESERVE_CORNERS'
    mod.use_creases = True
    mod.use_custom_normals = True
    return mod
def add_multi_res_modifier(obj, subdivisions, use_custom_normals = False, uv_smooth = "PRESERVE_BOUNDARIES", quality = 4):
    """Get or create the multi-resolution sculpt modifier on *obj*, configure
    it, and subdivide it *subdivisions* times.

    Returns the modifier, or None if the object could not be made active.
    """
    if utils.set_active_object(obj):
        mod : bpy.types.MultiresModifier
        # consistency fix: use the module constants instead of repeating the
        # "MULTIRES" / "Multi_Res_Sculpt" literals
        mod = get_object_modifier(obj, MOD_MULTIRES, MOD_MULTIRES_NAME)
        if not mod:
            mod = obj.modifiers.new(utils.unique_name(MOD_MULTIRES_NAME), MOD_MULTIRES)
        try:
            # not all Blender versions expose use_custom_normals here
            mod.use_custom_normals = use_custom_normals
        except:
            pass
        mod.uv_smooth = uv_smooth
        mod.quality = quality
        if mod:
            for i in range(0, subdivisions):
                bpy.ops.object.multires_subdivide(modifier=mod.name, mode='CATMULL_CLARK')
        return mod
def get_multi_res_mod(obj):
    """Return the object's multi-resolution modifier, or None."""
    if obj is not None:
        for mod in obj.modifiers:
            # consistency fix: use the MOD_MULTIRES constant defined above
            if mod.type == MOD_MULTIRES:
                return mod
    return None
def has_modifier(obj, modifier_type):
    """True if *obj* has any modifier of the given type."""
    if obj is None:
        return False
    return any(mod.type == modifier_type for mod in obj.modifiers)
def apply_modifier(obj, modifier=None, type=None, preserving=False):
    """Apply a modifier to the mesh.

    If *modifier* is not supplied, the first modifier matching *type* is
    used. When *preserving* is True, or the mesh has shape keys (modifiers
    cannot be applied directly to shape-keyed meshes), the modifier is
    applied to a shape-key-free duplicate and the resulting vertex positions
    are copied back onto the original by matching UV ids.
    """
    if obj:
        if not modifier and type:
            for mod in obj.modifiers:
                if mod.type == type:
                    modifier = mod
                    break
        if modifier:
            if preserving or utils.object_has_shape_keys(obj):
                # apply on a clean duplicate, then copy the verts back
                copy = utils.duplicate_object(obj)
                utils.object_mode_to(copy)
                utils.set_only_active_object(copy)
                utils.remove_all_shape_keys(copy)
                # NOTE(review): except_mods matches by modifier name — assumes
                # the duplicate's modifier kept the same name; confirm
                remove_object_modifiers(copy, except_mods=[modifier])
                bpy.ops.object.modifier_apply(modifier=modifier.name)
                geom.copy_vert_positions_by_uv_id(copy, obj, flatten_udim=False)
                utils.delete_mesh_object(copy)
            else:
                utils.object_mode_to(obj)
                utils.set_only_active_object(obj)
                bpy.ops.object.modifier_apply(modifier=modifier.name)
def copy_base_shape(src_obj, dest_obj):
    """Copy vertex positions from *src_obj* onto *dest_obj*, matching
    vertices by UV id at 5-decimal accuracy."""
    geom.copy_vert_positions_by_uv_id(src_obj, dest_obj, accuracy = 5, flatten_udim=False)
def remove_material_weight_maps(obj, mat):
    """Removes the weight map 'Vertex Weight Edit' modifier for the object's material.

    This does not remove or delete the weight map image or temporary packed image,
    or the texture based on the weight map image, just the modifier.
    """
    edit_mod, mix_mod = get_material_weight_map_mods(obj, mat)
    if edit_mod is not None:
        # fix: corrected "modifer" typo in the log messages
        utils.log_info("Removing weight map vertex edit modifier: " + edit_mod.name)
        obj.modifiers.remove(edit_mod)
    if mix_mod is not None:
        utils.log_info("Removing weight map vertex mix modifier: " + mix_mod.name)
        obj.modifiers.remove(mix_mod)
def add_material_weight_map_modifier(obj, mat, weight_map, vertex_group, normalize = False):
    """Attaches a weight map to the object's material via a 'Vertex Weight Edit' modifier.

    This will attach the supplied weight map or will try to find an existing weight map,
    but will not create a new weight map if it doesn't already exist.

    Args:
        obj: mesh object receiving the modifiers.
        mat: material whose faces the weight map covers.
        weight_map: image holding the painted physics weights.
        vertex_group: name of the physics pin vertex group to mix into.
        normalize: enable weight normalization on the edit modifier where
            the Blender version supports it.

    Returns:
        (edit_mod, mix_mod) on success, None when any argument is missing.
    """
    if obj is None or mat is None or weight_map is None or not vertex_group:
        return
    # Make or re-use a texture based on the weight map image
    mat_name = utils.strip_name(mat.name)
    tex_name = mat_name + "_Weight"
    tex = None
    for t in bpy.data.textures:
        if t.name.startswith(vars.NODE_PREFIX + tex_name):
            tex = t
    if tex is None:
        tex = bpy.data.textures.new(utils.unique_name(tex_name), "IMAGE")
        utils.log_info("Texture: " + tex.name + " created for weight map transfer")
    else:
        utils.log_info("Texture: " + tex.name + " already exists for weight map transfer")
    tex.image = weight_map
    # Create the physics pin vertex group and the material weightmap group if they don't exist:
    mix_group = vertex_group
    material_group = vertex_group + "_" + mat_name
    if mix_group not in obj.vertex_groups:
        pin_vertex_group = obj.vertex_groups.new(name = mix_group)
    else:
        pin_vertex_group = obj.vertex_groups[mix_group]
    if material_group not in obj.vertex_groups:
        weight_vertex_group = obj.vertex_groups.new(name = material_group)
    else:
        weight_vertex_group = obj.vertex_groups[material_group]
    # The material weight map group should contain only those vertices affected by the material, default weight to 1.0
    meshutils.clear_vertex_group(obj, weight_vertex_group)
    mat_vert_indices = meshutils.get_material_vertex_indices(obj, mat)
    weight_vertex_group.add(mat_vert_indices, 1.0, 'ADD')
    # The pin group should contain all vertices in the mesh default weighted to 1.0
    meshutils.set_vertex_group(obj, pin_vertex_group, 1.0)
    # set the pin group in the cloth physics modifier
    mod_cloth = get_cloth_physics_mod(obj)
    if mod_cloth is not None:
        mod_cloth.settings.vertex_group_mass = mix_group
    # re-create or create the Vertex Weight Edit modifier and the Vertex Weight Mix modifer
    remove_material_weight_maps(obj, mat)
    edit_mod : bpy.types.VertexWeightEditModifier
    edit_mod = obj.modifiers.new(utils.unique_name(mat_name + "_WeightEdit"), "VERTEX_WEIGHT_EDIT")
    mix_mod = obj.modifiers.new(utils.unique_name(mat_name + "_WeightMix"), "VERTEX_WEIGHT_MIX")
    # Use the texture as the modifiers vertex weight source
    edit_mod.mask_texture = tex
    # Setup the modifier to generate the inverse of the weight map in the vertex group
    edit_mod.use_add = False
    edit_mod.use_remove = False
    edit_mod.add_threshold = 0.01
    edit_mod.remove_threshold = 0.01
    edit_mod.vertex_group = material_group
    edit_mod.default_weight = 1
    edit_mod.falloff_type = 'LINEAR'
    edit_mod.invert_falloff = True
    edit_mod.mask_constant = 1
    edit_mod.mask_tex_mapping = 'UV'
    edit_mod.mask_tex_use_channel = 'INT'
    try:
        # normalize is not available on all Blender versions
        if normalize:
            edit_mod.normalize = True
    except:
        pass
    # The Vertex Weight Mix modifier takes the material weight map group and mixes it into the pin weight group:
    # (this allows multiple weight maps from different materials and UV layouts to combine in the same mesh)
    mix_mod.vertex_group_a = mix_group
    mix_mod.vertex_group_b = material_group
    mix_mod.invert_mask_vertex_group = True
    mix_mod.default_weight_a = 1
    mix_mod.default_weight_b = 1
    mix_mod.mix_set = 'B' #'ALL'
    mix_mod.mix_mode = 'SET'
    # NOTE(review): invert_mask_vertex_group is set True above and False here;
    # the final value (False) wins — confirm the earlier assignment is intended
    mix_mod.invert_mask_vertex_group = False
    utils.log_info("Weight map: " + weight_map.name + " applied to: " + obj.name + "/" + mat.name)
    return edit_mod, mix_mod
@@ -0,0 +1,124 @@
import bpy
from mathutils import Vector
from . import nodeutils, utils, vars
def normal_to_height(normal_image: bpy.types.Image, height_image: bpy.types.Image, iterations = 10):
    """Approximate a height map from a tangent-space normal map.

    Decodes the normal map, precomputes a directional displacement field for
    each of the 8 pixel-neighbour directions, then iteratively relaxes a
    height field so neighbouring heights agree with those displacements.
    The normalized grey-scale result is written into *height_image* (RGB
    channels; alpha is left untouched).
    """
    # fix: was a redundant double assignment (pixels = pixels = list(...))
    pixels = list(normal_image.pixels)
    w = int(normal_image.size[0])
    h = int(normal_image.size[1])
    l = w*h
    N: Vector
    D: Vector
    T: Vector
    # convert to normal vectors
    normals = [None]*l
    for i in range(0, l):
        p = i*4
        x = 2*pixels[p]-1
        y = 2*pixels[p+1]-1
        z = 2*pixels[p+2]-1
        N = Vector((x,y,z))
        #N.normalize()
        normals[i] = N
    # per-direction displacement maps: project each neighbour direction onto
    # the surface tangent plane and take its vertical (z) component
    directional_displacements = []
    utils.log_always("Building directional displacements")
    for j in range(-1, 2):
        for k in range(-1, 2):
            D = Vector((j, -k, 0))
            if k == 0 and j == 0:
                directional_displacements.append(None)
            else:
                displacement_map = [0]*l
                for i in range(0, l):
                    N = normals[i]
                    a = N.dot(D)
                    T = D - N*a
                    T.normalize()
                    d = T.z * 0.5 * D.length
                    displacement_map[i] = d
                directional_displacements.append(displacement_map)
    heights = [0]*l
    for itx in range(0, iterations):
        utils.log_always(f"iteration: {itx}")
        for v in range(0, h):
            for u in range(0, w):
                i = u+v*w
                height = 0
                for j in range(-1, 2, 1):
                    # clamp neighbour coordinates at the image edges
                    uu = min(max(u+j, 0), w-1)
                    for k in range(-1, 2, 1):
                        if j == 0 and k == 0: continue
                        vv = min(max(v+k, 0), h-1)
                        ii = uu + vv*w
                        jk = j + 3*k + 4
                        d = directional_displacements[jk][ii] + directional_displacements[jk][i]
                        height += heights[ii] - d
                height /= 8
                heights[i] = height
    min_height = 999999
    max_height = -999999
    abs_height = 0
    for i in range(0, l):
        min_height = min(min_height, heights[i])
        max_height = max(max_height, heights[i])
        abs_height = max(abs_height, abs(heights[i]))
    utils.log_always(f"min: {min_height} max: {max_height} abs: {abs_height}")
    # fix: guard against a completely flat height field (division by zero)
    if abs_height == 0:
        abs_height = 1
    pixels = list(height_image.pixels)
    for i in range(0, l):
        p = i * 4
        # fix: the loop previously reused 'h' (the image height) for the
        # per-pixel grey value
        grey = min(5*0.5*heights[i]/abs_height + 0.5, 1)
        pixels[p] = grey
        pixels[p+1] = grey
        pixels[p+2] = grey
    height_image.pixels[:] = pixels
def build_displacement_system(chr_cache, mat_cache):
    """Experimental: generate a test height map from a normal map image.

    NOTE(review): this looks like work-in-progress/debug scaffolding — the
    node lookups below are unused by the rest of the function and the source
    image name "3K1L562.png" is hard-coded (a KeyError is raised if that
    image is not loaded). Confirm before relying on this.
    """
    mat: bpy.types.Material = mat_cache.material
    nodes = mat.node_tree.nodes
    links = mat.node_tree.links
    # currently unused node lookups
    normal_node = nodeutils.find_node_by_type_and_keywords(nodes, "TEX_IMAGE", "(NORMAL)")
    normal1_node = nodeutils.find_node_by_type_and_keywords(nodes, "TEX_IMAGE", "(WRINKLENORMAL1)")
    normal2_node = nodeutils.find_node_by_type_and_keywords(nodes, "TEX_IMAGE", "(WRINKLENORMAL2)")
    normal3_node = nodeutils.find_node_by_type_and_keywords(nodes, "TEX_IMAGE", "(WRINKLENORMAL3)")
    blend_normal_node = nodeutils.find_node_by_type_and_keywords(nodes, "TEX_IMAGE", "(NORMALBLEND)")
    image = bpy.data.images["3K1L562.png"]
    if "TEST_HEIGHT" in bpy.data.images:
        height_image = bpy.data.images["TEST_HEIGHT"]
        height_image.scale(image.size[0], image.size[1])
    else:
        height_image = bpy.data.images.new("TEST_HEIGHT", image.size[0], image.size[1], is_data=True)
    height_image.pixels[0] = 0
    normal_to_height(image, height_image, 5)
    return
@@ -0,0 +1,702 @@
# Copyright (C) 2021 Victor Soupday
# This file is part of CC/iC Blender Tools <https://github.com/soupday/cc_blender_tools>
#
# CC/iC Blender Tools is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# CC/iC Blender Tools is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with CC/iC Blender Tools. If not, see <https://www.gnu.org/licenses/>.
import bpy
import socket
import os, tempfile
from mathutils import Vector
from . import addon_updater_ops, colorspace, vars
# Enum items (identifier, name, description) for the maximum texture size
# preference dropdown.
MAX_TEX_SIZES = [
    ("1024","1024 x 1024","1024 x 1024 texture size"),
    ("2048","2048 x 2048","2048 x 2048 texture size"),
    ("4096","4096 x 4096","4096 x 4096 texture size"),
    ("8192","8192 x 8192","8192 x 8192 texture size"),
]
def reset_eevee():
    """Restore all EEVEE shading preference values to their defaults."""
    prefs: CC3ToolsAddonPreferences = vars.prefs()
    eevee_defaults = {
        # b443b preference set
        "eevee_iris_brightness_b443b": 0.75,
        "eevee_sss_skin_b443b": 1.43,
        "eevee_sss_hair_b443b": 1.0,
        "eevee_sss_teeth_b443b": 1.5,
        "eevee_sss_tongue_b443b": 1.0,
        "eevee_sss_eyes_b443b": 1.0,
        "eevee_sss_default_b443b": 1.0,
        "eevee_normal_b443b": 1.0,
        "eevee_normal_skin_b443b": 1.0,
        "eevee_micro_normal_b443b": 1.0,
        "eevee_roughness_power_b443b": 0.5625,
        # b341 preference set
        "eevee_sss_skin_b341": 1.0,
        "eevee_sss_hair_b341": 1.0,
        "eevee_sss_teeth_b341": 1.0,
        "eevee_sss_tongue_b341": 1.0,
        "eevee_sss_eyes_b341": 1.0,
        "eevee_sss_default_b341": 1.0,
        "eevee_normal_b341": 1.0,
        "eevee_normal_skin_b341": 1.0,
        "eevee_micro_normal_b341": 1.0,
        "eevee_roughness_power_b341": 0.75,
    }
    for pref_name, default_value in eevee_defaults.items():
        setattr(prefs, pref_name, default_value)
def reset_cycles():
    """Restore all Cycles shading preference values to their defaults."""
    prefs: CC3ToolsAddonPreferences = vars.prefs()
    cycles_defaults = {
        # b443b preference set
        "cycles_iris_brightness_b443b": 0.75,
        "cycles_sss_skin_b443b": 1.0,
        "cycles_sss_hair_b443b": 0.5,
        "cycles_sss_teeth_b443b": 1.0,
        "cycles_sss_tongue_b443b": 1.0,
        "cycles_sss_eyes_b443b": 1.0,
        "cycles_sss_default_b443b": 1.0,
        "cycles_normal_b443b": 1.0,
        "cycles_normal_skin_b443b": 1.25,
        "cycles_micro_normal_b443b": 1.25,
        "cycles_roughness_power_b443b": 0.75,
        # b341 preference set
        "cycles_sss_skin_b341": 0.264,
        "cycles_sss_hair_b341": 0.05,
        "cycles_sss_teeth_b341": 0.5,
        "cycles_sss_tongue_b341": 0.5,
        "cycles_sss_eyes_b341": 0.01,
        "cycles_sss_default_b341": 0.5,
        "cycles_normal_b341": 1.0,
        "cycles_normal_skin_b341": 1.125,
        "cycles_micro_normal_b341": 1.25,
        "cycles_roughness_power_b341": 1.0,
    }
    for pref_name, default_value in cycles_defaults.items():
        setattr(prefs, pref_name, default_value)
def reset_rigify():
    """Restore all Rigify preference values to their defaults."""
    prefs: CC3ToolsAddonPreferences = vars.prefs()
    rigify_defaults = {
        "rigify_export_t_pose": True,
        "rigify_export_mode": "MOTION",
        "rigify_export_naming": "METARIG",
        "rigify_expression_rig": "META",
        "rigify_auto_retarget": True,
        "rigify_preview_shape_keys": True,
        "rigify_limit_control_range": False,
        "rigify_bake_shape_keys": True,
        "rigify_preview_retarget_fk_ik": "BOTH",
        "rigify_bake_nla_fk_ik": "BOTH",
        "rigify_align_bones": "METARIG",
        "rigify_face_control_color": (1.0, 0.88, 0.11, 1.0),
    }
    for pref_name, default_value in rigify_defaults.items():
        setattr(prefs, pref_name, default_value)
def reset_datalink():
    """Restore all DataLink preference values to their defaults."""
    prefs: CC3ToolsAddonPreferences = vars.prefs()
    datalink_defaults = {
        "datalink_auto_start": False,
        "datalink_frame_sync": False,
        "datalink_preview_shape_keys": True,
        "datalink_match_client_rate": True,
        "datalink_retarget_prop_actions": True,
        "datalink_disable_tweak_bones": True,
        "datalink_hide_prop_bones": True,
        "datalink_send_mode": "ACTIVE",
        "datalink_confirm_mismatch": True,
        "datalink_confirm_replace": True,
    }
    for pref_name, default_value in datalink_defaults.items():
        setattr(prefs, pref_name, default_value)
def reset_preferences():
    """Reset ALL add-on preferences to their defaults.

    Sets the general build/import/export/physics preferences directly, then
    delegates to the per-group reset functions (eevee, cycles, rigify,
    datalink) so all tuning groups are covered by the single
    "Reset to Defaults" button.
    """
    prefs: CC3ToolsAddonPreferences = vars.prefs()
    # lighting & material build modes
    prefs.quality_lighting = "CC3"
    prefs.pipeline_lighting = "CC3"
    prefs.morph_lighting = "MATCAP"
    prefs.quality_mode = "ADVANCED"
    prefs.pipeline_mode = "ADVANCED"
    prefs.morph_mode = "ADVANCED"
    prefs.log_level = "ERRORS"
    # detection keywords
    prefs.hair_hint = "hair,scalp,beard,mustache,sideburns,ponytail,braid,!bow,!band,!tie,!ribbon,!ring,!butterfly,!flower"
    prefs.hair_scalp_hint = "scalp,base,skullcap"
    prefs.debug_mode = False
    # physics / eyes
    prefs.physics_group = "CC_Physics"
    prefs.refractive_eyes = "PARALLAX"
    prefs.eye_displacement_group = "CC_Eye_Displacement"
    prefs.max_texture_size = 4096
    # export options
    prefs.export_json_changes = True
    prefs.export_texture_changes = True
    prefs.export_legacy_bone_roll_fix = False
    prefs.export_bake_nodes = False
    prefs.export_bake_bump_to_normal = True
    prefs.export_unity_remove_objects = True
    prefs.export_texture_size = "2048"
    prefs.export_require_key = True
    prefs.export_legacy_revert_material_names = False
    # import / build options
    prefs.import_auto_convert = True
    prefs.auto_convert_materials = True
    prefs.import_deduplicate = True
    prefs.import_reset_custom_normals = False
    prefs.build_pack_texture_channels = False
    prefs.build_pack_wrinkle_diffuse_roughness = False
    prefs.build_reuse_baked_channel_packs = True
    prefs.build_limit_textures = False
    prefs.build_skin_shader_dual_spec = False
    prefs.build_shape_key_bone_drivers_jaw = False
    prefs.build_shape_key_bone_drivers_eyes = False
    prefs.build_shape_key_bone_drivers_head = False
    prefs.build_body_key_drivers = False
    prefs.bake_use_gpu = False
    prefs.build_armature_edit_modifier = True
    prefs.build_armature_preserve_volume = False
    prefs.physics_weightmap_curve = 5.0
    prefs.convert_non_standard_type = "PROP"
    # reset the grouped tuning preferences too.
    # NOTE(review): reset_eevee() was previously missing here, so a full
    # preferences reset left the eevee tuning values untouched while cycles
    # values were reset — now all four groups are reset consistently.
    reset_eevee()
    reset_cycles()
    reset_rigify()
    reset_datalink()
def set_view_transform(self, context):
    """Property update callback: apply the preferred display look.

    Copies the `lighting_use_look` preference ("Filmic" or "AgX") onto the
    scene's color management view transform.
    """
    prefs: CC3ToolsAddonPreferences = vars.prefs()
    view = context.scene.view_settings
    try:
        view.view_transform = prefs.lighting_use_look
    except Exception:
        # The chosen view transform may not exist in the active OCIO
        # configuration (e.g. an ACES config) — best effort, ignore.
        # (Was a bare `except:` which would also swallow KeyboardInterrupt.)
        pass
def check_datalink_host(self, context):
    """Property update callback: re-validate the DataLink hostname.

    Only re-checks when a hostname is set AND it is currently flagged bad;
    attempts a DNS lookup and updates the `datalink_bad_hostname` flag.
    """
    prefs = vars.prefs()
    if prefs.datalink_host and prefs.datalink_bad_hostname:
        try:
            # resolve only to validate — the resulting IP is not needed
            socket.gethostbyname(prefs.datalink_host)
            prefs.datalink_bad_hostname = False
        except OSError:
            # socket.gaierror (resolution failure) is a subclass of OSError;
            # narrowed from a bare `except:`.
            prefs.datalink_bad_hostname = True
class CC3OperatorPreferences(bpy.types.Operator):
    """CC3 Preferences Functions"""
    bl_idname = "cc3.setpreferences"
    bl_label = "CC3 Preferences Functions"
    bl_options = {"REGISTER", "UNDO", "INTERNAL"}

    param: bpy.props.StringProperty(
        name="param",
        default=""
    )

    def execute(self, context):
        # dispatch table keyed on the operator parameter; at most one
        # entry can match, identical to the original if-chain.
        reset_actions = {
            "RESET_CYCLES": reset_cycles,
            "RESET_EEVEE": reset_eevee,
            "RESET_DATALINK": reset_datalink,
            "RESET_PREFS": reset_preferences,
        }
        action = reset_actions.get(self.param)
        if action is not None:
            action()
        return {"FINISHED"}

    @classmethod
    def description(cls, context, properties):
        """Tooltip text for the operator buttons."""
        if properties.param == "RESET_PREFS":
            return "Reset preferences to defaults"
        return ""
class CC3ToolsAddonPreferences(bpy.types.AddonPreferences):
    """Persistent add-on preferences for CC/iC Tools.

    Declares every user preference as a bpy property: lighting and material
    build modes, detection keywords, import/export options, bake/texture
    sizes, physics, Eevee/Cycles tuning modifiers (per Blender version
    suffix: _b443b for 4.4.3+, _b341 for 3.4.1), Rigify, DataLink and
    add-on updater settings. The draw() method renders the add-on
    preferences panel.
    """
    # this must match the add-on name, use '__package__'
    # when defining this in a submodule of a python package.
    bl_idname = __name__.partition(".")[0]

    # --- lighting presets for each workflow ---
    quality_lighting: bpy.props.EnumProperty(items=[
        ("BLENDER","Blender Default","Blenders default lighting setup"),
        ("MATCAP","Solid Matcap","Solid shading matcap lighting for sculpting / mesh editing"),
        ("CC3","CC3 Default","Replica of CC3 default lighting setup"),
        ("STUDIO","Studio Right","Right facing 3 point lighting with the studio hdri"),
        ("COURTYARD","Courtyard Left","Left facing soft 3 point lighting with the courtyard hdri"),
    ], default="CC3", name = "Render / Quality Lighting")

    pipeline_lighting: bpy.props.EnumProperty(items=[
        ("BLENDER","Blender Default","Blenders default lighting setup"),
        ("MATCAP","Solid Matcap","Solid shading matcap lighting for sculpting / mesh editing"),
        ("CC3","CC3 Default","Replica of CC3 default lighting setup"),
        ("STUDIO","Studio Right","Right facing 3 point lighting with the studio hdri"),
        ("COURTYARD","Courtyard Left","Left facing soft 3 point lighting with the courtyard hdri"),
    ], default="CC3", name = "(FBX) Accessory Editing Lighting")

    morph_lighting: bpy.props.EnumProperty(items=[
        ("BLENDER","Blender Default","Blenders default lighting setup"),
        ("MATCAP","Solid Matcap","Solid shading matcap lighting for sculpting / mesh editing"),
        ("CC3","CC3 Default","Replica of CC3 default lighting setup"),
        ("STUDIO","Studio Right","Right facing 3 point lighting with the studio hdri"),
        ("COURTYARD","Courtyard Left","Left facing soft 3 point lighting with the courtyard hdri"),
    ], default="MATCAP", name = "(OBJ) Morph Edit Lighting")

    # --- material build complexity per workflow ---
    quality_mode: bpy.props.EnumProperty(items=[
        ("BASIC","Basic Materials","Build basic PBR materials for quality / rendering"),
        ("ADVANCED","Advanced Materials","Build advanced materials for quality / rendering"),
    ], default="ADVANCED", name = "Render / Quality Material Mode")

    # = accessory_mode
    pipeline_mode: bpy.props.EnumProperty(items=[
        ("BASIC","Basic Materials","Build basic PBR materials for character morph / accessory editing"),
        ("ADVANCED","Advanced Materials","Build advanced materials for character morph / accessory editing"),
    ], default="ADVANCED", name = "Accessory Material Mode")

    morph_mode: bpy.props.EnumProperty(items=[
        ("BASIC","Basic Materials","Build basic PBR materials for character morph / accessory editing"),
        ("ADVANCED","Advanced Materials","Build advanced materials for character morph / accessory editing"),
    ], default="ADVANCED", name = "Character Morph Material Mode")

    log_level: bpy.props.EnumProperty(items=[
        ("ALL","All","Log everything to console."),
        ("WARN","Warnings & Errors","Log warnings and error messages to console."),
        ("ERRORS","Just Errors","Log only errors to console."),
        ("DETAILS","Details","All including details."),
    ], default="ERRORS", name = "(Debug) Log Level")

    # --- object detection keywords (comma separated, "!" negates) ---
    hair_hint: bpy.props.StringProperty(default="hair,scalp,beard,mustache,sideburns,ponytail,braid,!bow,!band,!tie,!ribbon,!ring,!butterfly,!flower", name="Hair detection keywords")
    hair_scalp_hint: bpy.props.StringProperty(default="scalp,base,skullcap", name="Scalp detection keywords")
    debug_mode: bpy.props.BoolProperty(default=False)

    # --- export options ---
    export_require_key: bpy.props.BoolProperty(default=True, name="Export Require Key", description="Ensure that exports back to CC3 have a valid Fbx/Obj Key file")
    export_json_changes: bpy.props.BoolProperty(default=True, name="Material Parameters", description="Export all material and shader parameter changes to the character Json data. Setting to False keeps original material and shader parameters.")
    export_texture_changes: bpy.props.BoolProperty(default=True, name="Textures", description="Export all texture changes to the character Json data. Setting to False keeps original textures.")
    export_legacy_bone_roll_fix: bpy.props.BoolProperty(default=False, name="Teeth Bone Fix", description="(Experimental) Apply zero roll to upper and lower teeth bones to fix teeth alignment problems re-importing to CC3")
    export_bake_nodes: bpy.props.BoolProperty(default=True, name="Bake Custom Nodes", description="(Very Experimental) Bake any custom nodes (non texture image) attached to shader texture map sockets on export.")
    export_bake_bump_to_normal: bpy.props.BoolProperty(default=True, name="Combine Normals", description="(Very Experimental) When both a bump map and a normal is present, bake the bump map into the normal. (CC3 materials can only have one, normal map or bump map.)")
    export_unity_remove_objects: bpy.props.BoolProperty(default=True, name="Unity: Remove Non-Character Objects.", description="Removes all objects not attached to the character, when exporting to Unity.")
    # revert materials is off by default now as CC4 deduplicates by material name even if they are not the same material.
    export_legacy_revert_material_names: bpy.props.BoolProperty(default=False, name="Revert Material Names", description="Revert material names to match their original names from the source Json. Note: This may only be needed for exporting back CC3 or if there are problems with duplicate materials exporting back to CC4.")

    export_unity_mode: bpy.props.EnumProperty(items=[
        ("BLEND","Blend File","Save the project as a blend file in a Unity project. All textures and folders will be copied to the new location and made relative to the blend file."),
        ("FBX","FBX","Export the character as an .Fbx file to the specified location. All textures and folders will be copied."),
    ], default="BLEND", name = "Unity Export")

    export_non_standard_mode: bpy.props.EnumProperty(items=[
        ("HUMANOID","Humanoid","Export the selected armature and objects as a humanoid .Fbx file, with generated .json data for import into CC4 (Only)"),
        ("CREATURE","Creature","Export the selected armature and objects as a creature .Fbx file, with generated .json data for import into CC4 (Only)"),
        ("PROP","Prop","Export the selected objects as a prop .Fbx file, with generated .json data for import into CC4 (Only)"),
    ], default="HUMANOID", name = "Non-standard Export")

    export_texture_size: bpy.props.EnumProperty(items=vars.ENUM_TEX_LIST, default="2048",
                                                name="Export Texture Size",
                                                description="Size of procedurally generated textures to bake")

    # --- physics / eyes ---
    physics_group: bpy.props.StringProperty(default="CC_Physics", name="Physics Vertex Group Prefix")

    refractive_eyes: bpy.props.EnumProperty(items=[
        ("PARALLAX","Parallax Eye","(Experimental) Approximatated Parallax Refraction in a single cornea material which is not subject to Eevee limitations on Subsurface scattering and receiving shadows."),
        ("SSR","SSR Eye","Screen Space Refraction with a transmissive & transparent cornea material over an opaque eye (iris) material. SSR Materials do not receive full shadows and cannot have Subsurface scattering in Eevee."),
    ], default="SSR", name = "Refractive Eyes")

    # --- sculpting / baking ---
    detail_sculpt_sub_target: bpy.props.EnumProperty(items=[
        ("HEAD","Head","Sculpt on the head only"),
        ("BODY","Body","Sculpt on the body only"),
        ("ALL","All","Sculpt the entire body"),
    ], default="HEAD", name = "Sculpt Target")

    detail_multires_level: bpy.props.IntProperty(default=4, min = 1, max = 6, name="Level",
                                                 description="Starting multi-resolution level for detail sculpting")
    sculpt_multires_level: bpy.props.IntProperty(default=2, min = 1, max = 6, name="Level",
                                                 description="Starting multi-resolution level for body sculpting")
    detail_normal_bake_size: bpy.props.EnumProperty(items=vars.ENUM_TEX_LIST, default="4096", description="Resolution of detail sculpt normals to bake")
    body_normal_bake_size: bpy.props.EnumProperty(items=vars.ENUM_TEX_LIST, default="2048", description="Resolution of full body sculpt normals to bake")

    # --- OCIO / ACES color space overrides ---
    aces_srgb_override: bpy.props.EnumProperty(items=colorspace.fetch_all_color_spaces, default=0, description="ACES Color space to override for sRGB textures")
    aces_data_override: bpy.props.EnumProperty(items=colorspace.fetch_data_color_spaces, default=0, description="ACES Color space to override for Non-Color or Linear textures")

    #refractive_eyes: bpy.props.BoolProperty(default=True, name="Refractive Eyes", description="Generate refractive eyes with iris depth and pupil scale parameters")
    eye_displacement_group: bpy.props.StringProperty(default="CC_Eye_Displacement", name="Eye Displacement Group", description="Eye Iris displacement vertex group name")

    # --- material build options ---
    build_limit_textures: bpy.props.BoolProperty(default=False, name="Limit Textures",
                                                 description="Attempt to limit the number of imported textures to 8 or less. This is to attempt to address problems with OSX hardware limitations allowing only 8 active textures in a material.\n"
                                                 "Note: This will mean the head material will be simpler than intended and no wrinkle map system is possible. "
                                                 "Also this will force on texture channel packing to reduce textures on all materials, which will slow down imports significantly")
    build_pack_texture_channels: bpy.props.BoolProperty(default=False, name="Pack Texture Channels",
                                                        description="Pack compatible linear texture channels to reduce texture lookups.\n\n"
                                                        "Note: This will significantly increase import time.\n\n"
                                                        "Note: Wrinkle map textures are always channel packed to reduce texture load")
    build_pack_wrinkle_diffuse_roughness: bpy.props.BoolProperty(default=False, name="Wrinkle Maps into Diffuse Alpha",
                                                                 description="Packs wrinkle map roughness channels into the diffuse alpha channels. This will free up one more texture slot in the skin head material")
    build_reuse_baked_channel_packs: bpy.props.BoolProperty(default=True, name="Reuse Channel Packs",
                                                            description="Reuse existing channel packs on material rebuild, otherwise rebake the texture channel packs")
    build_armature_edit_modifier: bpy.props.BoolProperty(default=True, name="Use Edit Modifier",
                                                         description="Automatically set to use armature modifier in mesh edit mode for all armature modifiers in the character. (i.e. edit in place)")
    build_armature_preserve_volume: bpy.props.BoolProperty(default=False, name="Preserve Volume",
                                                           description="Automatically set use preserve volume for all armature modifiers in the character")
    build_skin_shader_dual_spec: bpy.props.BoolProperty(default=False, name="Dual Specular Skin",
                                                        description="Use a dual specular skin shader arrangement")
    build_shape_key_bone_drivers_jaw: bpy.props.BoolProperty(default=True, name="Shape Keys Drive Jaw Bone",
                                                             description="Add drivers to the jaw bone from facial expression shape keys")
    build_shape_key_bone_drivers_eyes: bpy.props.BoolProperty(default=True, name="Shape Keys Drive Eye Bones",
                                                              description="Add drivers to the eye bones from facial expression shape keys")
    build_shape_key_bone_drivers_head: bpy.props.BoolProperty(default=False, name="Shape Keys Drive Head Bone",
                                                              description="Add drivers to the head bone from facial expression shape keys.\nNote: Not usually needed. Only enable if you want the head tilt to be controlled *only* by the shape-keys")
    build_body_key_drivers: bpy.props.BoolProperty(default=True, name="Body Shape Keys Drive All",
                                                   description="Add drivers so that all shape keys on the character are driven by the body shape keys. " \
                                                   "(So that only the body shape keys need to be animated or controlled)")

    max_texture_size: bpy.props.FloatProperty(default=4096, min=512, max=4096)

    # --- import options ---
    import_reset_custom_normals: bpy.props.BoolProperty(default=False, name="Reset Custom Normals",
                                                        description="Reset the custom normals on all imported meshes (can help resolve lighting artifacts)")
    import_deduplicate: bpy.props.BoolProperty(default=True, name="De-duplicate Materials",
                                               description="Detects and re-uses duplicate textures and consolidates materials with same name, textures and parameters into a single material")
    import_auto_convert: bpy.props.BoolProperty(default=True, name="Auto Convert Generic",
                                                description="When importing generic characters (GLTF, GLB, VRM or OBJ) automatically convert to Reallusion Non-Standard characters or props."
                                                "Which sets up Reallusion import compatible materials and material parameters")
    auto_convert_materials: bpy.props.BoolProperty(default=True, name="Auto Convert Materials",
                                                   description="When importing generic characters (GLTF, GLB, VRM or OBJ) or adding new objects to a charcater, automatically convert materials to custom Reallusion compatible materials.")

    # weight transfer blend
    weight_blend_distance_min: bpy.props.FloatProperty(default=0.015, min=0.0, soft_max=0.05, max=1.0,
                                                       subtype="DISTANCE", precision=3,
                                                       name="Blend Min Distance",
                                                       description="Distance for full body weights")
    weight_blend_distance_max: bpy.props.FloatProperty(default=0.05, min=0.0, soft_max=0.25, max=1.0,
                                                       subtype="DISTANCE", precision=3,
                                                       name="Blend Max Distance",
                                                       description="Distance for full source blend weights")
    weight_blend_distance_range: bpy.props.FloatProperty(default=25, min=0, max=100, subtype="PERCENTAGE",
                                                         name="Blend Range",
                                                         description="Range from Blend Min Distance to the maximum body distance for each mesh to use as the Blend Max Distance")
    weight_blend_use_range: bpy.props.BoolProperty(default=False,
                                                   name="Auto Range",
                                                   description="Use an automatically calculated Distance Blend Max based on a percentage of the largest distance to the selected mesh from the body. Otherwise use a fixed distance for the Distance Blend Max")
    weight_blend_selected_only: bpy.props.BoolProperty(default=False,
                                                       name="Selected Verts",
                                                       description="Only blender the weights for the selected vertices in each mesh")

    # Eevee Modifiers
    # "_b443b" suffix = values used for Blender 4.4.3+, "_b341" = Blender 3.4.1
    eevee_iris_brightness_b443b: bpy.props.FloatProperty(default=0.75, min=0, max=2)
    eevee_sss_skin_b443b: bpy.props.FloatProperty(default=1.43)
    eevee_sss_hair_b443b: bpy.props.FloatProperty(default=1.0)
    eevee_sss_teeth_b443b: bpy.props.FloatProperty(default=1.5)
    eevee_sss_tongue_b443b: bpy.props.FloatProperty(default=1.0)
    eevee_sss_eyes_b443b: bpy.props.FloatProperty(default=1.0)
    eevee_sss_default_b443b: bpy.props.FloatProperty(default=1.0)
    eevee_normal_b443b: bpy.props.FloatProperty(default=1.0)
    eevee_normal_skin_b443b: bpy.props.FloatProperty(default=1.0)
    eevee_micro_normal_b443b: bpy.props.FloatProperty(default=1.0)
    eevee_roughness_power_b443b: bpy.props.FloatProperty(default=0.5625)
    #
    eevee_sss_skin_b341: bpy.props.FloatProperty(default=1.0)
    eevee_sss_hair_b341: bpy.props.FloatProperty(default=1.0)
    eevee_sss_teeth_b341: bpy.props.FloatProperty(default=1.0)
    eevee_sss_tongue_b341: bpy.props.FloatProperty(default=1.0)
    eevee_sss_eyes_b341: bpy.props.FloatProperty(default=1.0)
    eevee_sss_default_b341: bpy.props.FloatProperty(default=1.0)
    eevee_normal_b341: bpy.props.FloatProperty(default=1.0)
    eevee_normal_skin_b341: bpy.props.FloatProperty(default=1.0)
    eevee_micro_normal_b341: bpy.props.FloatProperty(default=1.0)
    eevee_roughness_power_b341: bpy.props.FloatProperty(default=0.75)
    #
    # Cycles Modifiers
    cycles_iris_brightness_b443b: bpy.props.FloatProperty(default=0.75, min=0, max=2)
    cycles_sss_skin_b443b: bpy.props.FloatProperty(default=1.0)
    cycles_sss_hair_b443b: bpy.props.FloatProperty(default=0.5)
    cycles_sss_teeth_b443b: bpy.props.FloatProperty(default=1.0)
    cycles_sss_tongue_b443b: bpy.props.FloatProperty(default=1.0)
    cycles_sss_eyes_b443b: bpy.props.FloatProperty(default=1.0)
    cycles_sss_default_b443b: bpy.props.FloatProperty(default=1.0)
    cycles_normal_b443b: bpy.props.FloatProperty(default=1.0)
    cycles_normal_skin_b443b: bpy.props.FloatProperty(default=1.25)
    cycles_micro_normal_b443b: bpy.props.FloatProperty(default=1.25)
    cycles_roughness_power_b443b: bpy.props.FloatProperty(default=0.75)
    #
    cycles_sss_skin_b341: bpy.props.FloatProperty(default=0.264)
    cycles_sss_hair_b341: bpy.props.FloatProperty(default=0.05)
    cycles_sss_teeth_b341: bpy.props.FloatProperty(default=0.5)
    cycles_sss_tongue_b341: bpy.props.FloatProperty(default=0.5)
    cycles_sss_eyes_b341: bpy.props.FloatProperty(default=0.01)
    cycles_sss_default_b341: bpy.props.FloatProperty(default=0.5)
    cycles_normal_b341: bpy.props.FloatProperty(default=1.0)
    cycles_normal_skin_b341: bpy.props.FloatProperty(default=1.125)
    cycles_micro_normal_b341: bpy.props.FloatProperty(default=1.25)
    cycles_roughness_power_b341: bpy.props.FloatProperty(default=1.0)

    # --- lighting / bake / texture size options ---
    lighting_presets_all: bpy.props.BoolProperty(default=False,
                                                 name="Show All Lighting Presets",
                                                 description="Show / hide hidden lighting presets")
    lighting_use_look: bpy.props.EnumProperty(items=[
        ("Filmic","Filmic","Use Filmic display space"),
        ("AgX","AgX","Use AgX display space"),
    ], default="AgX", name="Color management display space", update=set_view_transform)

    bake_use_gpu: bpy.props.BoolProperty(default=False, description="Bake on the GPU for faster more accurate baking.", name="GPU Bake")
    bake_objects_mode: bpy.props.EnumProperty(items=[
        ("ALL","All","Bake all character objects"),
        ("SELECTED","Selected","Bake only selected characeter objects"),
    ], default="ALL", name = "Character object bake mode")

    use_max_tex_size: bpy.props.BoolProperty(default=False, name="Limit Texture Size", description="Limit texture sizes by texture category")
    size_max_tex_default: bpy.props.EnumProperty(items=MAX_TEX_SIZES, default="2048", name="Default", description="Mid level detail textures such as: diffuse, roughness, metallic")
    size_max_tex_detail: bpy.props.EnumProperty(items=MAX_TEX_SIZES, default="4096", name="Detail", description="Textures that require more details such as normals, displacements & cavity maps")
    size_max_tex_minimal: bpy.props.EnumProperty(items=MAX_TEX_SIZES, default="1024", name="Minimal", description="Textures that don't need much detail: Subsurface, transmission and masks")

    physics_cloth_hair: bpy.props.BoolProperty(default=True, description="Set up cloth physics on the hair objects.", name="Hair Cloth Physics")
    physics_cloth_clothing: bpy.props.BoolProperty(default=True, description="Set up cloth physics on the clothing and accessory objects.", name="Clothing Cloth Physics")
    physics_weightmap_curve: bpy.props.FloatProperty(default=5.0, min=1.0, max=10.0, name="Physics Weightmap Curve",
                                                     description="Power curve used to convert PhysX weightmaps to blender vertex pin weights.")

    # rigify prefs
    rigify_preview_shape_keys: bpy.props.BoolProperty(default=True, name="Retarget Shape Keys",
                                                      description="Retarget any facial expression and viseme shape key actions on the source character rig to the current character meshes on the rigify rig")
    rigify_bake_shape_keys: bpy.props.BoolProperty(default=True, name="Bake Shape Keys",
                                                   description="Bake facial expression and viseme shape keys to new shapekey actions on the character")
    rigify_export_t_pose: bpy.props.BoolProperty(default=True, name="Include T-Pose", description="Include a T-Pose as the first animation track. This is useful for correct avatar alignment in Unity and for importing animations back into CC4")
    rigify_export_mode: bpy.props.EnumProperty(items=[
        ("MESH","Mesh","Export only the character mesh and materials, with no animation (other than a Unity T-pose)"),
        ("MOTION","Motion","Export the animation only, with minimal mesh and no materials. Shapekey animations will also export their requisite mesh objects"),
        ("BOTH","Both","Export both the character mesh with materials and the animation"),
    ], default="MOTION",
       name="Export Mode")
    rigify_export_naming: bpy.props.EnumProperty(items=[
        ("METARIG","Metarig","Use metarig bone names without a Root bone.\n" \
                   "For exporting animations to CC4/iClone, or other applications.\n" \
                   "Note: CC4 will auto detect a blender meta-rig, but you must use the generated hik (.3dxProfile) profile to import animations back into CC4"),
        ("RIGIFY","Rigify","Use custom Rigify_Base_ bone names with a Rigify_Base_Root bone. \n" \
                  "*Warning*: Does not import correctly back into CC4!"),
        ("CC","CC Base","Use original CC_Base_ bone names with a CC_Base_Root bone. \n" \
               "Bones are exported in their original CC rig orientations where possible. \n" \
               "For exporting animations and characters to Unity and be compatible with the Unity auto-setup.\n" \
               "*Warning*: Does not import correctly back into CC4!"),
    ], default="METARIG", description="Bone names to use when exporting Rigify characters and motions.",
       name="Export Bone Naming")
    rigify_expression_rig: bpy.props.EnumProperty(items=[
        ("NONE","None","No expression rig, just eye and jaw controls"),
        ("RIGIFY","Rigify","Rigify full face rig"),
        ("META","CC5 HD","HD Face Control expression rig"),
    ], default="META", name="Expression Rig")
    rigify_face_control_color: bpy.props.FloatVectorProperty(subtype="COLOR", size=4,
                                                             default=(1.0, 0.95, 0.4, 1.0),
                                                             min = 0.0, max = 1.0,
                                                             name="Rig Color")
    rigify_auto_retarget: bpy.props.BoolProperty(default=True,
                                                 name="Auto Retarget",
                                                 description="Auto retarget any animation currently on the character armature")
    rigify_limit_control_range: bpy.props.BoolProperty(default=False,
                                                       name="Limit Control Range",
                                                       description="When using limit constraints, hard limit the control range of the constrained control")
    rigify_preview_retarget_fk_ik: bpy.props.EnumProperty(items=[
        ("FK","FK","Retarget to FK controls only"),
        ("IK","IK","Retarget to IK controls only"),
        ("BOTH","Both","Retarget to both FK and IK controls"),
    ], default="BOTH", name = "Retarget to FK/IK")
    rigify_bake_nla_fk_ik: bpy.props.EnumProperty(items=[
        ("FK","FK","Bake FK controls only"),
        ("IK","IK","Bake IK controls only"),
        ("BOTH","Both","Bake both FK and IK and controls"),
    ], default="BOTH", name = "Bake NLA to FK/IK")
    rigify_align_bones: bpy.props.EnumProperty(items=[
        ("CC","CC/iC","Align metarig bones to the CC/iC source rig"),
        ("METARIG","Metarig","Keep the metarig bone alignments"),
    ], default="METARIG", name="Align Metarig Bones", description="Metarig bone alignments")

    temp_folder: bpy.props.StringProperty(default="", subtype="DIR_PATH", name="Temp Folder",
                                          description="Folder to save exports and temporary files in when the Blend file is not yet saved."
                                          "If not set, a random temporary folder in the system temp files will be used")

    # datalink prefs
    datalink_auto_start: bpy.props.BoolProperty(default=False,
                                                description="Attempt to (re)start the DataLink connection when ever Blender is started or reloaded")
    datalink_frame_sync: bpy.props.BoolProperty(default=False,
                                                description="Force the live sequence transfer to stop and render every frame")
    datalink_preview_shape_keys: bpy.props.BoolProperty(default=True,
                                                        description="Previewing shape keys during live sequence transfer results in slower frame rates. It can be disabled to speed up the transfer")
    datalink_match_client_rate: bpy.props.BoolProperty(default=True,
                                                       description="When sending a live sequence, attempt to match the transfer frame rate. Causes less frame jumping in the live preview")
    datalink_retarget_prop_actions: bpy.props.BoolProperty(default=True,
                                                           description="As props do not have a default bind pose, each prop animation has a different rest pose " \
                                                           "which means the animation must be retargeted to (if checked) or the rest pose must be adjusted to "\
                                                           "match the incoming motion (not checked)")
    datalink_disable_tweak_bones: bpy.props.BoolProperty(default=True,
                                                         description="Tweak bones cause bone length stretching which is largely incompatible with CC/iC animations. This option disables the stretch constraint to leg tweak bones so that the feet target correctly")
    datalink_hide_prop_bones: bpy.props.BoolProperty(default=True,
                                                     description="Hide internal prop bones")
    datalink_send_mode: bpy.props.EnumProperty(items=[
        ("ALL","All","Send all materials in the selected meshes", "RESTRICT_SELECT_OFF", 0),
        ("ACTIVE","Active","Send only the active material in each of the selected meshes", "RESTRICT_SELECT_ON", 1),
    ], default="ACTIVE",
       name = "DataLink Send Mode")
    datalink_match_any_avatar: bpy.props.BoolProperty(default=True,
                                                      description="When sending items and animations from CC4, always match with the current avatar: i.e. if it is the only one in the scene or the one selected")
    datalink_confirm_mismatch: bpy.props.BoolProperty(default=True,
                                                      description="When importing motions from a non-matching character: import motion onto selected character without confirming")
    datalink_confirm_replace: bpy.props.BoolProperty(default=True,
                                                     description="Replace matching character imports without confirming")
    datalink_host: bpy.props.StringProperty(default="localhost", update=check_datalink_host)
    datalink_bad_hostname: bpy.props.BoolProperty(default=False)
    datalink_target: bpy.props.EnumProperty(items=[
        ("LOCAL","Local Machine","Connect to a DataLink server running on the local machine"),
        ("REMOTE","Remote Host","Connect to a DataLink server running on a remote machine"),
    ], default="LOCAL", name = "DataLink Target")
    datalink_auto_lighting: bpy.props.BoolProperty(default=True,
                                                   description="Use automatic lighting from CC/iC Go-B")

    # convert
    convert_non_standard_type: bpy.props.EnumProperty(items=[
        ("HUMANOID","Humanoid","Non standard character is a Humanoid"),
        ("CREATURE","Creature","Non standard character is a Creature"),
        ("PROP","Prop","Non standard character is a Prop"),
    ], default="PROP", name = "Non-standard Character Type")

    # addon updater preferences
    auto_check_update: bpy.props.BoolProperty(
        name="Auto-check for Update",
        description="If enabled, auto-check for updates using an interval",
        default=False,
    )
    updater_intrval_months: bpy.props.IntProperty(
        name='Months',
        description="Number of months between checking for updates",
        default=0,
        min=0
    )
    updater_intrval_days: bpy.props.IntProperty(
        name='Days',
        description="Number of days between checking for updates",
        default=7,
        min=0,
        max=31
    )
    updater_intrval_hours: bpy.props.IntProperty(
        name='Hours',
        description="Number of hours between checking for updates",
        default=0,
        min=0,
        max=23
    )
    updater_intrval_minutes: bpy.props.IntProperty(
        name='Minutes',
        description="Number of minutes between checking for updates",
        default=0,
        min=0,
        max=59
    )

    def draw(self, context):
        """Draw the add-on preferences UI panel, grouped by topic."""
        layout = self.layout
        layout.use_property_split = True

        layout.label(text="Import:")
        grid = layout.grid_flow(row_major=True, columns=2)
        grid.prop(self, "import_reset_custom_normals")
        grid.prop(self, "import_deduplicate")
        grid.prop(self, "import_auto_convert")
        grid.prop(self, "auto_convert_materials")
        grid.prop(self, "build_limit_textures")
        grid.prop(self, "build_pack_texture_channels")
        grid.prop(self, "build_pack_wrinkle_diffuse_roughness")
        grid.prop(self, "build_armature_edit_modifier")
        grid.prop(self, "build_armature_preserve_volume")
        grid.prop(self, "build_skin_shader_dual_spec")
        grid.prop(self, "build_shape_key_bone_drivers_jaw")
        grid.prop(self, "build_shape_key_bone_drivers_eyes")
        grid.prop(self, "build_shape_key_bone_drivers_head")
        grid.prop(self, "build_body_key_drivers")

        layout.label(text="Rendering:")
        layout.prop(self, "bake_use_gpu")
        # ACES overrides are only relevant when an ACES OCIO config is active
        if colorspace.is_aces():
            layout.label(text="OpenColorIO ACES")
            layout.prop(self, "aces_srgb_override")
            layout.prop(self, "aces_data_override")

        layout.label(text="Material settings:")
        layout.prop(self, "quality_mode")
        layout.prop(self, "pipeline_mode")
        layout.prop(self, "morph_mode")

        layout.label(text="Lighting:")
        layout.prop(self, "quality_lighting")
        layout.prop(self, "pipeline_lighting")
        layout.prop(self, "morph_lighting")

        layout.label(text="Detection:")
        layout.prop(self, "hair_hint")
        layout.prop(self, "hair_scalp_hint")

        layout.label(text="Eyes:")
        layout.prop(self, "refractive_eyes")
        layout.prop(self, "eye_displacement_group")

        layout.label(text="Physics:")
        layout.prop(self, "physics_group")
        layout.prop(self, "physics_weightmap_curve")

        layout.label(text="Rigify:")
        grid = layout.grid_flow(row_major=True, columns=2)
        grid.prop(self, "rigify_preview_shape_keys")
        grid.prop(self, "rigify_bake_shape_keys")
        grid.prop(self, "rigify_export_t_pose")
        grid.prop(self, "rigify_auto_retarget")
        grid.prop(self, "rigify_limit_control_range")
        grid = layout.grid_flow(row_major=True, columns=2)
        grid.prop(self, "rigify_align_bones")
        grid.prop(self, "rigify_export_mode")
        grid.prop(self, "rigify_export_naming")
        grid.prop(self, "rigify_expression_rig")
        grid.prop(self, "rigify_face_control_color")
        grid.prop(self, "rigify_preview_retarget_fk_ik")
        grid.prop(self, "rigify_bake_nla_fk_ik")

        layout.label(text="Export:")
        grid = layout.grid_flow(row_major=True, columns=2)
        grid.prop(self, "export_json_changes")
        grid.prop(self, "export_texture_changes")
        grid.prop(self, "export_legacy_bone_roll_fix")
        grid.prop(self, "export_bake_nodes")
        grid.prop(self, "export_bake_bump_to_normal")
        grid.prop(self, "export_unity_remove_objects")
        grid.prop(self, "export_require_key")
        layout.prop(self, "export_texture_size")

        layout.label(text="Convert:")
        layout.prop(self, "convert_non_standard_type")

        layout.label(text="Debug Settings:")
        layout.prop(self, "log_level")

        op = layout.operator("cc3.setpreferences", icon="FILE_REFRESH", text="Reset to Defaults")
        op.param = "RESET_PREFS"

        # add-on updater settings (interval, auto-check) drawn by the updater module
        addon_updater_ops.update_settings_ui(self,context)
class MATERIAL_UL_weightedmatslots(bpy.types.UIList):
    """UIList drawing a material slot row: editable material name in
    DEFAULT/COMPACT layouts, icon-only in GRID layout."""

    def draw_item(self, _context, layout, _data, item, icon, _active_data, _active_propname, _index):
        material = item.material
        if self.layout_type == 'GRID':
            layout.alignment = 'CENTER'
            layout.label(text="", icon_value=icon)
        elif self.layout_type in {'DEFAULT', 'COMPACT'}:
            if material:
                layout.prop(material, "name", text="", emboss=False, icon_value=icon)
            else:
                # empty slot: icon only
                layout.label(text="", icon_value=icon)
@@ -0,0 +1,229 @@
# Copyright (C) 2021 Victor Soupday
# This file is part of CC/iC Blender Tools <https://github.com/soupday/cc_blender_tools>
#
# CC/iC Blender Tools is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# CC/iC Blender Tools is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with CC/iC Blender Tools. If not, see <https://www.gnu.org/licenses/>.
import bpy
import os
from mathutils import Vector
from . import rigutils, modifiers, bones, utils, vars
def hide_sub_bones(rig, hide=True):
    """Hide (or show, when hide=False) the twist and share helper bones of
    *rig*, deselecting each affected bone as it goes.

    NeckTwist bones are deliberately excluded from the "Twist" match.
    """
    for armature_bone in rig.data.bones:
        name: str = armature_bone.name
        is_share = "ShareBone" in name
        is_twist = ("Twist" in name and "NeckTwist" not in name) or "_twist_" in name
        if is_share or is_twist:
            armature_bone.hide = hide
            bones.select_bone(rig, armature_bone, False)
def convert_to_blender_bone_names(chr_cache):
    """Rename CC-style left/right bones ("_L_"/"_R_") to Blender-style
    mirror names ("_X_…" + ".l"/".r") so pose mirroring works, and rename
    the matching vertex groups on all character meshes to follow.

    Sets chr_cache.proportion_editing = True so restore_cc_bone_names()
    can reverse the mapping later. No-op when already editing or rigified.
    """
    if chr_cache and not chr_cache.rigified and not chr_cache.proportion_editing:
        rig = chr_cache.get_armature()
        objects = chr_cache.get_all_objects(include_armature=False,
                                            include_children=True,
                                            of_type="MESH")
        # maps old bone name -> new bone name, used to rename vertex groups below
        bone_remap = {}
        for bone in rig.data.bones:
            source_name: str = bone.name
            bone_name = source_name
            if "_L_" in bone.name:
                bone_name = bone_name.replace("_L_", "_X_") + ".l"
                # key is the pre-rename name (bone.name is renamed after)
                bone_remap[bone.name] = bone_name
                bone.name = bone_name
            if "_R_" in bone.name:
                bone_name = bone_name.replace("_R_", "_X_") + ".r"
                bone_remap[bone.name] = bone_name
                bone.name = bone_name
        for obj in objects:
            for vg in obj.vertex_groups:
                if vg.name in bone_remap:
                    vg.name = bone_remap[vg.name]
        chr_cache.proportion_editing = True
def restore_cc_bone_names(chr_cache):
    """Reverse convert_to_blender_bone_names(): rename "_X_…" + ".l"/".r"
    bones back to CC-style "_L_"/"_R_" names and rename the matching vertex
    groups on all character meshes. Clears chr_cache.proportion_editing.
    """
    if chr_cache and not chr_cache.rigified and chr_cache.proportion_editing:
        rig = chr_cache.get_armature()
        objects = chr_cache.get_all_objects(include_armature=False,
                                            include_children=True,
                                            of_type="MESH")
        # maps Blender-style name -> restored CC-style name
        bone_restore = {}
        for bone in rig.data.bones:
            bone_name: str = bone.name
            if "_X_" in bone.name and bone.name.endswith(".l"):
                # [:-2] strips the ".l" suffix
                bone_name = bone_name.replace("_X_", "_L_")[:-2]
                bone_restore[bone.name] = bone_name
                bone.name = bone_name
            if "_X_" in bone.name and bone.name.endswith(".r"):
                bone_name = bone_name.replace("_X_", "_R_")[:-2]
                bone_restore[bone.name] = bone_name
                bone.name = bone_name
        for obj in objects:
            for vg in obj.vertex_groups:
                if vg.name in bone_restore:
                    vg.name = bone_restore[vg.name]
        chr_cache.proportion_editing = False
def prep_rig(chr_cache):
    """Prepare the character armature for proportion editing.

    Stashes the rig's current action and every shape-key action into
    chr_cache.proportion_editing_actions (restored by restore_rig()),
    clears the pose, enters pose mode with X-mirror enabled, hides the
    twist/share helper bones, draws the rig in front, and zeroes all
    shape keys on the character meshes.
    """
    if chr_cache:
        rig = chr_cache.get_armature()
        rigutils.fix_cc3_standard_rig(rig)
        rigutils.select_rig(rig)
        if rig:
            # remember draw state so restore_rig() can put it back
            chr_cache.proportion_editing_in_front = rig.show_in_front
            rig_action = utils.safe_get_action(rig)
            chr_cache.proportion_editing_actions.clear()
            if rig_action:
                # stash and detach the armature action
                action_store = chr_cache.proportion_editing_actions.add()
                action_store.object = rig
                action_store.action = rig_action
                utils.safe_set_action(rig, None)
            rig.pose.use_mirror_x = True
            bones.clear_pose(rig)
            utils.pose_mode_to(rig)
            hide_sub_bones(rig)
            rig.show_in_front = True
            # reset all shape keys
            objects = chr_cache.get_all_objects(include_armature=False,
                                                include_children=True,
                                                of_type="MESH")
            for obj in objects:
                if obj.data.shape_keys and obj.data.shape_keys.key_blocks:
                    key_action = utils.safe_get_action(obj.data.shape_keys)
                    if key_action:
                        # stash and detach the shape-key action before zeroing keys
                        action_store = chr_cache.proportion_editing_actions.add()
                        action_store.object = obj
                        action_store.action = key_action
                        utils.safe_set_action(obj.data.shape_keys, None)
                    key: bpy.types.ShapeKey
                    for key in obj.data.shape_keys.key_blocks:
                        key.value = 0.0
def restore_rig(chr_cache):
    """Undo prep_rig(): reattach the stashed armature and shape-key actions,
    leave pose mode, unhide the twist/share bones and restore the rig's
    original in-front draw state.
    """
    if chr_cache:
        rig = chr_cache.get_armature()
        if rig:
            # restore actions
            for action_store in chr_cache.proportion_editing_actions:
                obj = action_store.object
                action = action_store.action
                if utils.object_exists_is_armature(obj):
                    utils.safe_set_action(obj, action)
                elif utils.object_exists_is_mesh(obj):
                    # shape-key actions were stashed from obj.data.shape_keys
                    utils.safe_set_action(obj.data.shape_keys, action)
            chr_cache.proportion_editing_actions.clear()
            # restore rig
            utils.object_mode_to(rig)
            hide_sub_bones(rig, False)
            rig.show_in_front = chr_cache.proportion_editing_in_front
            # NOTE(review): singular "proportion_editing_action" — every other use
            # in this file is the plural collection "proportion_editing_actions";
            # confirm this singular property actually exists on the cache.
            chr_cache.proportion_editing_action = None
def apply_proportion_pose(chr_cache):
    """Bake the current proportion pose into the armature rest pose,
    unhiding the twist/share helper bones first so they are included."""
    if not chr_cache:
        return
    armature = chr_cache.get_armature()
    if not armature:
        return
    hide_sub_bones(armature, False)
    rigutils.apply_as_rest_pose(armature)
def set_child_inherit_scale(rig, pose_bone: bpy.types.PoseBone, inherit_scale):
    """Set the inherit_scale mode on the direct children of *pose_bone*
    (and of its X-mirror counterpart when rig X-mirror is on).

    Twist/share helper children always keep "FULL" inherit scale; all
    other children get *inherit_scale* (an inherit_scale enum string).
    """
    child_bone: bpy.types.PoseBone
    pose_bones = [pose_bone]
    if rig.pose.use_mirror_x:
        # find the opposite-side bone by swapping the .l/.r (.L/.R) suffix
        mirror_name = None
        if pose_bone.name.endswith(".r"):
            mirror_name = pose_bone.name[:-1] + "l"
        elif pose_bone.name.endswith(".R"):
            mirror_name = pose_bone.name[:-1] + "L"
        elif pose_bone.name.endswith(".l"):
            mirror_name = pose_bone.name[:-1] + "r"
        elif pose_bone.name.endswith(".L"):
            mirror_name = pose_bone.name[:-1] + "R"
        if mirror_name and mirror_name in rig.pose.bones:
            pose_bones.append(rig.pose.bones[mirror_name])
    for pose_bone in pose_bones:
        for child_bone in pose_bone.children:
            bone_name = child_bone.name
            # helper bones must always follow the parent's scale fully
            if "ShareBone" in bone_name or ("Twist" in bone_name and "NeckTwist" not in bone_name):
                child_bone.bone.inherit_scale = "FULL"
            else:
                child_bone.bone.inherit_scale = inherit_scale
def reset_proportions(rig):
    """Reset every pose bone of *rig* to full scale inheritance and unit scale."""
    for pb in rig.pose.bones:
        pb.bone.inherit_scale = "FULL"
        pb.scale = Vector((1, 1, 1))
class CCICCharacterProportions(bpy.types.Operator):
    """Edit a characters proportions to generate a new bind pose shape"""
    bl_idname = "ccic.characterproportions"
    bl_label = "Character Proportions"
    bl_options = {"REGISTER", "UNDO"}
    # operation selector: "BEGIN", "END", "INHERIT_SCALE_<mode>" or "RESET"
    param: bpy.props.StringProperty(
        name = "param",
        default = "",
        options={"HIDDEN"}
    )
    def execute(self, context):
        """Dispatch the proportion-editing operation selected by self.param.
        Only acts on non-rigified characters."""
        props = vars.props()
        chr_cache = props.get_context_character_cache(context)
        if chr_cache and not chr_cache.rigified:
            if self.param == "BEGIN":
                prep_rig(chr_cache)
                convert_to_blender_bone_names(chr_cache)
            elif self.param == "END":
                apply_proportion_pose(chr_cache)
                restore_rig(chr_cache)
                restore_cc_bone_names(chr_cache)
            elif self.param.startswith("INHERIT_SCALE"):
                # [14:] strips the "INHERIT_SCALE_" prefix, leaving the enum value
                inherit_scale = self.param[14:]
                if utils.get_mode() == "POSE" and utils.get_active_object() and bpy.context.active_pose_bone:
                    set_child_inherit_scale(utils.get_active_object(), bpy.context.active_pose_bone, inherit_scale)
            elif self.param == "RESET":
                if utils.get_mode() == "POSE" and utils.get_active_object():
                    reset_proportions(utils.get_active_object())
        return {"FINISHED"}
    @classmethod
    def description(cls, context, properties):
        # dynamic tooltip based on the operator's param
        if properties.param == "BEGIN":
            return """Begin character proportion editing"""
        elif properties.param == "END":
            return """End character proportion editing"""
        return ""
@@ -0,0 +1,810 @@
# Copyright (C) 2021 Victor Soupday
# This file is part of CC/iC Blender Tools <https://github.com/soupday/cc_blender_tools>
#
# CC/iC Blender Tools is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# CC/iC Blender Tools is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with CC/iC Blender Tools. If not, see <https://www.gnu.org/licenses/>.
import bpy, struct, json, os
from mathutils import Vector, Matrix, Color, Quaternion
from enum import IntEnum
from . import utils, rigutils, nodeutils, imageutils
class RLXCodes(IntEnum):
    # 4-byte id codes at the start of an RLX data stream (see import_rlx)
    RLX_ID_LIGHT = 0xCC01
    RLX_ID_CAMERA = 0xCC02
# import lights with rectangular/tube shapes as Blender AREA lights
RECTANGULAR_AS_AREA = False
TUBE_AS_AREA = True
# scale factors from RL light multipliers to Blender light energy
ENERGY_SCALE = 35 * 0.7
SUN_SCALE = 2 * 0.7
class BinaryData():
    """Sequential big-endian reader over a bytearray.

    Can be constructed from an existing bytearray, a file path, or an
    open binary file. Every read advances the internal offset.
    """
    data: bytearray = None
    offset: int = 0
    def __init__(self, data: bytearray = None, start_offset = 0,
                 file_path: str = None, file = None):
        if data:
            self.data = data
        elif file_path:
            with open(file_path, 'rb') as source:
                self.data = bytearray(source.read())
        elif file:
            self.data = bytearray(file.read())
        self.offset = start_offset
    def _unpack(self, fmt, width):
        # read one big-endian value and advance by its byte width
        value = struct.unpack_from(fmt, self.data, self.offset)[0]
        self.offset += width
        return value
    def json(self):
        """Read a length-prefixed UTF-8 JSON document."""
        return json.loads(self.bytes(self.int()).decode("utf-8"))
    def float(self):
        """Read a 4-byte big-endian float."""
        return self._unpack("!f", 4)
    def int(self):
        """Read a 4-byte big-endian unsigned int."""
        return self._unpack("!I", 4)
    def bool(self):
        """Read a single boolean byte."""
        return self._unpack("!?", 1)
    def string(self):
        """Read a length-prefixed UTF-8 string."""
        return self.bytes(self.int()).decode(encoding="utf-8")
    def time(self):
        """Read a time code (int, 6000 ticks per second) as seconds."""
        return float(self.int()) / 6000.0
    def vector(self):
        """Read three floats as a Vector (x, y, z)."""
        return Vector((self.float(), self.float(), self.float()))
    def quaternion(self):
        """Read four floats stored x, y, z, w as a Quaternion (w, x, y, z)."""
        x = self.float()
        y = self.float()
        z = self.float()
        w = self.float()
        return Quaternion((w, x, y, z))
    def color(self):
        """Read three floats as a Color (r, g, b)."""
        return Color((self.float(), self.float(), self.float()))
    def bytes(self, size):
        """Read *size* raw bytes."""
        chunk = self.data[self.offset:self.offset + size]
        self.offset += size
        return chunk
    def block(self):
        """Read a length-prefixed sub-block as a new BinaryData reader."""
        return BinaryData(data=self.bytes(self.int()))
    def eof(self):
        """True when the offset has reached the end of the data."""
        return self.offset >= len(self.data)
def import_rlx(file_path):
    """Import an .rlx data file, dispatching on its leading id code to the
    light or camera importer. Returns the importer's result, or None for
    an unrecognised code."""
    data_folder, _data_file = os.path.split(file_path)
    reader = BinaryData(file_path=file_path)
    code = reader.int()
    utils.log_info(f"RLX Code: {code}")
    handlers = {
        RLXCodes.RLX_ID_LIGHT: import_rlx_light,
        RLXCodes.RLX_ID_CAMERA: import_rlx_camera,
    }
    handler = handlers.get(code)
    if handler:
        return handler(reader, data_folder)
    return None
def remap_file(file_path, data_folder):
    """Relocate *file_path* into *data_folder*, keeping only the base name.
    If either argument is falsy, *file_path* is returned unchanged."""
    if not (file_path and data_folder):
        return file_path
    base_name = os.path.basename(file_path)
    return os.path.join(data_folder, base_name)
def prep_rlx_actions(obj, name, motion_id, reuse_existing=False, timestamp=False, motion_prefix=None):
    """Create (or reuse and clear) the object-level and data-level actions
    for *obj*, named "<prefix><name>|O|<motion_id>" and
    "<prefix><name>|<T[0]>|<motion_id>".

    On Blender 4.4+ a single slotted action is used for both and OBJECT /
    data-type slots are created. Returns
    (ob_action, data_action, ob_slot, data_slot); slots are None pre-4.4.
    """
    if not motion_id:
        motion_id = "DataLink"
    if timestamp:
        motion_id += f"_{utils.datetimes()}"
    f_prefix = rigutils.get_formatted_prefix(motion_prefix)
    # generate names (T is the action slot id type for obj.data, e.g. "LIGHT")
    T = utils.get_slot_type_for(obj.data)
    ob_name = f"{f_prefix}{name}|O|{motion_id}"
    data_name = f"{f_prefix}{name}|{T[0]}|{motion_id}"
    # find existing actions
    ob_action = utils.safe_get_action(obj)
    data_action = utils.safe_get_action(obj.data)
    # reuse existing by name if nothing on the object
    if reuse_existing and not ob_action and ob_name in bpy.data.actions:
        ob_action = bpy.data.actions[ob_name]
    if reuse_existing and not data_action and data_name in bpy.data.actions:
        data_action = bpy.data.actions[data_name]
    # clear existing actions or create new ones
    if ob_action:
        utils.clear_action(ob_action)
        ob_action.name = ob_name
    else:
        ob_action = bpy.data.actions.new(ob_name)
    # clear or add action for object data animation
    if data_action and data_action != ob_action:
        utils.clear_action(data_action)
        data_action.name = data_name
    elif utils.B440():
        # Blender 4.4+: one slotted action shared by object and data
        data_action = ob_action
    else:
        data_action = bpy.data.actions.new(data_name)
    if utils.B440():
        # add slots to Blender 4.4 actions
        ob_slot = ob_action.slots.new("OBJECT", ob_name)
        data_slot = data_action.slots.new(T, data_name)
    else:
        ob_slot = None
        data_slot = None
    # set the actions
    utils.safe_set_action(obj, ob_action, slot=ob_slot)
    utils.safe_set_action(obj.data, data_action, slot=data_slot)
    return ob_action, data_action, ob_slot, data_slot
def import_rlx_light(data: BinaryData, data_folder):
    """Import an RLX light: build/update the light object and its shader
    nodes, then decode the per-frame data block into fcurve caches and
    write them into fresh object/data actions.

    Fix: returns the light object — import_rlx() does
    `return import_rlx_light(...)` and previously always got None.
    Also removes a dead `frame` pre-increment that was immediately
    overwritten by the frame index read from the stream.
    """
    light_data = data.json()
    # make the light (reuse an existing object with the same RL link id)
    link_id = light_data["link_id"]
    light = find_link_id(link_id)
    light = decode_rlx_light(light_data, light)
    # static properties (RL distances are in cm, hence / 100)
    name: str = light_data["name"]
    type: str = light_data["type"]
    inverse_square: bool = light_data["inverse_square"]
    transmission: bool = light_data["transmission"]
    is_tube: bool = light_data["is_tube"]
    tube_length: float = light_data["tube_length"] / 100
    tube_radius: float = light_data["tube_radius"] / 100
    tube_soft_radius: float = light_data["tube_soft_radius"] / 100
    is_rectangle: bool = light_data["is_rectangle"]
    rect: tuple = (light_data["rect"][0] / 100, light_data["rect"][1] / 100)
    cast_shadow: bool = light_data["cast_shadow"]
    num_frames = light_data["frame_count"]
    light_type = get_light_type(type, is_rectangle, is_tube)
    cookie = remap_file(light_data.get("cookie"), data_folder)
    ies = remap_file(light_data.get("ies"), data_folder)
    build_light_nodes(light, cookie, ies)
    # now read in the frames and create an action for the light...
    frames = data.block()
    loc_cache = frame_cache(num_frames, 3)
    rot_cache = frame_rotation_cache(light, num_frames)
    sca_cache = frame_cache(num_frames, 3)
    color_cache = frame_cache(num_frames, 3)
    energy_cache = frame_cache(num_frames)
    cutoff_distance_cache = frame_cache(num_frames)
    spot_blend_cache = frame_cache(num_frames)
    spot_size_cache = frame_cache(num_frames)
    start = None
    while not frames.eof():
        # the time code must be read even though unused: it advances the stream
        time = frames.time()
        frame = frames.int()
        if start is None:
            start = frame
        active = frames.bool()
        loc = frames.vector() / 100
        rot = frames.quaternion()
        sca = frames.vector()
        color = frames.color()
        multiplier = frames.float()
        range = frames.float() / 100
        angle = frames.float() * 0.01745329  # degrees -> radians
        falloff = frames.float() / 100
        attenuation = frames.float() / 100
        darkness = frames.float()
        if not active:
            # inactive lights are keyed with zero energy
            multiplier = 0.0
        cutoff_distance = range
        store_frame(light, loc_cache, frame, start, loc)
        store_frame(light, rot_cache, frame, start, rot)
        store_frame(light, sca_cache, frame, start, sca)
        store_frame(light, color_cache, frame, start, color)
        store_frame(light, cutoff_distance_cache, frame, start, cutoff_distance)
        if light_type == "SUN":
            energy = SUN_SCALE * multiplier
            store_frame(light, energy_cache, frame, start, energy)
        elif light_type == "SPOT":
            energy = ENERGY_SCALE * multiplier
            spot_blend = (falloff + attenuation) / 2
            spot_size = angle
            store_frame(light, energy_cache, frame, start, energy)
            store_frame(light, spot_blend_cache, frame, start, spot_blend)
            store_frame(light, spot_size_cache, frame, start, spot_size)
        elif light_type == "AREA":
            energy = ENERGY_SCALE * multiplier
            store_frame(light, energy_cache, frame, start, energy)
        elif light_type == "POINT":
            energy = ENERGY_SCALE * multiplier
            store_frame(light, energy_cache, frame, start, energy)
    ob_action, light_action, ob_slot, light_slot = prep_rlx_actions(light, name, "Export",
                                                                   reuse_existing=False,
                                                                   timestamp=True)
    add_cache_fcurves(ob_action, light.path_from_id("location"), loc_cache, num_frames, "Location", slot=ob_slot)
    add_cache_rotation_fcurves(light, ob_action, rot_cache, num_frames, slot=ob_slot)
    add_cache_fcurves(ob_action, light.path_from_id("scale"), sca_cache, num_frames, "Scale", slot=ob_slot)
    add_cache_fcurves(light_action, light.data.path_from_id("color"), color_cache, num_frames, "Color", slot=light_slot)
    add_cache_fcurves(light_action, light.data.path_from_id("energy"), energy_cache, num_frames, "Energy", slot=light_slot)
    add_cache_fcurves(light_action, light.data.path_from_id("cutoff_distance"), cutoff_distance_cache, num_frames, "Cutoff Distance", slot=light_slot)
    if light_type == "SPOT":
        add_cache_fcurves(light_action, light.data.path_from_id("spot_blend"), spot_blend_cache, num_frames, "Spot Blend", slot=light_slot)
        add_cache_fcurves(light_action, light.data.path_from_id("spot_size"), spot_size_cache, num_frames, "Spot Size", slot=light_slot)
    return light
def import_rlx_camera(data: BinaryData, data_folder):
    """Import an RLX camera: build/update the camera object, decode the
    per-frame data block into fcurve caches, write them into fresh
    object/data actions and place timeline markers where the camera
    becomes active.

    Fix: returns the camera object — import_rlx() does
    `return import_rlx_camera(...)` and previously always got None.
    Also removes a dead `frame` pre-increment, and skips the marker pass
    when the stream contained no frames (start would be None and the
    marker range comparison would raise).
    """
    camera_data = data.json()
    # make the camera (reuse an existing object with the same RL link id)
    link_id = camera_data["link_id"]
    camera = find_link_id(link_id)
    camera = decode_rlx_camera(camera_data, camera)
    # static properties (RL distances are in cm, hence / 100)
    link_id = camera_data["link_id"]
    name: str = camera_data["name"]
    fit = camera_data["fit"]
    width = camera_data["width"] # mm
    height = camera_data["height"] # mm
    far_clip = camera_data["far_clip"] / 100
    near_clip = camera_data["near_clip"] / 100
    pivot_pos = utils.array_to_vector(camera_data["pos"]) / 100
    dof_weight = camera_data["dof_weight"]
    dof_decay = camera_data["dof_decay"]
    # now read in the frames and create an action for the camera...
    num_frames = camera_data["frame_count"]
    frames = data.block()
    loc_cache = frame_cache(num_frames, 3)
    rot_cache = frame_rotation_cache(camera, num_frames)
    sca_cache = frame_cache(num_frames, 3)
    lens_cache = frame_cache(num_frames)
    dof_cache = frame_cache(num_frames)
    focus_distance_cache = frame_cache(num_frames)
    f_stop_cache = frame_cache(num_frames)
    active_cache = []
    start = None
    while not frames.eof():
        # the time code must be read even though unused: it advances the stream
        time = frames.time()
        frame = frames.int()
        if start is None:
            start = frame
        loc = frames.vector() / 100
        rot = frames.quaternion()
        sca = frames.vector()
        focal_length = frames.float() # mm
        dof_enable = frames.bool()
        dof_focus = frames.float() / 100
        dof_range = frames.float() / 100
        dof_far_blur = frames.float()
        dof_near_blur = frames.float()
        dof_far_transition = frames.float() / 100
        dof_near_transition = frames.float() / 100
        dof_min_blend_distance = frames.float()
        fov = frames.float()
        active = frames.bool()
        store_frame(camera, loc_cache, frame, start, loc)
        store_frame(camera, rot_cache, frame, start, rot)
        store_frame(camera, sca_cache, frame, start, sca)
        store_frame(camera, lens_cache, frame, start, focal_length)
        store_frame(camera, dof_cache, frame, start, 1.0 if dof_enable else 0.0)
        store_frame(camera, focus_distance_cache, frame, start, dof_focus)
        # approximate Blender f-stop from the RL blur/transition values
        blur = (dof_far_blur + dof_near_blur) / 2
        transition = (1 / blur) * (dof_range + dof_far_transition + dof_near_transition) / 16
        f_stop = transition
        store_frame(camera, f_stop_cache, frame, start, f_stop)
        active_cache.append((frame, time, active))
    ob_action, cam_action, ob_slot, cam_slot = prep_rlx_actions(camera, name, "Export",
                                                                reuse_existing=False,
                                                                timestamp=True)
    add_cache_fcurves(ob_action, "location", loc_cache, num_frames, "Location", slot=ob_slot)
    add_cache_rotation_fcurves(camera, ob_action, rot_cache, num_frames, slot=ob_slot)
    add_cache_fcurves(ob_action, "scale", sca_cache, num_frames, "Scale", slot=ob_slot)
    add_cache_fcurves(cam_action, "lens", lens_cache, num_frames, "Camera", slot=cam_slot)
    add_cache_fcurves(cam_action, "dof.use_dof", dof_cache, num_frames, "DOF", slot=cam_slot)
    add_cache_fcurves(cam_action, "dof.focus_distance", focus_distance_cache, num_frames, "DOF", slot=cam_slot)
    add_cache_fcurves(cam_action, "dof.aperture_fstop", f_stop_cache, num_frames, "DOF", slot=cam_slot)
    if start is not None:
        add_camera_markers(camera, active_cache, num_frames, start)
    return camera
def frame_rotation_cache(obj, frames):
    """Allocate flat [frame0, value0, frame1, value1, ...] caches for each
    rotation channel of *obj*, sized by its rotation_mode:
    4 channels for QUATERNION (defaults w=1) or AXIS_ANGLE (axis z=1),
    3 channels for the Euler modes. Frame indices are pre-filled 0..frames-1.
    """
    mode = obj.rotation_mode
    if mode == "QUATERNION":
        defaults = [1, 0, 0, 0]
    elif mode == "AXIS_ANGLE":
        defaults = [0, 0, 1, 0]
    else:
        # any of the Euler orders: XYZ, XZY, YXZ, YZX, ZXY, ZYX
        defaults = [0, 0, 0]
    cache = []
    for default in defaults:
        channel = [0, default] * frames
        # even slots hold the frame number, odd slots the channel value
        channel[0::2] = range(frames)
        cache.append(channel)
    return cache
def frame_cache(frames, indices=1, default_value=0.0):
    """Allocate *indices* flat keyframe caches, each laid out as
    [frame0, value0, frame1, value1, ...] with values preset to
    *default_value* and frame numbers pre-filled 0..frames-1.
    """
    cache = []
    for _ in range(indices):
        channel = [0, default_value] * frames
        # even slots hold the frame number, odd slots the channel value
        channel[0::2] = range(frames)
        cache.append(channel)
    return cache
def store_frame(obj, cache, frame, start, value):
    """Write *value* into the flat keyframe *cache* at position
    (frame - start), dispatching on the value's type.

    Quaternions are converted to match obj.rotation_mode (axis-angle or
    Euler when needed); Vectors/Colors write one channel per component;
    anything else is treated as a scalar on channel 0.
    """
    T = type(value)
    # each cache slot is a (frame, value) pair, hence the * 2 stride
    index = (frame - start) * 2
    if T is Quaternion:
        if obj.rotation_mode == "QUATERNION":
            l = len(value)
            for i in range(0, l):
                curve = cache[i]
                curve[index] = frame
                curve[index + 1] = value[i]
        elif obj.rotation_mode == "AXIS_ANGLE":
            # convert quaternion to angle axis
            v,a = value.to_axis_angle()
            l = len(v)
            for i in range(0, l):
                curve = cache[i]
                curve[index] = frame
                curve[index + 1] = v[i]
            # channel 3 carries the angle
            curve = cache[3]
            curve[index] = frame
            curve[index + 1] = a
        else:
            # Euler modes: convert with the object's rotation order
            euler = value.to_euler(obj.rotation_mode)
            l = len(euler)
            for i in range(0, l):
                curve = cache[i]
                curve[index] = frame
                curve[index + 1] = euler[i]
    elif T is Vector or T is Color:
        l = len(value)
        for i in range(0, l):
            curve = cache[i]
            curve[index] = frame
            curve[index + 1] = value[i]
    else:
        # scalar value: single channel
        curve = cache[0]
        curve[index] = frame
        curve[index + 1] = value
def add_cache_rotation_fcurves(obj, action: bpy.types.Action, cache, num_frames, slot=None):
    """Add rotation fcurves from *cache* to *action*, using the property
    and group name matching obj.rotation_mode."""
    mode = obj.rotation_mode
    if mode == "QUATERNION":
        prop, group = "rotation_quaternion", "Rotation Quaternion"
    elif mode == "AXIS_ANGLE":
        prop, group = "rotation_axis_angle", "Rotation Axis-Angle"
    else:
        # any Euler rotation order
        prop, group = "rotation_euler", "Rotation Euler"
    add_cache_fcurves(action, obj.path_from_id(prop), cache, num_frames, group_name=group, slot=slot)
def add_cache_fcurves(action: bpy.types.Action, data_path, cache, num_frames, group_name=None, slot=None):
    """Create one fcurve per channel in *cache* on *action* (or its slot's
    channelbag on Blender 4.4+) at *data_path*, bulk-loading the flat
    (frame, value) pairs via foreach_set.

    Fix: the previous code passed group_name=None straight into
    channels.groups — a membership test / groups.new(None) raises when a
    caller omits the group; fall back to the data path as the group name.
    """
    channels = utils.get_action_channels(action, slot)
    if not channels:
        return
    if group_name is None:
        # a group is always assigned below, so synthesize a name
        group_name = data_path
    if group_name not in channels.groups:
        channels.groups.new(group_name)
    group = channels.groups[group_name]
    for index, channel_data in enumerate(cache):
        fcurve: bpy.types.FCurve = channels.fcurves.new(data_path, index=index)
        fcurve.group = group
        # pre-size then bulk-set the keyframe coordinates
        fcurve.keyframe_points.add(num_frames)
        fcurve.keyframe_points.foreach_set('co', channel_data)
def add_camera_markers(camera, cache, num_frames, start):
    """Rebuild the timeline camera-bind markers for *camera* over the frame
    range [start, start + num_frames): remove this camera's existing
    markers in the range, then add one marker at each frame where the
    camera transitions from inactive to active.

    cache is a list of (frame, time, active) tuples.
    """
    scene = bpy.context.scene
    frames = len(cache)  # NOTE(review): unused local
    # wipe all camera markers for this camera in this frame range
    to_remove = []
    for marker in scene.timeline_markers:
        if marker.frame >= start and marker.frame < start + num_frames:
            if marker.camera == camera:
                to_remove.append(marker)
    for marker in to_remove:
        scene.timeline_markers.remove(marker)
    # add markers for camera only when camera first activates
    last_active = False
    for i, (frame, time, active) in enumerate(cache):
        if active and not last_active:
            marker = scene.timeline_markers.new(f"RLCam_F{frame}")
            marker.frame = frame
            marker.camera = camera
        last_active = active
def decode_rlx_light(light_data, light: bpy.types.Object=None, container=None):
    """Create or update a Blender light from decoded RLX light JSON.

    If *light* exists but its Blender type no longer matches the RL light
    type it is deleted and recreated (re-tagging the RL link id and
    re-attaching any existing actions). Applies transform, color, energy
    and per-type shape/shadow settings. Returns the light object.
    """
    # static properties (RL distances are cm, hence / 100)
    link_id = light_data["link_id"]
    name: str = light_data["name"]
    type: str = light_data["type"]
    inverse_square: bool = light_data["inverse_square"]
    transmission: bool = light_data["transmission"]
    is_tube: bool = light_data["is_tube"]
    tube_length: float = light_data["tube_length"] / 100
    tube_radius: float = light_data["tube_radius"] / 100
    tube_soft_radius: float = light_data["tube_soft_radius"] / 100
    is_rectangle: bool = light_data["is_rectangle"]
    rect: tuple = (light_data["rect"][0] / 100, light_data["rect"][1] / 100)
    cast_shadow: bool = light_data["cast_shadow"]
    # animateable properties
    active = light_data["active"]
    loc = utils.array_to_vector(light_data["loc"]) / 100
    rot = utils.array_to_quaternion(light_data["rot"])
    sca = utils.array_to_vector(light_data["sca"])
    color = utils.array_to_color(light_data["color"])
    multiplier = light_data["multiplier"]
    range = light_data["range"] / 100
    angle = light_data["angle"] * 0.01745329  # degrees -> radians
    falloff = light_data["falloff"] / 100
    attenuation = light_data["attenuation"] / 100
    darkness = light_data["darkness"]
    light_type = get_light_type(type, is_rectangle, is_tube)
    # preserve any actions so they can be re-attached if the light is rebuilt
    ob_action = utils.safe_get_action(light) if light else None
    light_action = utils.safe_get_action(light.data) if light else None
    if light and (light.type != "LIGHT" or light.data.type != light_type):
        utils.delete_light_object(light)
        light = None
    if not light:
        if light_type == "AREA":
            light = add_area_light(light_data["name"], container)
        elif light_type == "POINT":
            light = add_point_light(light_data["name"], container)
        elif light_type == "SUN":
            light = add_dir_light(light_data["name"], container)
        else:
            light = add_spot_light(light_data["name"], container)
        utils.set_rl_link_id(light, link_id)
        utils.safe_set_action(light, ob_action)
        utils.safe_set_action(light.data, light_action)
    light.location = loc
    utils.set_transform_rotation(light, rot)
    light.scale = sca
    light.data.color = color
    if light_type == "SUN":
        light.data.energy = SUN_SCALE * multiplier
    elif light_type == "SPOT":
        light.data.energy = ENERGY_SCALE * multiplier
        light.data.use_custom_distance = True
        light.data.cutoff_distance = range
        # NOTE(review): this blend formula differs from the frame path in
        # import_rlx_light ((falloff + attenuation) / 2) and from
        # apply_light_pose — confirm which is intended.
        light.data.spot_blend = (falloff*attenuation + attenuation) / 2
        light.data.spot_size = angle
        if utils.B410():
            try:
                light.data.use_soft_falloff = True
            except: ...
        if is_rectangle:
            light.data.shadow_soft_size = (rect[0] + rect[1]) / 3
        elif is_tube:
            light.data.shadow_soft_size = (tube_radius + tube_length) / 3
    elif light_type == "AREA":
        light.data.energy = ENERGY_SCALE * multiplier
        light.data.use_custom_distance = True
        light.data.cutoff_distance = range
        if is_rectangle:
            light.data.shape = "RECTANGLE"
            light.data.size = rect[0]
            light.data.size_y = rect[1]
        elif is_tube:
            light.data.shape = "ELLIPSE"
            light.data.size = 10 * max(0.01, tube_length)
            light.data.size_y = tube_radius
    elif light_type == "POINT":
        light.data.energy = ENERGY_SCALE * 2.0 * multiplier
        light.data.use_custom_distance = True
        light.data.cutoff_distance = range
    light.data.use_shadow = cast_shadow
    if cast_shadow:
        if utils.B420():
            # Blender 4.2+ shadow jitter replaces the old buffer/contact settings
            light.data.use_shadow_jitter = True
        else:
            if light_type != "SUN":
                light.data.shadow_buffer_clip_start = 0.0025
                light.data.shadow_buffer_bias = 1.0
            light.data.use_contact_shadow = True
            light.data.contact_shadow_distance = 0.1
            light.data.contact_shadow_bias = 0.03
            light.data.contact_shadow_thickness = 0.001
    if not active:
        utils.hide(light)
    return light
def apply_light_pose(light, loc, rot, sca, color, active, multiplier, range, angle, falloff, attenuation, darkness):
    """Apply a single decoded light pose (transform, color, energy and
    per-type spot/area values) directly to an existing Blender light.

    NOTE(review): unlike decode_rlx_light, the raw RL units are divided
    here (range/100, angle*0.01745329, blend /200) — callers presumably
    pass undivided values; confirm against the send side.
    """
    light.location = loc
    utils.set_transform_rotation(light, rot)
    light.scale = sca
    light.data.color = color
    if not active:
        # inactive lights get zero energy rather than being hidden
        multiplier = 0.0
    if light.data.type == "SUN":
        light.data.energy = 2 * multiplier
    elif light.data.type == "SPOT":
        light.data.energy = ENERGY_SCALE * multiplier
        light.data.cutoff_distance = range / 100
        light.data.spot_blend = (attenuation * falloff + attenuation) / 200
        light.data.spot_size = angle * 0.01745329
    elif light.data.type == "AREA":
        light.data.energy = ENERGY_SCALE * multiplier
        light.data.cutoff_distance = range / 100
    elif light.data.type == "POINT":
        light.data.energy = ENERGY_SCALE * 2.0 * multiplier
        light.data.cutoff_distance = range / 100
def decode_rlx_camera(camera_data, camera):
    """Create or update a Blender camera from decoded RLX camera JSON.

    If *camera* exists but is not a CAMERA object it is deleted and
    recreated (re-tagging the RL link id and re-attaching any existing
    actions). Applies transform, lens, sensor, clip and depth-of-field
    settings. Returns the camera object.
    """
    # static properties (RL distances are cm, hence / 100)
    link_id = camera_data["link_id"]
    name: str = camera_data["name"]
    fit = camera_data["fit"]
    width = camera_data["width"] # mm
    height = camera_data["height"] # mm
    far_clip = camera_data["far_clip"] / 100
    near_clip = camera_data["near_clip"] / 100
    pivot_pos = utils.array_to_vector(camera_data["pos"]) / 100
    dof_weight = camera_data["dof_weight"]
    dof_decay = camera_data["dof_decay"]
    # animateable properties
    fov = camera_data["fov"]
    focal_length = camera_data["focal_length"] # mm
    loc = utils.array_to_vector(camera_data["loc"]) / 100
    rot = utils.array_to_quaternion(camera_data["rot"])
    sca = utils.array_to_vector(camera_data["sca"])
    dof_enable = camera_data["dof_enable"]
    dof_focus = camera_data["dof_focus"] / 100
    dof_range = camera_data["dof_range"] / 100
    dof_far_blur = camera_data["dof_far_blur"] # 0.1 - 1.8
    dof_near_blur = camera_data["dof_near_blur"] # 0.1 - 1.8
    dof_far_transition = camera_data["dof_far_transition"] / 100
    dof_near_transition = camera_data["dof_near_transition"] / 100
    dof_min_blend_distance = camera_data["dof_min_blend_distance"] # 0.0 - 1.0
    active = camera_data["active"]
    # preserve any actions so they can be re-attached if the camera is rebuilt
    ob_action = utils.safe_get_action(camera) if camera else None
    cam_action = utils.safe_get_action(camera.data) if camera else None
    if camera and camera.type != "CAMERA":
        utils.delete_object(camera)
        camera = None
    if not camera:
        camera = add_camera(name)
        utils.set_rl_link_id(camera, link_id)
        utils.safe_set_action(camera, ob_action)
        utils.safe_set_action(camera.data, cam_action)
    camera.location = loc
    utils.set_transform_rotation(camera, rot)
    camera.scale = sca
    camera.data.lens = focal_length
    camera.data.sensor_fit = fit
    camera.data.sensor_width = width
    camera.data.sensor_height = height
    camera.data.clip_start = near_clip
    camera.data.clip_end = far_clip
    # depth of field
    camera.data.dof.use_dof = dof_enable
    camera.data.dof.focus_distance = dof_focus
    # not much we can do about blur as DOF blur is a global scene setting in Blender (and only for Eevee)
    # bpy.data.scenes["Scene"].eevee.bokeh_max_size
    # TODO maybe blur can be incorporated into f_stop
    # TODO maybe dof_range too (perfect focus range)
    blur = (dof_far_blur + dof_near_blur) / 2
    # transition range can be interpreted as the f-stop
    transition = (1 / blur) * (dof_range + dof_far_transition + dof_near_transition) / 16
    f_stop = transition
    camera.data.dof.aperture_fstop = f_stop
    return camera
def apply_camera_pose(camera, loc, rot, sca, focal_length,
                      dof_enable, dof_focus, dof_range,
                      dof_far_blur, dof_near_blur,
                      dof_far_transition, dof_near_transition, dof_min_blend_distance):
    """Apply a single decoded camera pose (transform, lens and DOF)
    directly to an existing Blender camera.

    NOTE(review): dof_focus is divided by 100 here (decode_rlx_camera
    pre-divides) and the f-stop divisor is 1600 vs 16 there — confirm the
    callers pass raw cm values for this path.
    """
    camera.location = loc
    utils.set_transform_rotation(camera, rot)
    camera.scale = sca
    camera.data.lens = focal_length
    # depth of field
    camera.data.dof.use_dof = dof_enable
    camera.data.dof.focus_distance = dof_focus / 100
    # not much we can do about blur as DOF blur is a global scene setting in Blender (and only for Eevee)
    # bpy.data.scenes["Scene"].eevee.bokeh_max_size
    # TODO maybe blur can be incorporated into f_stop
    # TODO maybe dof_range too (perfect focus range)
    blur = (dof_far_blur + dof_near_blur) / 2
    # transition range can be interpreted as the f-stop
    transition = (1 / blur) * (dof_range + dof_far_transition + dof_near_transition) / 1600
    f_stop = transition
    camera.data.dof.aperture_fstop = f_stop
def get_light_type(rl_type, is_rectangle, is_tube):
    """Map an RL light type plus shape flags to a Blender light type.

    "DIR" becomes "SUN"; rectangular/tube shapes become "AREA" when the
    corresponding module flags (RECTANGULAR_AS_AREA / TUBE_AS_AREA) are on.
    Rectangle takes precedence over tube when both flags are set.
    """
    light_type = "SUN" if rl_type == "DIR" else rl_type
    if is_rectangle:
        shape = "RECTANGLE"
    elif is_tube:
        shape = "TUBE"
    else:
        shape = "NONE"
    if TUBE_AS_AREA and shape == "TUBE":
        light_type = "AREA"
    if RECTANGULAR_AS_AREA and shape == "RECTANGLE":
        light_type = "AREA"
    # area lights reproduce linear falloff (none inverse_square) lights best,
    # so spot/point could also be mapped to AREA, but that is disabled here
    return light_type
def find_link_id(link_id: str):
    """Return the first object in the blend data whose RL link id equals
    *link_id*, or None when no object matches."""
    for candidate in bpy.data.objects:
        if utils.get_rl_link_id(candidate) == link_id:
            return candidate
    return None
def add_camera(name, container=None):
    """Create a new camera object named *name*, tag it with the CC/iC id
    and optionally parent it to *container* (keeping world transform)."""
    bpy.ops.object.camera_add()
    cam = utils.get_active_object()
    cam.name = name
    cam.data.name = name
    utils.set_ccic_id(cam)
    if container:
        cam.parent = container
        # compensate the parent transform so the camera stays in place
        cam.matrix_parent_inverse = container.matrix_world.inverted()
    return cam
def add_spot_light(name, container=None):
    """Create a new spot light named *name*, tag it with the CC/iC id and
    optionally parent it to *container* (keeping world transform)."""
    bpy.ops.object.light_add(type="SPOT")
    spot = utils.get_active_object()
    spot.name = name
    spot.data.name = name
    utils.set_ccic_id(spot)
    if container:
        spot.parent = container
        spot.matrix_parent_inverse = container.matrix_world.inverted()
    return spot
def add_area_light(name, container=None):
    """Create a new area light named *name*, tag it with the CC/iC id and
    optionally parent it to *container* (keeping world transform)."""
    bpy.ops.object.light_add(type="AREA")
    area = utils.get_active_object()
    area.name = name
    area.data.name = name
    utils.set_ccic_id(area)
    if container:
        area.parent = container
        area.matrix_parent_inverse = container.matrix_world.inverted()
    return area
def add_point_light(name, container=None):
    """Create a new point light named *name*, tag it with the CC/iC id and
    optionally parent it to *container* (keeping world transform)."""
    bpy.ops.object.light_add(type="POINT")
    point = utils.get_active_object()
    point.name = name
    point.data.name = name
    utils.set_ccic_id(point)
    if container:
        point.parent = container
        point.matrix_parent_inverse = container.matrix_world.inverted()
    return point
def add_dir_light(name, container=None):
    """Create a new sun (directional) light named *name*, tag it with the
    CC/iC id and optionally parent it to *container* (keeping world transform)."""
    bpy.ops.object.light_add(type="SUN")
    sun = utils.get_active_object()
    sun.name = name
    sun.data.name = name
    utils.set_ccic_id(sun)
    if container:
        sun.parent = container
        sun.matrix_parent_inverse = container.matrix_world.inverted()
    return sun
def add_light_container():
    """Find or create the "Lighting" empty used to group imported lights.

    Searches all objects for a CC/iC-tagged empty whose name contains
    "Lighting" (no break: the last match wins), creating one if missing.
    Any existing CC/iC-tagged child LIGHT objects are deleted so the
    container starts empty. Returns the container object.
    """
    container = None
    for obj in bpy.data.objects:
        if obj.type == "EMPTY" and "Lighting" in obj.name and utils.has_ccic_id(obj):
            container = obj
    if not container:
        bpy.ops.object.empty_add(type="PLAIN_AXES", radius=0.01)
        container = utils.get_active_object()
        container.name = "Lighting"
        utils.set_ccic_id(container)
    # clear out previously imported lights under this container
    children = utils.get_child_objects(container)
    for child in children:
        if utils.has_ccic_id(child) and child.type == "LIGHT":
            utils.delete_object_tree(child)
    return container
def build_light_nodes(light, cookie, ies):
    """Build an emission node tree on `light`, wiring in an optional cookie
    image texture (drives colour) and/or an external IES profile (drives
    strength). Does nothing unless a light and at least one of cookie/ies
    are provided.
    """
    if not (light and (cookie or ies)):
        return
    light.data.use_nodes = True
    tree = light.data.node_tree
    tree.nodes.clear()
    emission = tree.nodes.new("ShaderNodeEmission")
    light_out = tree.nodes.new("ShaderNodeOutputLight")
    nodeutils.link_nodes(tree.links, emission, "Emission", light_out, "Surface")
    emission.location = Vector((40, 380))
    light_out.location = Vector((320, 300))
    if ies:
        ies_tex = tree.nodes.new("ShaderNodeTexIES")
        ies_tex.mode = "EXTERNAL"
        ies_tex.filepath = ies
        nodeutils.set_node_input_value(ies_tex, "Strength", 0.01)
        nodeutils.link_nodes(tree.links, ies_tex, "Fac", emission, "Strength")
        ies_tex.location = Vector((-220, 200))
    if cookie:
        cookie_tex = tree.nodes.new("ShaderNodeTexImage")
        cookie_tex.image = imageutils.load_image(cookie, "sRGB")
        nodeutils.link_nodes(tree.links, cookie_tex, "Color", emission, "Color")
        cookie_tex.location = Vector((-320, 520))
# ---- file boundary (concatenation artifact; original diff header: @@ -0,0 +1,585 @@) ----
# Copyright (C) 2021 Victor Soupday
# This file is part of CC/iC Blender Tools <https://github.com/soupday/cc_blender_tools>
#
# CC/iC Blender Tools is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# CC/iC Blender Tools is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with CC/iC Blender Tools. If not, see <https://www.gnu.org/licenses/>.
import bpy
from mathutils import Vector
from . import rigidbody, utils, bones, vars
# Spring rig root bone names ("RLS_" prefix; legacy rigs used "RL_").
HEAD_RIG_NAME = "RLS_Hair_Rig_Head"
JAW_RIG_NAME = "RLS_Hair_Rig_Jaw"
# Prefixes used for individual spring chain bones.
HAIR_BONE_PREFIX = "Hair"
BEARD_BONE_PREFIX = "Beard"
# Candidate anchor bone names across rig types (Rigify ORG, CC Base, generic).
HEAD_BONE_NAMES = ["ORG-spine.006", "CC_Base_Head", "RL_Head", "Head", "head"]
JAW_BONE_NAMES = ["ORG-jaw", "CC_Base_JawRoot", "RL_JawRoot", "JawRoot", "teeth.B"]
# NOTE(review): the two CC_Base eye names are listed twice - possibly meant
# to be different rig variants; verify.
EYE_BONE_NAMES = ["ORG-eye.R", "ORG-eye.L", "CC_Base_R_Eye", "CC_Base_L_Eye", "CC_Base_R_Eye", "CC_Base_L_Eye"]
# BUGFIX: list.extend() returns None, so the original expression
# HEAD_BONE_NAMES.copy().extend(JAW_BONE_NAMES.copy()) left ROOT_BONE_NAMES
# set to None. Concatenation produces the intended combined list.
ROOT_BONE_NAMES = HEAD_BONE_NAMES + JAW_BONE_NAMES
# Enum items cache for enumerate_spring_rigs() (strings must stay referenced).
AVAILABLE_SPRING_RIG_LIST = []
def get_all_parent_modes(chr_cache, arm):
    """Return the spring rig parent modes supported by the add-on.

    Currently always head and jaw; the arguments are accepted for API
    symmetry with the other spring rig queries.
    """
    modes = ["HEAD", "JAW"]
    return modes
def get_spring_rig_name(arm, parent_mode):
    """Return the spring rig root bone name for `parent_mode`, preferring a
    legacy "RL_" prefixed rig if one already exists in the armature."""
    rig_name = JAW_RIG_NAME if parent_mode == "JAW" else HEAD_RIG_NAME
    # older versions used an "RL_" prefix instead of "RLS_"
    legacy_name = "RL_" + rig_name[4:]
    if legacy_name in arm.data.bones:
        return legacy_name
    return rig_name
def has_spring_rig(chr_cache, arm, parent_mode):
    """True if the armature contains a spring rig root bone for `parent_mode`."""
    root_name = get_spring_rig_name(arm, parent_mode)
    return bones.get_bone(arm, root_name) is not None
def has_spring_rigs(chr_cache, arm):
    """True if the armature contains a spring rig for any parent mode."""
    return any(has_spring_rig(chr_cache, arm, pm)
               for pm in get_all_parent_modes(chr_cache, arm))
def has_spring_systems(chr_cache):
    """True if any spring rig of this character has a rigid body system built."""
    if not chr_cache:
        return False
    arm = chr_cache.get_armature()
    if not arm:
        return False
    for parent_mode in get_all_parent_modes(chr_cache, arm):
        prefix = get_spring_rig_prefix(parent_mode)
        if rigidbody.get_spring_rigid_body_system(arm, prefix):
            return True
    return False
def get_spring_systems(chr_cache):
    """Collect the rigid body systems of every spring rig on the character.

    Returns an empty list when there is no character or armature.
    """
    systems = []
    arm = chr_cache.get_armature() if chr_cache else None
    if arm:
        for parent_mode in get_all_parent_modes(chr_cache, arm):
            prefix = get_spring_rig_prefix(parent_mode)
            system = rigidbody.get_spring_rigid_body_system(arm, prefix)
            if system:
                systems.append(system)
    return systems
def rigidbody_state():
    """Query the scene's rigid body world state.

    Returns (has_rigidbody, is_baked, is_baking, point_cache); the last three
    are (False, False, None) when the scene has no rigid body world.
    """
    world = bpy.context.scene.rigidbody_world
    if not world:
        return False, False, False, None
    cache = world.point_cache
    return True, cache.is_baked, cache.is_baking, cache
def get_spring_rigs(chr_cache, arm, parent_modes : list = None, mode = "POSE"):
    """Map each parent mode to its spring rig root info:

    { parent_mode: { "name": rig_name,
                     "bone_name": root.name,
                     "bone": root } }

    The "bone" entry is an edit bone, pose bone or data bone depending on
    `mode` and Blender's current mode (pose is preferred). When `parent_modes`
    is None all supported modes are queried.
    """
    found = {}
    for pm in (parent_modes or get_all_parent_modes(chr_cache, arm)):
        root = get_spring_rig(chr_cache, arm, pm, mode)
        if root:
            found[pm] = {
                "name": get_spring_rig_name(arm, pm),
                "bone_name": root.name,
                "bone": root,
            }
    return found
def get_spring_rig_names(chr_cache, arm, parent_modes = None, mode = "POSE"):
    """List the root bone names of all available spring rigs."""
    names = []
    for entry in get_spring_rigs(chr_cache, arm, parent_modes, mode).values():
        names.append(entry["bone_name"])
    return names
def get_spring_rig_from_child(chr_cache, arm, bone_name, prefer_pose = True):
    """Walk up the parent chain of `bone_name` to find the spring rig that
    owns it.

    Returns (spring_rig_info, child_bone_name, parent_mode) when the bone
    belongs to a spring rig, otherwise (None, None, None).
    """
    try:
        if prefer_pose or utils.get_mode() == "POSE":
            bone = arm.pose.bones[bone_name]
        elif utils.get_mode() == "EDIT":
            bone = arm.data.edit_bones[bone_name]
        else:
            bone = arm.data.bones[bone_name]
    # BUGFIX: was a bare `except:`, which also swallowed KeyboardInterrupt /
    # SystemExit. Only bone lookup / invalid reference errors are expected here.
    except (KeyError, AttributeError, ReferenceError):
        bone = None
    if bone:
        spring_rigs = get_spring_rigs(chr_cache, arm, mode = "POSE")
        while bone.parent:
            for parent_mode in spring_rigs:
                if spring_rigs[parent_mode]["bone"] == bone.parent:
                    return spring_rigs[parent_mode], bone.name, parent_mode
            bone = bone.parent
    return None, None, None
def get_spring_rig(chr_cache, arm, parent_mode, mode = "POSE", create_if_missing = False):
    """Return the spring rig root bone for `parent_mode`.

    Depending on `mode` and Blender's current mode this is an edit bone,
    pose bone or data bone (the pose bone is preferred). When `mode` is
    "EDIT" and `create_if_missing` is set, a new root edit bone is created
    at the calculated rig position and placed in the "Spring (Root)"
    collection. Returns None when nothing is found/created.
    """
    if parent_mode and chr_cache and arm:
        spring_rig_name = get_spring_rig_name(arm, parent_mode)
        spring_rig = None
        # switch into edit mode first if an edit bone was requested
        if mode == "EDIT" and utils.get_mode() != "EDIT":
            utils.edit_mode_to(arm)
        if mode == "POSE" or utils.get_mode() == "POSE":
            if spring_rig_name in arm.pose.bones:
                return arm.pose.bones[spring_rig_name]
        elif mode == "EDIT" and utils.get_mode() == "EDIT":
            if spring_rig_name in arm.data.edit_bones:
                spring_rig = arm.data.edit_bones[spring_rig_name]
            if not spring_rig and create_if_missing:
                anchor_bone_name = get_spring_anchor_name(chr_cache, arm, parent_mode)
                center_position = get_spring_rig_position(chr_cache, arm, parent_mode)
                spring_rig = bones.new_edit_bone(arm, spring_rig_name, anchor_bone_name)
                # positions are world space: convert into armature local space
                spring_rig.head = arm.matrix_world.inverted() @ center_position
                spring_rig.tail = arm.matrix_world.inverted() @ (center_position + Vector((0,1/32,0)))
                spring_rig.align_roll(Vector((0,0,1)))
                bones.set_bone_collection(arm, spring_rig, "Spring (Root)", None, vars.SPRING_ROOT_LAYER)
                bones.set_bone_collection_visibility(arm, "Spring (Root)", vars.SPRING_ROOT_LAYER, False)
            # TODO spring roots are put in the DEF bones by Rigify...
            return spring_rig
        else:
            if spring_rig_name in arm.data.bones:
                return arm.data.bones[spring_rig_name]
    return None
def get_spring_rig_prefix(parent_mode):
    """Return the spring chain bone name prefix for `parent_mode`
    ("Hair" for HEAD, "Beard" for JAW, "NONE" otherwise)."""
    if parent_mode == "HEAD":
        return HAIR_BONE_PREFIX
    if parent_mode == "JAW":
        return BEARD_BONE_PREFIX
    return "NONE"
def get_spring_anchor_name(chr_cache, arm, parent_mode):
    """Return the first head/jaw anchor bone name present in the armature,
    or None if no candidate exists (or parent_mode is unrecognized)."""
    if parent_mode == "HEAD":
        candidates = HEAD_BONE_NAMES
    elif parent_mode == "JAW":
        candidates = JAW_BONE_NAMES
    else:
        return None
    for name in candidates:
        if name in arm.data.bones:
            return name
    return None
def get_spring_rig_position(chr_cache, arm, root_mode):
    """Returns the approximate position inside the head between the ears at nose height.

    World-space position derived from the head anchor bone and the average of
    any eye bone positions. Returns None when no head anchor edit bone can be
    found (must be called in edit mode for the anchor lookup to succeed).
    """
    head_edit_bone = get_spring_anchor_edit_bone(chr_cache, arm, "HEAD")
    if head_edit_bone:
        head_pos = arm.matrix_world @ head_edit_bone.head
        # average the world positions of all eye bones found
        eye_pos = Vector((0,0,0))
        count = 0
        for eye_bone_name in EYE_BONE_NAMES:
            eye_edit_bone = bones.get_edit_bone(arm, eye_bone_name)
            if eye_edit_bone:
                count += 1
                eye_pos += arm.matrix_world @ eye_edit_bone.head
        if count > 0:
            eye_pos /= count
        if root_mode == "HEAD":
            # head centre (x, y) at eye height (z)
            return Vector((head_pos[0], head_pos[1], eye_pos[2]))
        elif root_mode == "JAW":
            # biased 2/3 towards the eyes along y, at head bone height
            return Vector((head_pos[0], (head_pos[1] + 2 * eye_pos[1]) / 3, head_pos[2]))
        else:
            return head_pos
    return None
def get_spring_anchor_edit_bone(chr_cache, arm, parent_mode):
    """Return the anchor bone for `parent_mode` as an edit bone, or None
    when unavailable (e.g. not in edit mode, or no anchor found)."""
    try:
        anchor_name = get_spring_anchor_name(chr_cache, arm, parent_mode)
        return arm.data.edit_bones[anchor_name]
    except:
        return None
def is_hair_bone(bone_name):
    """True if `bone_name` belongs to a hair or beard spring chain."""
    return bone_name.startswith((HAIR_BONE_PREFIX, BEARD_BONE_PREFIX))
def is_hair_rig_bone(bone_name):
    """True if `bone_name` is a spring rig root bone (head or jaw)."""
    return bone_name.startswith((HEAD_RIG_NAME, JAW_RIG_NAME))
def convert_spring_rig_to_accessory(chr_cache, arm, objects, parent_mode):
    """Removes all none hair rig vertex groups from objects so that CC4 recognizes them as accessories
    and not cloth or hair.\n\n
    Accessories are categorized by:\n
    1. A bone representing the accessory parented to a CC Base bone. (This is the spring rig root bone)
    2. Child accessory deformation bone(s) parented to the accessory bone in 1.
    3. Object(s) with vertex weights to ONLY these accessory deformation bones in 2.
    4. All vertices in the accessory must be weighted.

    Returns the new accessory name, or None if no spring rig exists.
    """
    groups_to_remove = []
    # name the accessory after the active object (falling back to the first)
    active_object = bpy.context.active_object
    if active_object not in objects:
        active_object = objects[0]
    accessory_name = active_object.name + "_Accessory"
    # get a list of all bones in the spring rig
    spring_rig_bone = get_spring_rig(chr_cache, arm, parent_mode)
    if not spring_rig_bone:
        return None
    spring_bones = bones.get_bone_children(spring_rig_bone)
    spring_bone_names = [ bone.name for bone in spring_bones ]
    utils.log_info(f"Converting spring rig: {parent_mode} to accessory:")
    utils.log_info(f"Spring rig bones: {spring_bone_names}")
    # find all character objects with vertex groups for these bones
    # NOTE: the `objects` parameter is rebound here - all character meshes
    # are scanned, not just the ones passed in
    accessory_objects = set()
    objects = chr_cache.get_all_objects(include_armature=False,
                                        include_children=True,
                                        of_type="MESH")
    for obj in objects:
        for vg in obj.vertex_groups:
            if vg.name in spring_bone_names:
                accessory_objects.add(obj)
    # in these objects remove all vertex groups not from these bones
    for obj in accessory_objects:
        utils.log_info(f"Accessory Object: {obj.name}")
        groups_to_remove = []
        for vg in obj.vertex_groups:
            if vg.name not in spring_bone_names:
                groups_to_remove.append(vg)
        for vg in groups_to_remove:
            obj.vertex_groups.remove(vg)
    # rename the rig root to the accessory name and recolour the bones
    spring_rig_bone.name = accessory_name
    spring_bones.append(spring_rig_bone)
    for bone in spring_bones:
        bones.set_bone_collection(arm, bone, "Accessory", color="SPECIAL")
    toggle_show_spring_bones(chr_cache)
    utils.log_info(f"Accessory Created: {accessory_name}")
    return accessory_name
def is_rigified(chr_cache, rig, parent_mode):
    """Return True/False for the spring rig's "rigified" flag, or None when
    the rig/spring rig cannot be resolved."""
    if not (chr_cache and rig and parent_mode):
        return None
    spring_rig = get_spring_rig(chr_cache, rig, parent_mode)
    if not spring_rig:
        return None
    pose_bone = rig.pose.bones[spring_rig.name]
    # custom property set by the rigify conversion
    return bool("rigified" in pose_bone and pose_bone["rigified"])
def realign_spring_bones_axis(chr_cache, arm):
    """Roll every spring chain bone so its z-axis points away from its rig
    root, and connect child bones to their parents."""
    utils.edit_mode_to(arm, True)
    # align z-axis away from the spring roots
    spring_rigs = get_spring_rigs(chr_cache, arm, mode = "EDIT")
    for parent_mode in spring_rigs:
        spring_root = spring_rigs[parent_mode]["bone"]
        spring_bones = bones.get_bone_children(spring_root, include_root=False)
        for bone in spring_bones:
            head = arm.matrix_world @ bone.head
            tail = arm.matrix_world @ bone.tail
            origin = arm.matrix_world @ spring_root.head
            # direction from the root to the bone's mid-point (world space)
            z_axis = (((head + tail) * 0.5) - origin).normalized()
            bone.align_roll(z_axis)
            # connect all but the first bone of each chain
            if bone.parent != spring_root:
                bone.use_connect = True
    # save edit mode changes
    utils.object_mode_to(arm)
def enumerate_spring_rigs(self, context):
    """EnumProperty items callback listing the character's spring rigs.

    The items are kept in the module-level AVAILABLE_SPRING_RIG_LIST so the
    item strings stay referenced from Python (a Blender enum callback
    requirement).
    """
    global AVAILABLE_SPRING_RIG_LIST
    props = vars.props()
    chr_cache = props.get_context_character_cache(context)
    if chr_cache:
        arm = chr_cache.get_armature()
        rigs = get_spring_rigs(chr_cache, arm, mode = "POSE")
        AVAILABLE_SPRING_RIG_LIST.clear()
        for parent_mode in rigs:
            label = f"{parent_mode} Rig"
            AVAILABLE_SPRING_RIG_LIST.append((parent_mode, label, label))
        if not rigs:
            AVAILABLE_SPRING_RIG_LIST.append(("NONE", "No Rig", "No Rig"))
    return AVAILABLE_SPRING_RIG_LIST
def show_spring_bone_edit_layer(chr_cache, arm, show):
    """Show or hide the "Spring (Edit)" bone collection and adjust the
    armature display settings to suit editing."""
    if not arm:
        return
    if show:
        # isolate the spring edit collection and draw the rig on top
        bones.set_bone_collection_visibility(arm, "Spring (Edit)", vars.SPRING_EDIT_LAYER, True, only=True)
        arm.show_in_front = True
        arm.display_type = 'SOLID'
        #arm.data.display_type = 'STICK'
    else:
        # show everything except the spring edit collection
        bones.set_bone_collection_visibility(arm, "Spring (Edit)", vars.SPRING_EDIT_LAYER, True, invert=True)
        arm.show_in_front = False
        arm.display_type = 'WIRE' if chr_cache.rigified else 'SOLID'
        #arm.data.display_type = 'OCTAHEDRAL'
def show_spring_bone_rig_layers(chr_cache, arm, show):
    """Toggle visibility of the "Spring (FK)" bone collection, restoring the
    armature display type when hiding."""
    if not arm:
        return
    if show:
        bones.set_bone_collection_visibility(arm, "Spring (FK)", vars.SPRING_FK_LAYER, True)
        arm.show_in_front = False
    else:
        bones.set_bone_collection_visibility(arm, "Spring (FK)", vars.SPRING_FK_LAYER, False)
        arm.show_in_front = False
        arm.display_type = 'WIRE' if chr_cache.rigified else 'SOLID'
        #arm.data.display_type = 'OCTAHEDRAL'
def stop_spring_animation(context):
    """Cancel any running playback and rewind to the start frame."""
    # stop any playing animation
    if context.screen.is_animation_playing:
        bpy.ops.screen.animation_cancel(restore_frame=False)
    # reset the animation (it is very unstable if we don't do this)
    bpy.ops.screen.frame_jump(end = False)
def reset_spring_physics(context):
    """Invalidate the rigid body point cache and rewind to the start frame.

    The frame is bumped first so Blender registers the cache invalidation,
    then the timeline is jumped to the end and back to the start.
    """
    props = vars.props()
    chr_cache = props.get_context_character_cache(context)
    if chr_cache:
        arm = chr_cache.get_armature()
        if arm:
            # make sure the armature is evaluated in pose position
            arm.data.pose_position = "POSE"
    # reset the physics cache
    bpy.context.scene.frame_current = bpy.context.scene.frame_current + 1
    rigidbody.reset_cache(context)
    # reset the animation again for good measure...
    bpy.ops.screen.frame_jump(end = True)
    bpy.ops.screen.frame_jump(end = False)
def add_spring_colliders(chr_cache):
    """Build rigid body colliders for the character if none exist yet."""
    arm = chr_cache.get_armature()
    if rigidbody.has_rigid_body_colliders(arm):
        return
    json_data = chr_cache.get_json_data()
    # rigified characters need their bone names remapped for the collider data
    bone_mapping = chr_cache.get_rig_bone_mapping() if chr_cache.rigified else None
    rigidbody.build_rigid_body_colliders(chr_cache, json_data, bone_mapping=bone_mapping)
def toggle_show_spring_bones(chr_cache, show_hide=None):
    """Show, hide or toggle the spring bone edit layer.

    show_hide: True to show, False to hide, None to toggle the current state.
    """
    if chr_cache:
        arm = chr_cache.get_armature()
    else:
        arm = utils.get_armature_from_objects(bpy.context.selected_objects)
    if not arm:
        return
    # BUGFIX: was `if show_hide:`, which treated an explicit False the same
    # as None and toggled instead of hiding.
    if show_hide is not None:
        show_spring_bone_edit_layer(chr_cache, arm, show_hide)
    else:
        visible = bones.is_bone_collection_visible(arm, "Spring (Edit)", vars.SPRING_EDIT_LAYER)
        show_spring_bone_edit_layer(chr_cache, arm, not visible)
class CC3OperatorSpringBones(bpy.types.Operator):
    """Blender Spring Bone Functions"""
    bl_idname = "cc3.springbones"
    bl_label = "Spring Bone Simulation"
    #bl_options = {"REGISTER", "UNDO", "INTERNAL"}

    # Selects which spring bone operation execute() performs.
    param: bpy.props.StringProperty(
            name = "param",
            default = ""
        )

    def execute(self, context):
        props = vars.props()
        prefs = vars.prefs()
        # remember mode/selection so it can be restored after the operation
        mode_selection = utils.store_mode_selection_state()
        chr_cache = props.get_context_character_cache(context)
        arm = None
        if chr_cache:
            arm = chr_cache.get_armature()
        if self.param == "MAKE_RIGID_BODY_SYSTEM":
            stop_spring_animation(context)
            if arm:
                parent_mode = chr_cache.available_spring_rigs
                spring_rig_name = get_spring_rig_name(arm, parent_mode)
                spring_rig_prefix = get_spring_rig_prefix(parent_mode)
                rigidbody.build_spring_rigid_body_system(chr_cache, spring_rig_prefix, spring_rig_name)
                add_spring_colliders(chr_cache)
            reset_spring_physics(context)
            utils.restore_mode_selection_state(mode_selection)
        if self.param == "REMOVE_RIGID_BODY_SYSTEM":
            stop_spring_animation(context)
            if arm:
                parent_mode = props.hair_rig_bone_root
                spring_rig_name = get_spring_rig_name(arm, parent_mode)
                spring_rig_prefix = get_spring_rig_prefix(parent_mode)
                rigidbody.remove_existing_rigid_body_system(arm, spring_rig_prefix, spring_rig_name)
            reset_spring_physics(context)
        if self.param == "ENABLE_RIGID_BODY_COLLISION":
            stop_spring_animation(context)
            objects = utils.get_selected_meshes(context)
            for body in objects:
                rigidbody.enable_rigid_body_collision_mesh(chr_cache, body)
            reset_spring_physics(context)
            utils.restore_mode_selection_state(mode_selection)
        if self.param == "DISABLE_RIGID_BODY_COLLISION":
            stop_spring_animation(context)
            objects = utils.get_selected_meshes(context)
            for obj in objects:
                rigidbody.disable_rigid_body_collision_mesh(chr_cache, obj)
            reset_spring_physics(context)
            utils.restore_mode_selection_state(mode_selection)
        if self.param == "RESET_PHYSICS":
            stop_spring_animation(context)
            reset_spring_physics(context)
            utils.restore_mode_selection_state(mode_selection)
        elif self.param == "BUILD_COLLIDERS":
            stop_spring_animation(context)
            reset_spring_physics(context)
            add_spring_colliders(chr_cache)
            rigidbody.toggle_show_colliders(arm)
            utils.restore_mode_selection_state(mode_selection)
        elif self.param == "REMOVE_COLLIDERS":
            stop_spring_animation(context)
            reset_spring_physics(context)
            rigidbody.remove_rigid_body_colliders(arm)
            #utils.restore_mode_selection_state(mode_selection)
        elif self.param == "TOGGLE_SHOW_COLLIDERS":
            rigidbody.toggle_show_colliders(arm)
            #utils.restore_mode_selection_state(mode_selection)
        if self.param == "BAKE_PHYSICS":
            # force "Play Every Frame" so the bake isn't frame-skipped
            context.scene.sync_mode = "NONE"
            utils.object_mode_to(arm)
            reset_spring_physics(context)
            utils.log_info("Baking rigid body world point cache...")
            # NOTE(review): dict context override is deprecated in Blender 3.2+
            # (use context.temp_override) - confirm against minimum supported version
            bpy.ops.ptcache.bake({"point_cache": bpy.context.scene.rigidbody_world.point_cache},
                                 "INVOKE_DEFAULT", bake=True)
            # as py.ops.ptcache.bake is a modal operator, don't do *anything* afterwards,
            # or Blender will crash...
            return {"FINISHED"}
        return {"FINISHED"}

    @classmethod
    def description(cls, context, properties):
        """Dynamic tooltip text for each operator param."""
        props = vars.props()
        if properties.param == "MAKE_RIGID_BODY_SYSTEM":
            return "Build the rigid body simulation for the selected spring rig and sets contraints to copy the simulation to the spring bones"
        elif properties.param == "REMOVE_RIGID_BODY_SYSTEM":
            return "Removes the rigid body simulation for the selected spring rig and removes all constraints"
        elif properties.param == "ENABLE_RIGID_BODY_COLLISION":
            return "Enables rigid body collision for the selected mesh (or it's collision proxy mesh), so it can interact with the spring bone simulation"
        elif properties.param == "DISABLE_RIGID_BODY_COLLISION":
            return "Removes rigid body collision for the selected mesh (or it's collision proxy mesh), so it can interact with the spring bone simulation"
        elif properties.param == "RESET_PHYSICS":
            return "Resets the spring bone physics rigid body world point cache and synchronizes the cache range with the current scene or preview range"
        elif properties.param == "BAKE_PHYSICS":
            return "Bakes the rigid body world point cache for all spring bone simulations"
        return ""
# ---- file boundary (concatenation artifact; original diff header: @@ -0,0 +1,590 @@) ----
# Copyright (C) 2021 Victor Soupday
# This file is part of CC/iC Blender Tools <https://github.com/soupday/cc_blender_tools>
#
# CC/iC Blender Tools is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# CC/iC Blender Tools is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with CC/iC Blender Tools. If not, see <https://www.gnu.org/licenses/>.
# Set by __init__.py from the bl_info dict
import bpy
# Display version; overwritten at registration by set_version_string() from
# the bl_info dict in __init__.py.
VERSION_STRING = "v2.3.4"
# Developer mode flag.
DEV = False
#DEV = True
# NOTE(review): presumably the companion CC4 plugin versions this add-on can
# talk to - verify against the data-link code.
PLUGIN_COMPATIBLE = [
    "2.3.4",
]
def set_version_string(bl_info):
    """Build the global VERSION_STRING ("vX.Y.Z") from bl_info["version"]."""
    global VERSION_STRING
    VERSION_STRING = "v" + ".".join(str(part) for part in bl_info["version"])
def prefs():
    """Return this add-on's preferences instance."""
    from . preferences import CC3ToolsAddonPreferences
    # the add-on key is the root package name
    res: CC3ToolsAddonPreferences = bpy.context.preferences.addons[__name__.partition(".")[0]].preferences
    return res
def props():
    """Return the scene's CC3ImportProps property group, or None if unregistered."""
    from . import properties
    from . properties import CC3ImportProps
    res: CC3ImportProps = getattr(bpy.context.scene, "CC3ImportProps", None)
    return res
def link_props():
    """Return the scene's CCICLinkProps property group, or None if unregistered."""
    from . properties import CCICLinkProps
    res: CCICLinkProps = getattr(bpy.context.scene, "CCICLinkProps", None)
    return res
def bake_props():
    """Return the scene's CCICBakeProps property group, or None if unregistered."""
    from . properties import CCICBakeProps
    res: CCICBakeProps = getattr(bpy.context.scene, "CCICBakeProps", None)
    return res
def get_context(context=None) -> bpy.types.Context:
    """Return the given context, falling back to bpy.context when not supplied."""
    return context if context else bpy.context
# blender uses metres, CC3 uses centimetres
UNIT_SCALE = 0.01
SKIN_SSS_RADIUS_SCALE = 0.01
DEFAULT_SSS_RADIUS_SCALE = 0.01
TEETH_SSS_RADIUS_SCALE = 0.01
TONGUE_SSS_RADIUS_SCALE = 0.01
HAIR_SSS_RADIUS_SCALE = 0.01
EYES_SSS_RADIUS_SCALE = 0.01 / 5.0
EMISSION_SCALE = 50.0
SSS_CYCLES_MOD = 1 #0.05
# https://docs.blender.org/manual/en/latest/files/media/image_formats.html
IMAGE_TYPES = [".bmp", ".sgi", ".rgb", ".bw", ".png", ".jpg", ".jpeg", ".jp2", ".j2c",
".tga", ".cin", ".dpx", ".exr", ".hdr", ".tif", ".tiff"]
# base names of all node groups in the library blend file
NODE_GROUPS = ["tiling_pivot_mapping", "tiling_mapping",
"rl_tearline_shader", "rl_tearline_plus_shader",
"rl_eye_occlusion_shader", "rl_eye_occlusion_plus_shader",
"rl_skin_shader", "rl_head_shader",
"rl_tongue_shader", "rl_teeth_shader",
"rl_cornea_refractive_shader", "rl_eye_refractive_shader",
"rl_cornea_parallax_shader", "tiling_cornea_parallax_mapping",
"rl_pbr_shader", "rl_sss_shader",
"rl_hair_shader", "rl_hair_cycles_shader",
"rl_eye_occlusion_cycles_mix_shader", "rl_tearline_cycles_shader",
"rl_tearline_cycles_mix_shader", "rl_tearline_plus_shader",
"rl_rgb_mixer", "rl_id_mixer",
"rl_tex_mod_normal_ao_blend",
"rl_wrinkle_shader",
"rl_bsdf_dual_specular",
]
ENUM_MATERIAL_TYPES = [
("DEFAULT", "Default", "Default material"),
("SSS", "Subsurface", "Subsurface Scattering material"),
("SKIN_HEAD", "Head", "Head skin material"),
("SKIN_BODY", "Body", "Body skin material"),
("SKIN_ARM", "Arm", "Arm skin material"),
("SKIN_LEG", "Leg", "Leg skin material"),
("TEETH_UPPER", "Upper Teeth", "Upper teeth material"),
("TEETH_LOWER", "Lower Teeth", "Lower teeth material"),
("TONGUE", "Tongue", "Tongue material"),
("HAIR", "Hair", "Hair material"),
("SCALP", "Scalp", "Scalp or base hair material"),
("EYELASH", "Eyelash", "Eyelash material"),
("NAILS", "Nails", "Finger and toe nails material"),
("CORNEA_RIGHT", "Right Cornea", "Right cornea material."),
("CORNEA_LEFT", "Left Cornea", "Left cornea material."),
("EYE_RIGHT", "Right Eye", "Basic PBR right eye material."),
("EYE_LEFT", "Left Eye", "Basic PBR left eye material."),
("OCCLUSION_RIGHT", "Right Eye Occlusion", "Right eye occlusion material"),
("OCCLUSION_LEFT", "Left Eye Occlusion", "Left eye occlusion material"),
("OCCLUSION_PLUS_RIGHT", "Right Eye Occlusion Plus", "Right eye occlusion material"),
("OCCLUSION_PLUS_LEFT", "Left Eye Occlusion Plus", "Left eye occlusion material"),
("TEARLINE_RIGHT", "Right Tearline", "Right tear line material"),
("TEARLINE_LEFT", "Left Tearline", "Left tear line material"),
("TEARLINE_PLUS_RIGHT", "Right Tearline Plus", "Right tear line material"),
("TEARLINE_PLUS_LEFT", "Left Tearline Plus", "Left tear line material"),
]
ENUM_OBJECT_TYPES = [
("DEFAULT", "Default", "Default object type"),
("BODY", "Body", "Base character body object"),
("TEETH", "Teeth", "Teeth object"),
("TONGUE", "Tongue", "Tongue object"),
("HAIR", "Hair", "Hair object or object with hair"),
("EYE", "Eye", "Eye object"),
("OCCLUSION", "Eye Occlusion", "Eye occlusion object"),
("OCCLUSION_PLUS", "Eye Occlusion Plus", "Eye occlusion plus object"),
("TEARLINE", "Tearline", "Tear line object"),
("TEARLINE_PLUS", "Tearline Plus", "Tear line plus object"),
]
CHARACTER_GENERATION = {
"RL_CC3_Plus": "G3Plus",
"G3Plus": "G3Plus",
"RL_CharacterCreator_Base_Game_G1_Divide_Eyelash_UV": "GameBase",
"RL_CharacterCreator_Base_Game_G1_Multi_UV": "GameBase",
"RL_CharacterCreator_Base_Game_G1_One_UV": "GameBase",
"GameBase": "GameBase",
"RL_CharacterCreator_Base_Std_G3": "G3",
"G3": "G3",
"RL_G6_Standard_Series": "G1",
"G1": "G1",
"NonStdLookAtDataCopyFromCCBase": "ActorCore",
"ActorCore": "ActorCore",
"ActorBuild": "ActorBuild",
"ActorScan": "ActorScan",
"AccuRig": "AccuRig",
"Humanoid": "Humanoid",
"Creature": "Creature",
"Prop": "Prop",
"NonStandardG3": "ActorBuild",
"NonStandardGameBase": "GameBase",
"NonStandardGeneric": "Unknown",
"Generic": "Unknown",
"NonStandard" : "Unknown",
}
# character generations considered standard humans and require FBX/OBJ keys to export
STANDARD_GENERATIONS = [
"G3Plus", "G3",
]
PROP_GENERATIONS = [
"Prop",
]
# texture size enum items (power-of-two sizes)
ENUM_TEX_LIST = [
    ("64","64 x 64","64 x 64 texture size"),
    ("128","128 x 128","128 x 128 texture size"),
    ("256","256 x 256","256 x 256 texture size"),
    ("512","512 x 512","512 x 512 texture size"),
    ("1024","1024 x 1024","1024 x 1024 texture size"),
    ("2048","2048 x 2048","2048 x 2048 texture size"),
    ("4096","4096 x 4096","4096 x 4096 texture size"),
    ("8192","8192 x 8192","8192 x 8192 texture size"),
]

# prefix applied to node names created by this add-on
NODE_PREFIX = "cc3iid_"
# node editor layout grid spacing
GRID_SIZE = 300

# vertex group names used by the eye occlusion / tearline meshes
OCCLUSION_GROUP_INNER = "CC_EyeOcclusion_Inner"
OCCLUSION_GROUP_OUTER = "CC_EyeOcclusion_Outer"
OCCLUSION_GROUP_TOP = "CC_EyeOcclusion_Top"
OCCLUSION_GROUP_BOTTOM = "CC_EyeOcclusion_Bottom"
OCCLUSION_GROUP_ALL = "CC_EyeOcclusion_All"
TEARLINE_GROUP_INNER = "CC_Tearline_Inner"
TEARLINE_GROUP_ALL = "CC_Tearline_All"

ENUM_ARMATURE_TYPES = [
    ("NONE","Unknown","Unknown structure"),
    ("CC3","CC3","CC3, CC3+, iClone / ActorCore"),
    ("RIGIFY","Rigify","Rigify control rig structure"),
]

ENUM_ACTION_TYPES = [
    ("NONE","Unknown","Unknown action"),
    ("ARMATURE","Armature","Armature Action"),
    ("KEY","Shapekey","Shapekey Action"),
]

# NOTE(review): "ACCESORY" is misspelled, but the identifier may be referenced
# across modules - leave as-is.
ACCESORY_PIVOT_NAME = "CC_Base_Pivot"

# viseme shape key names as exported by CC3
CC3_VISEME_NAMES = [
    "Open", "Explosive", "Dental_Lip", "Tight-O", "Tight", "Wide", "Affricate", "Lip_Open",
    "Tongue_up", "Tongue_Raise", "V_Tongue_Raise", "Tongue_Out", "Tongue_Narrow", "Tongue_Lower", "Tongue_Curl-U", "Tongue_Curl-D",
]

# viseme shape key names as exported by CC4 ("V_" prefixed)
CC4_VISEME_NAMES = [
    "V_Open", "V_Explosive", "V_Dental_Lip", "V_Tight_O", "V_Tight", "V_Wide", "V_Affricate", "V_Lip_Open",
    "V_Tongue_up", "V_Tongue_Raise", "V_Tongue_Out", "V_Tongue_Narrow", "V_Tongue_Lower", "V_Tongue_Curl_U", "V_Tongue_Curl_D",
]

# direct phoneme viseme names
DIRECT_VISEME_NAMES = [
    "EE", "Er", "IH", "Ah", "Oh", "W_OO", "S_Z", "Ch_J", "F_V", "TH", "T_L_D_N", "B_M_P", "K_G_H_NG", "AE", "R",
]

# channel packing node names and id's
PACK_DIFFUSEROUGHNESS_NAME = "DR Pack"
PACK_DIFFUSEROUGHNESS_ID = "DR_PACK"
# NOTE(review): BLEND1/2/3 all share the "DRB1 Pack" / "DRB1_PACK" strings -
# looks like a copy-paste slip (expected DRB2/DRB3?). Confirm before changing,
# since these ids are matched against node names in existing scenes.
PACK_DIFFUSEROUGHNESSBLEND1_NAME = "DRB1 Pack"
PACK_DIFFUSEROUGHNESSBLEND1_ID = "DRB1_PACK"
PACK_DIFFUSEROUGHNESSBLEND2_NAME = "DRB1 Pack"
PACK_DIFFUSEROUGHNESSBLEND2_ID = "DRB1_PACK"
PACK_DIFFUSEROUGHNESSBLEND3_NAME = "DRB1 Pack"
PACK_DIFFUSEROUGHNESSBLEND3_ID = "DRB1_PACK"
PACK_WRINKLEROUGHNESS_NAME = "Roughness Pack"
PACK_WRINKLEROUGHNESS_ID = "ROUGHNESS_PACK"
PACK_WRINKLEDISPLACEMENT_NAME = "Displacement Pack"
PACK_WRINKLEDISPLACEMENT_ID = "DISPLACEMENT_PACK"
PACK_WRINKLEFLOW_NAME = "Flow Pack"
PACK_WRINKLEFLOW_ID = "FLOW_PACK"
PACK_SSTM_NAME = "SSTM Pack"
PACK_SSTM_ID = "SSTM_PACK"
PACK_MICRODETAIL_NAME = "MICRODetail Pack"
PACK_MICRODETAIL_ID = "MICRODETAIL_PACK"
PACK_MSMNAO_NAME = "MSMNAO Pack"
PACK_MSMNAO_ID = "MSMNAO_PACK"
PACK_DIFFUSEALPHA_NAME = "DiffuseAlpha Pack"
PACK_DIFFUSEALPHA_ID = "DIFFUSEALPHA_PACK"
PACK_ROOTID_NAME = "RootID Pack"
PACK_ROOTID_ID = "ROOTID_PACK"
PACK_MRSO_NAME = "MRSO Pack"
PACK_MRSO_ID = "MRSO_PACK"
PACK_SSTMMNM_NAME = "SSTMMNM Pack"
PACK_SSTMMNM_ID = "SSTMMNM_PACK"

# material names used by the game base character skin
GAME_BASE_SKIN_NAMES = ["Ga_Skin_Arm", "Ga_Skin_Body", "Ga_Skin_Head", "Ga_Skin_Leg"]

#########################################################
# BAKE TOOL VARS

# prefix applied to nodes created by the bake utility
BAKE_PREFIX = "bakeutil_"

# fallback texture size when no size info is available / default bake size
NO_SIZE = 64
DEFAULT_SIZE = 1024

# bake target enum items
BAKE_TARGETS = [
    ("NONE", "None", "Don't bake anything"),
    ("BLENDER","Blender", "Bake textures for Blender. The baked textures should be more performant than the complex node materials"),
    ("RL","Reallusion", "Bake textures for iClone / Character Creator"),
    ("SKETCHFAB","Sketchfab", "Bake and name the textures for Sketchfab. Uploading the baked textures with the .blend file to Sketchfab should auto connect the textures to the materials"),
    ("GLTF","GLTF", "Bake the relevant textures to be compatible with the GLTF exporter"),
    ("UNITY_HDRP","Unity HDRP","Bake and pack the textures for the Unity HDRP/Lit shader. Once baked only the BaseMap, Mask and Detail, Subsurface, Thickness and Emission textures are needed"),
    ("UNITY_URP","Unity 3D/URP","Bake the textures for Unity 3D Standard shader or for URP/Lit shader"),
    ("GODOT","Godot Engine","Bake the textures to be compatible with Godot Blender Exporter add-on"),
]

# output image format enum items
TARGET_FORMATS = [
    ("PNG","PNG", "Bake textures to PNG Format."),
    ("JPEG","JPEG", "Bake textures to JPEG Format."),
]

# roughness conversion function enum items
CONVERSION_FUNCTIONS = [
    ("IR","1 - R", "Inverted Roughness"),
    ("SIR","(1 - R)^2", "Squared Inverted Roughnes"),
    ("IRS","1 - R^2", "Inverted Roughness Squared"),
    ("IRSR","1 - sqrt(R)","Inverted Roughness Square Root"),
    ("SRIR","sqrt(1 - R)","Square Root of Inverted Roughness"),
    ("SRIRS","sqrt(1 - R^2)","Square Root of Inverted Roughness Squared"),
]
def get_bake_target_maps(target):
    """Return the texture map table for the given bake target.

    Any unrecognised target (including "NONE" and "BLENDER") falls back
    to the Blender map table.
    """
    target_tables = {
        "SKETCHFAB": SKETCHFAB_MAPS,
        "GLTF": GLTF_MAPS,
        "UNITY_URP": UNITY_URP_MAPS,
        "UNITY_HDRP": UNITY_HDRP_MAPS,
        "RL": RL_MAPS,
        "GODOT": GODOT_MAPS,
    }
    return target_tables.get(target, BLENDER_MAPS)
# Per-target texture map tables.
# global_suffix: ['target_suffix', 'prop_name'] where:
#   key           = internal (global) texture channel suffix
#   target_suffix = suffix used to name the baked image for that target
#   prop_name     = add-on property name holding the bake size for the channel
RL_MAPS = {
    "Diffuse": ["Diffuse", "diffuse_size"],
    "AO": ["AO", "ao_size"],
    "Blend": ["BlendMultiply", "diffuse_size"],
    "Subsurface": ["SSS", "sss_size"],
    "Thickness": ["Transmission", "thickness_size"],
    "Metallic": ["Metallic", "metallic_size"],
    "Specular": ["Specular", "specular_size"],
    "Roughness": ["Roughness", "roughness_size"],
    "Emission": ["Emission", "emissive_size"],
    "Alpha": ["Alpha", "alpha_size"],
    "Normal": ["Normal", "normal_size"],
    "Bump": ["Bump", "bump_size"],
    "MicroNormal": ["MicroNormal", "micronormal_size"],
    "MicroNormalMask": ["MicroNormalMask", "micronormalmask_size"],
}
# Map table for baking to Blender-native materials.
BLENDER_MAPS = {
    "Diffuse": ["Diffuse", "diffuse_size"],
    "Subsurface": ["Subsurface", "sss_size"],
    "Metallic": ["Metallic", "metallic_size"],
    "Specular": ["Specular", "specular_size"],
    "Roughness": ["Roughness", "roughness_size"],
    "Emission": ["Emission", "emissive_size"],
    "Alpha": ["Alpha", "alpha_size"],
    "Transmission": ["Transmission", "transmission_size"],
    "Normal": ["Normal", "normal_size"],
    "Bump": ["Bump", "bump_size"],
    "MicroNormal": ["MicroNormal", "micronormal_size"],
    "MicroNormalMask": ["MicroNormalMask", "micronormalmask_size"],
}
# Same as BLENDER_MAPS but without the micro-normal channels (not used by
# the Godot Blender Exporter target).
GODOT_MAPS = {
    "Diffuse": ["Diffuse", "diffuse_size"],
    "Subsurface": ["Subsurface", "sss_size"],
    "Metallic": ["Metallic", "metallic_size"],
    "Specular": ["Specular", "specular_size"],
    "Roughness": ["Roughness", "roughness_size"],
    "Emission": ["Emission", "emissive_size"],
    "Alpha": ["Alpha", "alpha_size"],
    "Transmission": ["Transmission", "transmission_size"],
    "Normal": ["Normal", "normal_size"],
    "Bump": ["Bump", "bump_size"],
}
# Sketchfab expects lower-case suffixes so its uploader can auto-connect maps.
SKETCHFAB_MAPS = {
    "Diffuse": ["diffuse", "diffuse_size"],
    "AO": ["ao", "ao_size"],
    "Subsurface": ["subsurface", "sss_size"],
    "Thickness": ["thickness", "thickness_size"],
    "Metallic": ["metallic", "metallic_size"],
    "Specular": ["specularf0", "specular_size"],
    "Roughness": ["roughness", "roughness_size"],
    "Emission": ["emission", "emissive_size"],
    "Alpha": ["opacity", "alpha_size"],
    "Normal": ["normal", "normal_size"],
    "Bump": ["bump", "bump_size"],
}
# glTF exporter channels; "GLTF" is the packed occlusion/roughness/metallic map.
GLTF_MAPS = {
    "Diffuse": ["baseColor", "basemap_size"],
    "AO": ["occlusion", "gltf_size"],
    "Metallic": ["metallic", "gltf_size"],
    "Roughness": ["roughness", "gltf_size"],
    "Emission": ["emission", "emissive_size"],
    "Alpha": ["alpha", "basemap_size"],
    "Normal": ["normal", "normal_size"],
    # packed maps
    "BaseMap": ["baseMap", "basemap_size"],
    "GLTF": ["glTF", "gltf_size"],
}
# Unity URP / Standard shader channels.
UNITY_URP_MAPS = {
    "Diffuse": ["Diffuse", "basemap_size"],
    "AO": ["Occlusion", "ao_size"],
    "Metallic": ["Metallic", "metallic_alpha_size"],
    "Roughness": ["Roughness", "metallic_alpha_size"],
    "Emission": ["Emission", "emission_size"],
    "Alpha": ["Opacity", "basemap_size"],
    "Normal": ["Normal", "normal_size"],
    "Bump": ["bump", "bump_size"],
    # NOTE(review): "MicroNormal"->"Mask" and "MicroNormalMask"->"Detail"
    # look swapped relative to UNITY_HDRP_MAPS — confirm this is intended.
    "MicroNormal": ["Mask", "micronormalmask_size"],
    "MicroNormalMask": ["Detail", "detail_size"],
    # packed maps
    "BaseMap": ["BaseMap", "basemap_size"],
    "MetallicAlpha": ["MetallicAlpha", "metallic_alpha_size"],
}
# Unity HDRP/Lit shader channels.
UNITY_HDRP_MAPS = {
    "Diffuse": ["Diffuse", "basemap_size"],
    "AO": ["Occlusion", "mask_size"],
    "Subsurface": ["Subsurface", "sss_size"],
    "Thickness": ["Thickness", "thickness_size"],
    "Metallic": ["Metallic", "mask_size"],
    "Roughness": ["Roughness", "mask_size"],
    "Emission": ["Emission", "emission_size"],
    "Alpha": ["Opacity", "basemap_size"],
    "Normal": ["Normal", "normal_size"],
    "Bump": ["bump", "bump_size"],
    "MicroNormal": ["MicroNormal", "detail_size"],
    "MicroNormalMask": ["MicroNormalMask", "mask_size"],
    # packed maps
    "BaseMap": ["BaseMap", "basemap_size"],
    "Mask": ["Mask", "mask_size"],
    "Detail": ["Detail", "detail_size"],
}
# Texture size options (EnumProperty items), powers of two from 64 to 8192.
TEX_LIST = [
    ("64","64 x 64","64 x 64 texture size"),
    ("128","128 x 128","128 x 128 texture size"),
    ("256","256 x 256","256 x 256 texture size"),
    ("512","512 x 512","512 x 512 texture size"),
    ("1024","1024 x 1024","1024 x 1024 texture size"),
    ("2048","2048 x 2048","2048 x 2048 texture size"),
    ("4096","4096 x 4096","4096 x 4096 texture size"),
    ("8192","8192 x 8192","8192 x 8192 texture size"),
]
# Size-detection rules per bake-size property:
#   prop_name: [texture channel ids, shader input socket names or None]
# Presumably the first list names source texture channels to probe for a
# size and the second names fallback shader sockets — TODO confirm against
# the detection code (not visible here).
TEX_SIZE_DETECT = {
    "diffuse_size": [
        ["DIFFUSE"], ["Base Color:DIFFUSE"]
    ],
    "ao_size": [
        ["AO"], ["Base Color:AO"]
    ],
    "blend_size": [
        ["BLEND1"], ["Base Color:BLEND"]
    ],
    "sss_size": [
        ["SSS"], None
    ],
    "thickness_size": [
        ["TRANSMISSION"], None
    ],
    "transmission_size": [
        ["TRANSMISSION_OVERRIDE"], ["Transmission"]
        # note: there is no '_TRANSMISSION_B', it's just a key to override the
        # transmission texture size in the TEX_SIZE_OVERRIDE list...
    ],
    "specular_size": [
        ["SPECULAR", "SPECMASK"], ["Specular"]
    ],
    "metallic_size": [
        ["METALLIC"], ["Metallic"]
    ],
    "roughness_size": [
        ["ROUGHNESS"], ["Roughness"]
    ],
    "smoothness_size": [
        ["ROUGHNESS"], ["Roughness"]
    ],
    "emission_size": [
        ["EMISSION"], ["Emission"]
    ],
    "alpha_size": [
        ["ALPHA"], ["Alpha"]
    ],
    "normal_size": [
        ["NORMAL", "NORMALBLEND", "SCLERANORMAL"], ["Normal:NORMAL"]
    ],
    "bump_size": [
        ["BUMP"], ["Normal:BUMP"]
    ],
    "detail_size": [
        ["MICRONORMAL"], None
    ],
    "micronormalmask_size": [
        ["MICRONMASK"], None
    ],
    "micronormal_size": [
        ["MICRONORMAL"], None
    ],
    "mask_size": [
        ["ROUGHNESS", "AO", "METALLIC", "MICRONMASK"],
        ["Base Color:AO", "Roughness", "Metallic"]
    ],
    "metallic_alpha_size": [
        ["ROUGHNESS", "METALLIC"],
        ["Roughness", "Metallic"]
    ],
    "gltf_size": [
        ["AO", "ROUGHNESS", "METALLIC"],
        ["Base Color:AO", "Roughness", "Metallic"]
    ],
    "basemap_size": [
        ["DIFFUSE", "ALPHA"],
        ["Base Color:DIFFUSE", "Alpha"]
    ],
}
# override the texture size for procedurally generated maps
# (keyed by material type id, then texture channel id -> size in pixels)
TEX_SIZE_OVERRIDE = {
    "CORNEA_LEFT": {
        "ROUGHNESS": 256,
        "SSS": 256,
        "SPECULAR": 256,
        "ALPHA": 256,
        "TRANSMISSION_OVERRIDE": 256,
    },
    "CORNEA_RIGHT": {
        "ROUGHNESS": 256,
        "SSS": 256,
        "SPECULAR": 256,
        "ALPHA": 256,
        "TRANSMISSION_OVERRIDE": 256,
    },
    "EYE_LEFT": {
        "ROUGHNESS": 256,
        "SSS": 256,
        "SPECULAR": 256,
    },
    "EYE_RIGHT": {
        "ROUGHNESS": 256,
        "SSS": 256,
        "SPECULAR": 256,
    },
    "OCCLUSION_LEFT": {
        "ALPHA": 256,
    },
    "OCCLUSION_RIGHT": {
        "ALPHA": 256,
    },
    "OCCLUSION_PLUS_LEFT": {
        "ALPHA": 256,
    },
    "OCCLUSION_PLUS_RIGHT": {
        "ALPHA": 256,
    },
    "HAIR": {
        "BUMP": 2048,
    },
    "SMART_HAIR": {
        "BUMP": 2048,
    },
    "SCALP": {
        "BUMP": 2048,
    },
}
# Armature bone layer indices used to organise rig bones
# (spring rig controls plus ORG/MCH/DEF/root/sim layers).
SPRING_IK_LAYER = 19
SPRING_FK_LAYER = 20
SPRING_TWEAK_LAYER = 21
ORG_BONE_LAYER = 31
MCH_BONE_LAYER = 30
DEF_BONE_LAYER = 29
ROOT_BONE_LAYER = 28
SIM_BONE_LAYER = 27
HIDE_BONE_LAYER = 23
SPRING_EDIT_LAYER = 25
SPRING_ROOT_LAYER = 24
# Module-level guard flag; presumably set True to suppress property update
# callbacks during bulk property changes — TODO confirm against usage.
block_property_update = False
@@ -0,0 +1,110 @@
# Copyright (C) 2021 Victor Soupday
# This file is part of CC/iC Blender Tools <https://github.com/soupday/cc_blender_tools>
#
# CC/iC Blender Tools is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# CC/iC Blender Tools is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with CC/iC Blender Tools. If not, see <https://www.gnu.org/licenses/>.
import math
import os
import bpy
from mathutils import Vector, Quaternion, Matrix, Euler, Color
from . import colorspace, imageutils, nodeutils, rigidbody, physics, modifiers, utils, vars
def get_default_hdri_path(hdri_name):
    """Return the path to one of Blender's bundled world HDRIs.

    Bundled studio-light HDRIs live under the Blender install directory:
        <binary dir>/<major.minor>/datafiles/studiolights/world/

    Args:
        hdri_name: File name of the HDRI, e.g. "forest.exr".

    Returns:
        The joined path string (existence is not checked here).
    """
    bin_dir, bin_file = os.path.split(bpy.app.binary_path)
    # Build "major.minor" from the version tuple rather than slicing
    # version_string: "3.4.1"[:4] yields "3.4." (trailing dot), which is not
    # the datafiles folder name for single-digit minor versions.
    version = "%d.%d" % (bpy.app.version[0], bpy.app.version[1])
    hdri_path = os.path.join(bin_dir, version, "datafiles", "studiolights", "world", hdri_name)
    return hdri_path
def copy_material_to_render_world(context):
    """Copy the viewport's studio-light world settings into the scene world.

    Reads the selected studio light (HDRI path), its Z rotation and its
    intensity from the 3D view shading settings, then rebuilds the scene
    world nodes to match via world_setup() (black ambient color, no
    location offset, unit scale).  Does nothing if no 3D view shading is
    available.
    """
    shading = utils.get_view_3d_shading(context)
    if shading:
        studio_light = shading.selected_studio_light
        ibl_path = studio_light.path
        loc = Vector((0, 0, 0))
        rot_z = shading.studiolight_rotate_z
        rot = Vector((0, 0, rot_z))
        # renamed local from `str` to avoid shadowing the builtin
        strength = shading.studiolight_intensity
        col = (0, 0, 0, 1)
        world_setup(context, ibl_path, col, loc, rot, 1.0, strength)
def world_setup(context, hdri_path: str, ambient_color, loc: Vector, rot: Vector, sca: float, str: float):
    """Rebuild the scene world shading node tree.

    If `hdri_path` points to an existing file, builds an HDRI world:
    TexCoord -> Mapping (loc/rot/sca) -> Environment Texture, additively
    mixed with a flat ambient RGB node and fed into a Background node;
    `str` is the mix factor applied to the HDRI contribution.  Otherwise
    builds a simple flat-color world from `ambient_color` only.

    Note: parameter `str` shadows the builtin name; kept as-is for caller
    compatibility.
    """
    # accept a mathutils.Color for the ambient color and expand to RGBA
    if type(ambient_color) is Color:
        ambient_color = (ambient_color.r, ambient_color.g, ambient_color.b, 1.0)
    shading = utils.get_view_3d_shading(context)
    if hdri_path and os.path.exists(hdri_path):
        # HDRI world: rebuild the world node tree from scratch
        bpy.context.scene.world.use_nodes = True
        nodes = bpy.context.scene.world.node_tree.nodes
        links = bpy.context.scene.world.node_tree.links
        nodes.clear()
        tc_node = nodeutils.make_shader_node(nodes, "ShaderNodeTexCoord")
        mp_node = nodeutils.make_shader_node(nodes, "ShaderNodeMapping")
        et_node = nodeutils.make_shader_node(nodes, "ShaderNodeTexEnvironment")
        bg_node = nodeutils.make_shader_node(nodes, "ShaderNodeBackground")
        wo_node = nodeutils.make_shader_node(nodes, "ShaderNodeOutputWorld")
        ab_node = nodeutils.make_shader_node(nodes, "ShaderNodeRGB")
        am_node = nodeutils.make_shader_node(nodes, "ShaderNodeMix")
        # node editor layout only (left to right), no shading effect
        tc_node.location = (-820, 350)
        mp_node.location = (-610, 370)
        et_node.location = (-330, 330)
        ab_node.location = (-280, 60)
        am_node.location = (10, 310)
        bg_node.location = (200, 300)
        wo_node.location = (420, 300)
        # additive mix: A = ambient color, B = HDRI, Factor = str
        am_node.data_type = "RGBA"
        am_node.blend_type = "ADD"
        am_node.clamp_result = False
        am_node.clamp_factor = False
        nodeutils.set_node_input_value(am_node, "Factor", str)
        # tag the nodes with unique names so they can be located later
        bg_node.name = utils.unique_name("(rl_background_node)")
        ab_node.name = utils.unique_name("(rl_ambient_node)")
        nodeutils.set_node_output_value(ab_node, "Color", ambient_color)
        nodeutils.set_node_input_value(bg_node, "Strength", 1.0)
        nodeutils.set_node_input_value(mp_node, "Location", loc)
        nodeutils.set_node_input_value(mp_node, "Rotation", rot)
        nodeutils.set_node_input_value(mp_node, "Scale", Vector((sca, sca, sca)))
        nodeutils.link_nodes(links, et_node, "Color", am_node, "B")
        nodeutils.link_nodes(links, ab_node, "Color", am_node, "A")
        nodeutils.link_nodes(links, tc_node, "Generated", mp_node, "Vector")
        nodeutils.link_nodes(links, mp_node, "Vector", et_node, "Vector")
        nodeutils.link_nodes(links, am_node, "Result", bg_node, "Color")
        nodeutils.link_nodes(links, bg_node, "Background", wo_node, "Surface")
        et_node.image = imageutils.load_image(hdri_path, "Linear")
        if shading:
            # render with the scene world, not the viewport studio light
            shading.use_scene_world = False
            shading.use_scene_world_render = True
    else:
        # flat-color world: ambient RGB straight into the background
        bpy.context.scene.world.use_nodes = True
        nodes = bpy.context.scene.world.node_tree.nodes
        links = bpy.context.scene.world.node_tree.links
        nodes.clear()
        bg_node = nodeutils.make_shader_node(nodes, "ShaderNodeBackground")
        wo_node = nodeutils.make_shader_node(nodes, "ShaderNodeOutputWorld")
        ab_node = nodeutils.make_shader_node(nodes, "ShaderNodeRGB")
        bg_node.location = (10,300)
        wo_node.location = (300,300)
        ab_node.location = (-280, 60)
        bg_node.name = utils.unique_name("(rl_background_node)")
        ab_node.name = utils.unique_name("(rl_ambient_node)")
        #nodeutils.set_node_input_value(bg_node, "Strength", str)
        nodeutils.set_node_output_value(ab_node, "Color", ambient_color)
        nodeutils.link_nodes(links, bg_node, "Background", wo_node, "Surface")
        nodeutils.link_nodes(links, ab_node, "Color", bg_node, "Color")
        if shading:
            # render with the scene world, not the viewport studio light
            shading.use_scene_world = False
            shading.use_scene_world_render = True
@@ -1,6 +1,6 @@
{
"last_check": "2025-12-01 11:04:00.288525",
"backup_date": "",
"last_check": "2025-12-30 14:52:43.839129",
"backup_date": "December-30-2025",
"update_ready": false,
"ignore": false,
"just_restored": false,
@@ -2013,26 +2013,28 @@ def load_csv(chr_cache, file_path):
key_action = utils.make_action(f"{chr_cache.character_name}_ARKit_Proxy_Head", slot_type="KEY", clear=True, reuse=True)
arm_action = utils.make_action(f"{chr_cache.character_name}_ARKit_Proxy", slot_type="OBJECT", clear=True, reuse=True)
key_channels = utils.get_action_channels(key_action, slot_type="KEY")
for key in keys:
fcurve = key_channels.fcurves.new(f"key_blocks[\"{key}\"].value")
for tcurve in tcurves:
if tcurve.name.lower() == key.lower():
tcurve.to_fcurve(fcurve)
break
if key_channels:
for key in keys:
fcurve = key_channels.fcurves.new(f"key_blocks[\"{key}\"].value")
for tcurve in tcurves:
if tcurve.name.lower() == key.lower():
tcurve.to_fcurve(fcurve)
break
utils.safe_set_action(proxy_mesh.data.shape_keys, key_action)
bone_channels = utils.get_action_channels(arm_action, slot_type="OBJECT")
for tcurve_name, bone_def in facerig_data.ARK_BONE_TARGETS.items():
for tcurve in tcurves:
if tcurve.name.lower() == tcurve_name.lower():
bone_name = bone_def["bone"]
bone = proxy_rig.pose.bones[bone_name]
bone.rotation_mode = "XYZ"
axis = bone_def["axis"]
rotation = bone_def["rotation"] * math.pi / 180
prop, var, index = facerig_data.ROT_AXES[axis]
data_path = bone.path_from_id(prop)
fcurve = bone_channels.fcurves.new(data_path, index=index)
tcurve.to_fcurve(fcurve, rotation)
if bone_channels:
for tcurve_name, bone_def in facerig_data.ARK_BONE_TARGETS.items():
for tcurve in tcurves:
if tcurve.name.lower() == tcurve_name.lower():
bone_name = bone_def["bone"]
bone = proxy_rig.pose.bones[bone_name]
bone.rotation_mode = "XYZ"
axis = bone_def["axis"]
rotation = bone_def["rotation"] * math.pi / 180
prop, var, index = facerig_data.ROT_AXES[axis]
data_path = bone.path_from_id(prop)
fcurve = bone_channels.fcurves.new(data_path, index=index)
tcurve.to_fcurve(fcurve, rotation)
utils.safe_set_action(proxy_rig, arm_action)
+31 -22
View File
@@ -1218,7 +1218,7 @@ def store_camera_cache_keyframes(actor: LinkActor, frame):
store_cache_curves_frame(camera_cache, "f_stop", frame, start, data.dof.aperture_fstop)
def write_action_rotation_cache_curve(action: bpy.types.Action, cache, prop, obj, num_frames, group_name=None, slot=None):
def write_action_rotation_cache_curve(action: bpy.types.Action, cache, prop, obj, num_frames, group_name=None, slot=None, slot_type=None):
cache_type = cache[prop]["type"]
data_path = None
if cache_type == "QUATERNION":
@@ -1233,27 +1233,28 @@ def write_action_rotation_cache_curve(action: bpy.types.Action, cache, prop, obj
data_path = obj.path_from_id("rotation_euler")
if not group_name:
group_name = "Rotation Euler"
write_action_cache_curve(action, cache, prop, data_path, num_frames, group_name, slot=slot)
write_action_cache_curve(action, cache, prop, data_path, num_frames, group_name, slot=slot, slot_type=slot_type)
def write_action_cache_curve(action: bpy.types.Action, cache, prop, data_path, num_frames, group_name, slot=None):
def write_action_cache_curve(action: bpy.types.Action, cache, prop, data_path, num_frames, group_name, slot=None, slot_type=None):
if not LINK_DATA.set_keyframes: return
prop_cache = cache[prop]
num_curves = len(prop_cache["curves"])
channels = utils.get_action_channels(action, slot)
fcurve: bpy.types.FCurve = None
if group_name not in channels.groups:
channels.groups.new(group_name)
for i in range(0, num_curves):
cache_curve = prop_cache["curves"][i]
fcurve = channels.fcurves.new(data_path, index=i)
fcurve.keyframe_points.add(num_frames)
set_count = num_frames * 2
if set_count < len(cache_curve):
# if setting fewer frames than are in the cache (sequence was stopped early)
fcurve.keyframe_points.foreach_set('co', cache_curve[:set_count])
else:
fcurve.keyframe_points.foreach_set('co', cache_curve)
channels = utils.get_action_channels(action, slot=slot, slot_type=slot_type)
if channels:
fcurve: bpy.types.FCurve = None
if group_name not in channels.groups:
channels.groups.new(group_name)
for i in range(0, num_curves):
cache_curve = prop_cache["curves"][i]
fcurve = channels.fcurves.new(data_path, index=i)
fcurve.keyframe_points.add(num_frames)
set_count = num_frames * 2
if set_count < len(cache_curve):
# if setting fewer frames than are in the cache (sequence was stopped early)
fcurve.keyframe_points.foreach_set('co', cache_curve[:set_count])
else:
fcurve.keyframe_points.foreach_set('co', cache_curve)
def write_sequence_actions(actor: LinkActor, num_frames):
@@ -1268,14 +1269,18 @@ def write_sequence_actions(actor: LinkActor, num_frames):
if rig_action:
utils.clear_action(rig_action, "OBJECT", rig_action.name)
bone_cache = actor.cache["bones"]
rig_slot = utils.get_action_slot(rig_action, "OBJECT")
for bone_name in bone_cache:
pose_bone: bpy.types.PoseBone = rig.pose.bones[bone_name]
write_action_cache_curve(rig_action, bone_cache[bone_name], "loc",
pose_bone.path_from_id("location"), num_frames, bone_name)
pose_bone.path_from_id("location"), num_frames, bone_name,
slot=rig_slot)
write_action_rotation_cache_curve(rig_action, bone_cache[bone_name], "rot",
pose_bone, num_frames, group_name=bone_name)
pose_bone, num_frames, group_name=bone_name,
slot=rig_slot)
write_action_cache_curve(rig_action, bone_cache[bone_name], "sca",
pose_bone.path_from_id("scale"), num_frames, bone_name)
pose_bone.path_from_id("scale"), num_frames, bone_name,
slot=rig_slot)
# re-apply action to fix slot
utils.safe_set_action(rig, rig_action)
@@ -1283,18 +1288,21 @@ def write_sequence_actions(actor: LinkActor, num_frames):
viseme_cache = actor.cache["visemes"]
for obj in objects:
obj_action = utils.safe_get_action(obj.data.shape_keys)
key_slot = utils.get_action_slot(obj_action, "KEY")
if obj_action:
utils.clear_action(obj_action, "KEY", obj_action.name)
for expression_name in expression_cache:
if expression_name in obj.data.shape_keys.key_blocks:
key = obj.data.shape_keys.key_blocks[expression_name]
write_action_cache_curve(obj_action, expression_cache, expression_name,
key.path_from_id("value"), num_frames, "Expression")
key.path_from_id("value"), num_frames, "Expression",
slot=key_slot)
for viseme_name in viseme_cache:
if viseme_name in obj.data.shape_keys.key_blocks:
key = obj.data.shape_keys.key_blocks[viseme_name]
write_action_cache_curve(obj_action, viseme_cache, viseme_name,
key.path_from_id("value"), num_frames, "Viseme")
key.path_from_id("value"), num_frames, "Viseme",
slot=key_slot)
utils.safe_set_action(obj.data.shape_keys, obj_action) # re-apply action to fix slot
# remove actions from non sequence objects
@@ -2731,6 +2739,7 @@ class LinkService():
json_data = decode_to_json(data)
request_type = json_data["type"]
actors_data = json_data["actors"]
json_data["FPS"] = bpy.context.scene.render.fps
for actor_data in actors_data:
name = actor_data["name"]
link_id = actor_data["link_id"]
+49 -16
View File
@@ -387,6 +387,12 @@ def rigid_body_sim_ui(chr_cache, arm, obj, layout : bpy.types.UILayout,
column.row().label(text="Rigid Body Cache:")
row = column.row()
row.operator("cc3.springbones", icon=utils.check_icon("LOOP_BACK"), text="Reset Simulation").param = "RESET_PHYSICS"
# frame dropping warning
if bpy.context.scene.sync_mode != "NONE":
row = column.row()
row.alert = True
row.label(text="Frame Dropping!", icon="ERROR")
#
row = column.row()
row.scale_y = 1.5
row.context_pointer_set("point_cache", rigidbody_point_cache)
@@ -398,6 +404,27 @@ def rigid_body_sim_ui(chr_cache, arm, obj, layout : bpy.types.UILayout,
row.operator("ptcache.bake", text="Bake Simulation", icon="REC", depress=rigidbody_baking).bake = True
def physics_all_dynamics_ui(layout : bpy.types.UILayout):
has_cloth, has_collision, has_rigidbody, all_baked, any_baked, all_baking, any_baking = physics.get_scene_physics_state()
layout.label(text="All Dynamics:", icon="PHYSICS")
column = layout.column(align=True)
column.operator("cc3.scene", icon="LOOP_BACK", text="Reset All").param = "PHYSICS_PREP_ALL"
# frame dropping warning
if bpy.context.scene.sync_mode != "NONE":
row = column.row(align=True)
row.alert = True
row.label(text="Frame Dropping!", icon="ERROR")
#
row = column.row(align=True)
row.scale_y = 1.5
row.alert = all_baked
all_depress = all_baking
if any_baked:
row.operator("ptcache.free_bake_all", text="Free All Dynamics", icon="REC")
else:
row.operator("ptcache.bake_all", text="Bake All Dynamics", icon="REC", depress=all_depress).bake = True
def cache_timeline_physics_ui(chr_cache, layout : bpy.types.UILayout):
if not chr_cache:
return
@@ -435,6 +462,12 @@ def cache_timeline_physics_ui(chr_cache, layout : bpy.types.UILayout):
row = grid_column.row(align=True)
row.operator("cc3.scene", icon="LOOP_BACK", text="Reset").param = "PHYSICS_PREP_CLOTH"
# frame dropping warning
if bpy.context.scene.sync_mode != "NONE":
row = grid_column.row(align=True)
row.alert = True
row.label(text="Frame Dropping!", icon="ERROR")
#
row = grid_column.row(align=True)
row.context_pointer_set("point_cache", cloth_point_cache)
row.scale_y = 1.5
@@ -452,6 +485,12 @@ def cache_timeline_physics_ui(chr_cache, layout : bpy.types.UILayout):
row = grid_column.row(align=True)
row.operator("cc3.scene", icon="LOOP_BACK", text="Reset").param = "PHYSICS_PREP_RBW"
# frame dropping warning
if bpy.context.scene.sync_mode != "NONE":
row = grid_column.row(align=True)
row.alert = True
row.label(text="Frame Dropping!", icon="ERROR")
#
row = grid_column.row(align=True)
row.context_pointer_set("point_cache", rigidbody_point_cache)
row.scale_y = 1.5
@@ -466,20 +505,7 @@ def cache_timeline_physics_ui(chr_cache, layout : bpy.types.UILayout):
layout.separator()
has_cloth, has_collision, has_rigidbody, all_baked, any_baked, all_baking, any_baking = physics.get_scene_physics_state()
layout.label(text="All Dynamics:", icon="PHYSICS")
column = layout.column(align=True)
column.operator("cc3.scene", icon="LOOP_BACK", text="Reset All").param = "PHYSICS_PREP_ALL"
row = column.row(align=True)
row.scale_y = 1.5
row.alert = all_baked
all_depress = all_baking
if any_baked:
row.operator("ptcache.free_bake_all", text="Free All Dynamics", icon="REC")
else:
row.operator("ptcache.bake_all", text="Bake All Dynamics", icon="REC", depress=all_depress).bake = True
physics_all_dynamics_ui(layout)
def character_tools_ui(context, layout: bpy.types.UILayout):
@@ -3298,7 +3324,7 @@ class CC3ToolsPhysicsPanel(bpy.types.Panel):
column.separator()
column.row().label(text="Cloth Simulation:")
column.row().label(text="Cloth Simulation:", icon="MATCLOTH")
if bpy.context.object:
column.label(text=bpy.context.object.name, icon="OBJECT_DATA")
@@ -3306,6 +3332,12 @@ class CC3ToolsPhysicsPanel(bpy.types.Panel):
row.operator("cc3.scene", icon=utils.check_icon("LOOP_BACK"), text="Reset Simulation").param = "PHYSICS_PREP_CLOTH"
if not has_cloth:
row.enabled = False
# frame dropping warning
if context.scene.sync_mode != "NONE":
row = column.row()
row.alert = True
row.label(text="Frame Dropping!", icon="ERROR")
#
row = column.row()
row.scale_y = 1.5
row.context_pointer_set("point_cache", cloth_point_cache)
@@ -3318,8 +3350,9 @@ class CC3ToolsPhysicsPanel(bpy.types.Panel):
if not has_cloth:
row.enabled = False
column.separator()
physics_all_dynamics_ui(layout)
column.separator()
# Physics Mesh Tools
layout.box().label(text="Mesh Correction", icon="MESH_DATA")
@@ -890,8 +890,11 @@ def physics_paint_strength_update(self, context):
props = vars.props()
if context.mode == "PAINT_TEXTURE":
ups = context.tool_settings.unified_paint_settings
prop_owner = ups if ups.use_unified_color else context.tool_settings.image_paint.brush
if not utils.B500():
ups = context.tool_settings.unified_paint_settings
prop_owner = ups if ups.use_unified_color else context.tool_settings.image_paint.brush
else:
prop_owner = context.tool_settings.image_paint.brush
s = props.physics_paint_strength
prop_owner.color = (s, s, s)
@@ -1021,12 +1024,6 @@ def delete_selected_weight_map(chr_cache, obj, mat):
obj.modifiers.remove(mix_mod)
def cloth_physics_point_cache_override(mod):
override = bpy.context.copy()
override["point_cache"] = mod.point_cache
return override
def get_context_physics_objects(context, from_selected=False):
props = vars.props()
chr_cache = props.get_context_character_cache(context)
@@ -1115,7 +1112,21 @@ def reset_physics_cache(obj, start, end):
return False
def reset_cache(context, all_objects = False):
def reset_physics(context: bpy.types.Context, all_objects=False):
# stop any playing animation
if context.screen.is_animation_playing:
bpy.ops.screen.animation_cancel(restore_frame=False)
# jump to end
bpy.ops.screen.frame_jump(end=True)
# reset the physics
reset_cache(context, all_objects=all_objects)
# reset the animation
bpy.ops.screen.frame_jump(end=False)
# set to no frame skip
context.scene.sync_mode = "NONE"
def reset_cache(context, all_objects=False):
if bpy.context.scene.use_preview_range:
start = bpy.context.scene.frame_preview_start
end = bpy.context.scene.frame_preview_end
@@ -1139,12 +1150,7 @@ def free_cache(obj):
# free the baked cache
if cloth_mod.point_cache.is_baked:
utils.log_info("Freeing point cache...")
if utils.B320():
with bpy.context.temp_override(point_cache=cloth_mod.point_cache):
bpy.ops.ptcache.free_bake()
else:
context_override = cloth_physics_point_cache_override(cloth_mod)
bpy.ops.ptcache.free_bake(context_override)
utils.safe_free_bake(cloth_mod.point_cache)
def separate_physics_materials(chr_cache, obj):
@@ -1391,10 +1397,11 @@ def remove_all_physics(chr_cache):
utils.log_indent()
objects_processed = []
for obj in chr_cache.get_cache_objects():
obj_cache = chr_cache.get_object_cache(obj)
if obj_cache and obj_cache.is_mesh() and obj not in objects_processed and not obj_cache.disabled:
remove_all_physics_mods(obj)
remove_collision_proxy(chr_cache, obj)
if utils.object_exists(obj):
obj_cache = chr_cache.get_object_cache(obj)
if obj_cache and obj_cache.is_mesh() and obj not in objects_processed and not obj_cache.disabled:
remove_all_physics_mods(obj)
remove_collision_proxy(chr_cache, obj)
chr_cache.physics_applied = False
utils.log_recess()
@@ -1487,6 +1494,7 @@ def set_physics_settings(op, context, param):
restore_collision_proxy_view(context, chr_cache)
for obj in context.selected_objects:
enable_cloth_physics(chr_cache, obj, True)
reset_physics(context)
elif param == "PHYSICS_REMOVE_CLOTH":
restore_collision_proxy_view(context, chr_cache)
@@ -1499,6 +1507,7 @@ def set_physics_settings(op, context, param):
for obj in objects:
enable_collision_physics(chr_cache, obj)
show_hide_collision_proxies(context, chr_cache, False)
reset_physics(context, all_objects=True)
elif param == "PHYSICS_REMOVE_COLLISION":
restore_collision_proxy_view(context, chr_cache)
@@ -1506,6 +1515,7 @@ def set_physics_settings(op, context, param):
for obj in objects:
disable_collision_physics(chr_cache, obj)
show_hide_collision_proxies(context, chr_cache, False)
reset_physics(context, all_objects=True)
elif param == "PHYSICS_ADD_WEIGHTMAP":
if obj:
@@ -1596,6 +1606,7 @@ def set_physics_settings(op, context, param):
if chr_cache:
restore_collision_proxy_view(context, chr_cache)
enable_physics(chr_cache)
reset_physics(context)
op.report({'INFO'}, f"Physics enabled for {chr_cache.character_name}")
elif param == "REMOVE_PHYSICS":
@@ -1608,6 +1619,7 @@ def set_physics_settings(op, context, param):
if chr_cache:
restore_collision_proxy_view(context, chr_cache)
apply_all_physics(chr_cache)
reset_physics(context)
op.report({'INFO'}, f"Physics applied to {chr_cache.character_name}")
elif param == "PHYSICS_INC_STRENGTH":
@@ -2222,7 +2222,7 @@ class CC3CharacterCache(bpy.types.PropertyGroup):
def get_object_cache(self, obj, include_disabled=False, by_id=None, strict=False) -> CC3ObjectCache:
"""Returns the object cache for this object.
"""
if obj:
if utils.object_exists(obj):
# by object
if not strict and not by_id:
by_id = utils.get_rl_object_id(obj)
@@ -1375,8 +1375,14 @@ def store_expression_set(chr_cache, cc3_rig, rigify_rig=None, rigify_data=None):
if rigify_rig and cc3_rig and rigify_data:
rigify_bone_name = bones.get_rigify_control_bone(rigify_rig, rigify_data.bone_mapping, bone_name, extra_mapping=expression_meta_bone_map)
offset_bone_name = offset_bone_map[bone_name] if bone_name in offset_bone_map else ""
tra = utils.array_to_vector(expression_def["Bones"][bone_name]["Translate"])
rot = utils.array_to_quaternion(expression_def["Bones"][bone_name]["Rotation"])
try:
tra = utils.array_to_vector(expression_def["Bones"][bone_name]["Translate"])
except:
tra = Vector((0,0,0))
try:
rot = utils.array_to_quaternion(expression_def["Bones"][bone_name]["Rotation"])
except:
rot = Quaternion((1,0,0,0))
R, tra_local = bones.convert_relative_transform(cc3_rig, bone_name, rigify_rig, rigify_bone_name, tra, rot, True)
if R:
rot_euler = rot.to_euler("XYZ")
@@ -761,8 +761,7 @@ def reset_cache(context):
# free the bake
if cache.is_baked:
utils.log_info("Freeing baked point cache...")
context_override = {"point_cache": bpy.context.scene.rigidbody_world.point_cache}
bpy.ops.ptcache.free_bake(context_override)
utils.safe_free_bake(bpy.context.scene.rigidbody_world.point_cache)
# invalidate the cache
utils.log_info("Invalidating point cache...")
@@ -799,7 +798,7 @@ def reset_cache(context):
rigidbody_world.solver_iterations = interations
def create_capsule_collider(name, location, rotation, scale, radius, length, axis):
def create_capsule_collider(name, parent, location, rotation, scale, radius, length, axis):
bm = bmesh.new()
try:
bmesh.ops.create_uvsphere(bm, u_segments=8, v_segments=9, radius=radius)
@@ -819,11 +818,11 @@ def create_capsule_collider(name, location, rotation, scale, radius, length, axi
mesh.update()
bm.free()
object = bpy.data.objects.new(name, mesh)
bpy.context.scene.collection.objects.link(object)
object.display_type = 'WIRE'
capsule = bpy.data.objects.new(name, mesh)
bpy.context.scene.collection.objects.link(capsule)
capsule.display_type = 'WIRE'
object.location = location
capsule.location = parent.matrix_world @ location
r = Quaternion()
r.identity()
if axis == "X":
@@ -838,12 +837,12 @@ def create_capsule_collider(name, location, rotation, scale, radius, length, axi
r.rotate(mat_rot_x)
r.rotate(rotation)
utils.set_transform_rotation(object, r)
object.scale = scale
return object
utils.set_transform_rotation(capsule, rotate_quat(parent.matrix_world, r))
capsule.scale = parent.scale * scale
return capsule
def create_sphere_collider(name, location, rotation, scale, radius):
def create_sphere_collider(name, parent, location, rotation, scale, radius):
bm = bmesh.new()
try:
bmesh.ops.create_uvsphere(bm, u_segments=8, v_segments=9, radius=radius)
@@ -855,17 +854,17 @@ def create_sphere_collider(name, location, rotation, scale, radius):
mesh.update()
bm.free()
object = bpy.data.objects.new(name, mesh)
bpy.context.scene.collection.objects.link(object)
object.display_type = 'WIRE'
sphere = bpy.data.objects.new(name, mesh)
bpy.context.scene.collection.objects.link(sphere)
sphere.display_type = 'WIRE'
object.location = location
utils.set_transform_rotation(object, rotation)
object.scale = scale
return object
sphere.location = parent.matrix_world @ location
utils.set_transform_rotation(sphere, rotate_quat(parent.matrix_world, rotation))
sphere.scale = parent.scale * scale
return sphere
def create_box_collider(name, location, rotation, scale, extents, axis):
def create_box_collider(name, parent, location, rotation, scale, extents, axis):
bm = bmesh.new()
bmesh.ops.create_cube(bm, size=1.0)
bm.verts.ensure_lookup_table()
@@ -882,15 +881,17 @@ def create_box_collider(name, location, rotation, scale, extents, axis):
mesh.update()
bm.free()
object = bpy.data.objects.new(name, mesh)
bpy.context.scene.collection.objects.link(object)
object.display_type = 'WIRE'
box = bpy.data.objects.new(name, mesh)
bpy.context.scene.collection.objects.link(box)
box.display_type = 'WIRE'
object.location = location
utils.set_transform_rotation(object, rotation)
object.scale = scale
return object
box.location = parent.matrix_world @ location
utils.set_transform_rotation(box, rotate_quat(parent.matrix_world, rotation))
box.scale = parent.scale * scale
return box
def rotate_quat(M: Matrix, Q: Quaternion):
return (M @ Q.to_matrix().to_4x4()).to_quaternion()
def fix_quat(q):
return [q[3], q[0], q[1], q[2]]
@@ -963,25 +964,25 @@ def build_rigid_body_colliders(chr_cache, json_data, first_import = False, bone_
margin = shape_data["Margin"] * 0.01
friction = shape_data["Friction"]
elasticity = shape_data["Elasticity"] / 10.0
translate = Vector(shape_data["WorldTranslate"]) * 0.01
translate = Vector(shape_data["WorldTranslate"])
rotate = Quaternion(fix_quat(shape_data["WorldRotationQ"]))
scale = shape_data["WorldScale"]
scale = Vector(shape_data["WorldScale"])
if use_bind_data:
translate = Vector(shape_data["BindPose WorldTranslate"]) * 0.01
translate = Vector(shape_data["BindPose WorldTranslate"])
rotate = Quaternion(fix_quat(shape_data["BindPose WorldRotationQ"]))
scale = shape_data["BindPose WorldScale"]
scale = Vector(shape_data["BindPose WorldScale"])
axis = shape_data["BindPose Bound Axis"]
obj : bpy.types.Object = None
if shape == "Box":
extent = Vector(shape_data["Extent"]) * 0.01 / 2.0
obj = create_box_collider(name, translate, rotate, scale, extent, axis)
extent = Vector(shape_data["Extent"]) / 2.0
obj = create_box_collider(name, arm, translate, rotate, scale, extent, axis)
elif shape == "Capsule":
radius = shape_data["Radius"] * 0.01
length = shape_data["Capsule Length"] * 0.01
obj = create_capsule_collider(name, translate, rotate, scale, radius, length, axis)
radius = shape_data["Radius"]
length = shape_data["Capsule Length"]
obj = create_capsule_collider(name, arm, translate, rotate, scale, radius, length, axis)
elif shape == "Sphere":
radius = shape_data["Radius"] * 0.01
obj = create_sphere_collider(name, translate, rotate, scale, radius)
radius = shape_data["Radius"]
obj = create_sphere_collider(name, arm, translate, rotate, scale, radius)
if not obj:
continue
+103 -16
View File
@@ -53,8 +53,8 @@ def name_in_data_paths(action, name, slot_type=None):
def name_in_pose_bone_data_paths_regex(action, name, slot_type=None):
channels = utils.get_action_channels(action, slot_type=slot_type)
name = ".*" + name
if channels:
name = ".*" + name
for fcurve in channels.fcurves:
if re.match(name, fcurve.data_path):
return True
@@ -2877,21 +2877,22 @@ class CCICActionImportOptions(bpy.types.Operator):
column.row().prop(props, "action_mode")
column.row().prop(props, "frame_mode")
column.row().prop(props, "use_masking")
row = column.row()
row.template_list("CCIC_RigMixBones_UL_List", "rig_mix_bones_list",
arm.data, "bones",
props, "rig_mix_bones_list_index",
rows=8, maxrows=8)
col = row.column()
col.separator(factor=4.0)
col.operator("ccic.action_import_functions", text="", icon="PLAY").param = "ADD_BONE"
col.separator(factor=4.0)
col.operator("ccic.action_import_functions", text="", icon="PLAY_REVERSE").param = "REMOVE_BONE"
col.separator(factor=4.0)
row.template_list("CCIC_ImportMixBones_UL_List", "import_mix_bones_list",
props, "import_mix_bones",
props, "import_mix_bones_list_index",
rows=8, maxrows=8)
if props.use_masking:
row = column.row()
row.template_list("CCIC_RigMixBones_UL_List", "rig_mix_bones_list",
arm.data, "bones",
props, "rig_mix_bones_list_index",
rows=8, maxrows=8)
col = row.column()
col.separator(factor=4.0)
col.operator("ccic.action_import_functions", text="", icon="PLAY").param = "ADD_BONE"
col.separator(factor=4.0)
col.operator("ccic.action_import_functions", text="", icon="PLAY_REVERSE").param = "REMOVE_BONE"
col.separator(factor=4.0)
row.template_list("CCIC_ImportMixBones_UL_List", "import_mix_bones_list",
props, "import_mix_bones",
props, "import_mix_bones_list_index",
rows=8, maxrows=8)
else:
column.label(text="No Character!")
@@ -2911,3 +2912,89 @@ class CCICActionImportOptions(bpy.types.Operator):
@classmethod
def description(cls, context, properties):
return "Description"
def shift_actions(action, to_frame, frame_start = 1):
if to_frame == frame_start:
return
fcurves = utils.get_action_fcurves(action)
fcurve: bpy.types.FCurve = None
for fcurve in fcurves:
num_points = len(fcurve.keyframe_points)
points_data = [0.0,0.0]*num_points
fcurve.keyframe_points.foreach_get('co', points_data)
for i in range(0, num_points):
frame = points_data[i*2]
points_data[i*2] = frame - frame_start + to_frame
fcurve.keyframe_points.foreach_set('co', points_data)
def mix_actions(src_action, dst_action, frame_start):
src_fcurves = utils.get_action_fcurves(src_action)
dst_fcurves = utils.get_action_fcurves(dst_action)
fcurve_map = {}
for i, src_curve in enumerate(src_fcurves):
for j, dst_curve in enumerate(dst_fcurves):
if src_curve.data_path == dst_curve.data_path:
fcurve_map[i] = j
break
for i, src_curve in enumerate(src_fcurves):
if i in fcurve_map:
j = fcurve_map[i]
dst_curve = dst_fcurves[j]
mix_fcurve(src_curve, dst_curve, frame_start)
def mix_fcurve(src_curve: bpy.types.FCurve, dst_curve: bpy.types.FCurve, frame_start):
src_curve.keyframe_points.foreach_get()
num_src_points = len(src_curve.keyframe_points)
src_data = [0.0,0.0]*num_src_points
src_curve.keyframe_points.foreach_get('co', src_data)
num_dst_points = len(dst_curve.keyframe_points)
dst_data = [0.0,0.0]*num_dst_points
dst_curve.keyframe_points.foreach_get('co', dst_data)
src_start_frame = src_data[0]
src_end_frame = src_data[-2]
num_pre_points = 0
num_post_points = 0
src_index = -1
post_index = -1
for i in range(0, num_dst_points):
dst_frame = dst_data[i*2]
if dst_frame < src_start_frame:
num_pre_points += 1
if dst_frame > src_end_frame:
if post_index < 0:
post_index = i
num_post_points += 1
if src_start_frame >= dst_frame and src_index < 0:
src_index = i
num_result_points = num_pre_points + num_src_points + num_post_points
result_data = [0.0, 0.0]*num_result_points
# copy pre destination range
for i in range(0, num_pre_points * 2):
result_data[i] = dst_data[i]
# copy the source range
offset = src_index * 2
for i in range(0, num_src_points * 2):
result_data[i + offset] = src_data[i]
# copy the post destination range
offset = post_index * 2
for i in range(0, num_post_points * 2):
result_data[i + offset] = dst_data[i]
dst_curve.keyframe_points.clear()
dst_curve.keyframe_points.add(num_result_points)
dst_curve.keyframe_points.foreach_set('co', result_data)
+9 -8
View File
@@ -430,14 +430,15 @@ def add_cache_rotation_fcurves(obj, action: bpy.types.Action, cache, num_frames,
def add_cache_fcurves(action: bpy.types.Action, data_path, cache, num_frames, group_name=None, slot=None):
channels = utils.get_action_channels(action, slot)
num_curves = len(cache)
fcurve: bpy.types.FCurve = None
if group_name not in channels.groups:
channels.groups.new(group_name)
for i in range(0, num_curves):
fcurve = channels.fcurves.new(data_path, index=i)
fcurve.group = channels.groups[group_name]
fcurve.keyframe_points.add(num_frames)
fcurve.keyframe_points.foreach_set('co', cache[i])
if channels:
fcurve: bpy.types.FCurve = None
if group_name not in channels.groups:
channels.groups.new(group_name)
for i in range(0, num_curves):
fcurve = channels.fcurves.new(data_path, index=i)
fcurve.group = channels.groups[group_name]
fcurve.keyframe_points.add(num_frames)
fcurve.keyframe_points.foreach_set('co', cache[i])
def add_camera_markers(camera, cache, num_frames, start):
@@ -2682,7 +2682,7 @@ class CC3Scene(bpy.types.Operator):
# reset the physics
physics.reset_cache(context)
# reset the animation
bpy.ops.screen.frame_jump(end = False)
bpy.ops.screen.frame_jump(end=False)
elif self.param == "PHYSICS_PREP_RBW":
# stop any playing animation
@@ -2691,20 +2691,20 @@ class CC3Scene(bpy.types.Operator):
# reset the physics
rigidbody.reset_cache(context)
# reset the animation
bpy.ops.screen.frame_jump(end = False)
bpy.ops.screen.frame_jump(end=False)
elif self.param == "PHYSICS_PREP_ALL":
# stop any playing animation
if context.screen.is_animation_playing:
bpy.ops.screen.animation_cancel(restore_frame=False)
# jump to end
bpy.ops.screen.frame_jump(end = True)
bpy.ops.screen.frame_jump(end=True)
# reset the physics
physics.reset_cache(context, all_objects=True)
rigidbody.reset_cache(context)
bpy.ops.ptcache.free_bake_all()
# reset the animation
bpy.ops.screen.frame_jump(end = False)
bpy.ops.screen.frame_jump(end=False)
context.view_layer.update()
elif self.param == "CYCLES_SETUP":
@@ -128,7 +128,10 @@ def do_multires_bake(context, chr_cache, multires_mesh, layer_target, apply_shap
utils.set_only_active_object(ao_body)
set_multi_res_level(ao_body, view_level=9, sculpt_level=9, render_level=9)
utils.log_info(f"Baking {layer_target} AO...")
bpy.context.scene.render.use_bake_multires = False
if utils.B500():
bpy.context.scene.render.bake.use_multires = False
else:
bpy.context.scene.render.use_bake_multires = False
# *cycles* bake type to AO
bpy.context.scene.cycles.bake_type = "AO"
if prefs.bake_use_gpu:
@@ -141,7 +144,10 @@ def do_multires_bake(context, chr_cache, multires_mesh, layer_target, apply_shap
# Displacement Baking
select_bake_images(multires_mesh, BAKE_TYPE_DISPLACEMENT, layer_target)
bpy.context.scene.render.use_bake_multires = True
if utils.B500():
bpy.context.scene.render.bake.use_multires = True
else:
bpy.context.scene.render.use_bake_multires = True
bake.set_cycles_samples(context, samples=2)
# copy the body for displacement baking
@@ -167,7 +173,10 @@ def do_multires_bake(context, chr_cache, multires_mesh, layer_target, apply_shap
set_multi_res_level(obj, view_level=0, sculpt_level=9, render_level=9)
# bake the displacement mask
utils.log_info(f"Baking {layer_target} sub displacement {obj.name}")
bpy.context.scene.render.bake_type = BAKE_TYPE_DISPLACEMENT
if utils.B500():
bpy.context.scene.render.bake.type = BAKE_TYPE_DISPLACEMENT
else:
bpy.context.scene.render.bake_type = BAKE_TYPE_DISPLACEMENT
bpy.ops.object.bake_image()
utils.delete_mesh_object(obj)
@@ -191,7 +200,10 @@ def do_multires_bake(context, chr_cache, multires_mesh, layer_target, apply_shap
# bake the normals
utils.log_info(f"Baking {layer_target} normals...")
bpy.context.scene.render.bake_type = BAKE_TYPE_NORMALS
if utils.B500():
bpy.context.scene.render.bake.type = BAKE_TYPE_NORMALS
else:
bpy.context.scene.render.bake_type = BAKE_TYPE_NORMALS
bpy.ops.object.bake_image()
utils.log_recess()
@@ -554,6 +554,7 @@ class CC3OperatorSpringBones(bpy.types.Operator):
#utils.restore_mode_selection_state(mode_selection)
if self.param == "BAKE_PHYSICS":
context.scene.sync_mode = "NONE"
utils.object_mode_to(arm)
reset_spring_physics(context)
utils.log_info("Baking rigid body world point cache...")
+42 -4
View File
@@ -1975,9 +1975,9 @@ def make_action(name, reuse=False, slot_type=None, target_obj=None, slot_name=No
if target_obj:
if not slot_type:
slot_type = get_slot_type_for(target_obj)
if not slot_name:
slot_name = f"SLOT-{slot_type}"
make_action_slot(action, slot_type, slot_name)
if not slot_name:
slot_name = f"SLOT-{slot_type}"
make_action_slot(action, slot_type, slot_name)
return action
@@ -2088,6 +2088,34 @@ def clear_action(action, slot_type=None, slot_name=None):
return False
def get_all_action_channels(action: bpy.types.Action):
channels = []
if action:
if B440():
if not action.layers:
layer = action.layers.new("Layer")
else:
layer = action.layers[0]
if not layer.strips:
strip = layer.strips.new(type='KEYFRAME')
else:
strip = layer.strips[0]
for channelbag in strip.channelbags:
channels.append(channelbag)
else:
channels.append(action)
return channels
def get_action_fcurves(action: bpy.types.Action):
fcurves = []
channels = get_all_action_channels(action)
for channel in channels:
for fcurve in channel.fcurves:
fcurves.append(fcurve)
return fcurves
def get_action_channels(action: bpy.types.Action, slot=None, slot_type=None):
if not action:
return None
@@ -2812,7 +2840,7 @@ def set_rl_object_id(obj, new_id=None):
def get_rl_object_id(obj):
if obj:
if object_exists(obj):
if obj.type == "ARMATURE" and "rl_armature_id" in obj:
return obj["rl_armature_id"]
if "rl_object_id" in obj:
@@ -2938,3 +2966,13 @@ def smallest_index(items: list):
smallest_value = value
index = i
return index
def safe_free_bake(point_cache):
if B320():
with bpy.context.temp_override(point_cache=point_cache):
bpy.ops.ptcache.free_bake()
else:
context_override = bpy.context.copy()
context_override["point_cache"] = point_cache
bpy.ops.ptcache.free_bake(context_override)
@@ -18,11 +18,11 @@
import bpy
VERSION_STRING = "v2.3.3"
VERSION_STRING = "v2.3.4"
DEV = False
#DEV = True
PLUGIN_COMPATIBLE = [
"2.3.2", "2.3.3",
"2.3.4",
]
def set_version_string(bl_info):