Add unfinished ImageOnlyCheckpointSave node to save an SVD checkpoint.
This node is unfinished; SVD checkpoints saved with it will load in ComfyUI but not in anything else.
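
For orientation, the sketch below shows roughly how such a node could drive the plumbing added in this commit. It is illustrative only: it assumes ComfyUI's usual node interface and the save_checkpoint() signature added in comfy/sd.py below, and the output-path handling is simplified (the node's actual code is not part of the hunks shown here).

    # Illustrative sketch, not the exact node code from this commit.
    import comfy.sd

    class ImageOnlyCheckpointSave:
        @classmethod
        def INPUT_TYPES(s):
            return {"required": {"model": ("MODEL",),
                                 "clip_vision": ("CLIP_VISION",),
                                 "vae": ("VAE",),
                                 "filename_prefix": ("STRING", {"default": "checkpoints/ComfyUI"})}}

        RETURN_TYPES = ()
        FUNCTION = "save"
        OUTPUT_NODE = True

        def save(self, model, clip_vision, vae, filename_prefix):
            # SVD has no text encoder, so clip is omitted and the CLIP vision
            # weights are saved in its place.
            output_path = filename_prefix + ".safetensors"  # simplified path handling
            comfy.sd.save_checkpoint(output_path, model, vae=vae, clip_vision=clip_vision)
            return {}
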
comfy/clip_vision.py
@@ -1,4 +1,4 @@
-from .utils import load_torch_file, transformers_convert, common_upscale
+from .utils import load_torch_file, transformers_convert, common_upscale, state_dict_prefix_replace
 import os
 import torch
 import contextlib
@@ -41,9 +41,13 @@ class ClipVisionModel():
         self.model.eval()

         self.patcher = comfy.model_patcher.ModelPatcher(self.model, load_device=self.load_device, offload_device=offload_device)

     def load_sd(self, sd):
         return self.model.load_state_dict(sd, strict=False)
+
+    def get_sd(self):
+        return self.model.state_dict()
+
     def encode_image(self, image):
         comfy.model_management.load_model_gpu(self.patcher)
         pixel_values = clip_preprocess(image.to(self.load_device)).float()
@@ -76,6 +80,9 @@ def convert_to_transformers(sd, prefix):
         sd['visual_projection.weight'] = sd.pop("{}proj".format(prefix)).transpose(0, 1)

         sd = transformers_convert(sd, prefix, "vision_model.", 48)
+    else:
+        replace_prefix = {prefix: ""}
+        sd = state_dict_prefix_replace(sd, replace_prefix)
     return sd

 def load_clipvision_from_sd(sd, prefix="", convert_keys=False):
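
The new else branch relies on state_dict_prefix_replace to strip a key prefix instead of running the full transformers-style conversion. A toy illustration of that behavior, with a hypothetical prefix and key:

    import torch
    from comfy.utils import state_dict_prefix_replace

    t = torch.zeros(3)
    sd = {"clip_vision_model.vision_model.embeddings.class_embedding": t}
    # Mapping a prefix to "" strips it from every key that starts with it.
    sd = state_dict_prefix_replace(sd, {"clip_vision_model.": ""})
    # sd == {"vision_model.embeddings.class_embedding": t}
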
comfy/model_base.py
@@ -179,19 +179,28 @@ class BaseModel(torch.nn.Module):
     def process_latent_out(self, latent):
         return self.latent_format.process_out(latent)

-    def state_dict_for_saving(self, clip_state_dict, vae_state_dict):
-        clip_state_dict = self.model_config.process_clip_state_dict_for_saving(clip_state_dict)
+    def state_dict_for_saving(self, clip_state_dict=None, vae_state_dict=None, clip_vision_state_dict=None):
+        extra_sds = []
+        if clip_state_dict is not None:
+            extra_sds.append(self.model_config.process_clip_state_dict_for_saving(clip_state_dict))
+        if vae_state_dict is not None:
+            extra_sds.append(self.model_config.process_vae_state_dict_for_saving(vae_state_dict))
+        if clip_vision_state_dict is not None:
+            extra_sds.append(self.model_config.process_clip_vision_state_dict_for_saving(clip_vision_state_dict))
+
         unet_state_dict = self.diffusion_model.state_dict()
         unet_state_dict = self.model_config.process_unet_state_dict_for_saving(unet_state_dict)
-        vae_state_dict = self.model_config.process_vae_state_dict_for_saving(vae_state_dict)
+
         if self.get_dtype() == torch.float16:
-            clip_state_dict = utils.convert_sd_to(clip_state_dict, torch.float16)
-            vae_state_dict = utils.convert_sd_to(vae_state_dict, torch.float16)
+            extra_sds = map(lambda sd: utils.convert_sd_to(sd, torch.float16), extra_sds)

         if self.model_type == ModelType.V_PREDICTION:
             unet_state_dict["v_pred"] = torch.tensor([])

-        return {**unet_state_dict, **vae_state_dict, **clip_state_dict}
+        for sd in extra_sds:
+            unet_state_dict.update(sd)
+
+        return unet_state_dict

     def set_inpaint(self):
         self.inpaint_model = True
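
The effect is that state_dict_for_saving() now merges whatever component state dicts are supplied into the UNet dict instead of requiring all of them. A hedged sketch of the resulting key layout (the prefixes come from the process_*_state_dict_for_saving helpers; "first_stage_model." is the conventional VAE prefix, not shown in these hunks):

    # Hypothetical call; assumes model, vae and clip_vision are already loaded.
    sd = model.model.state_dict_for_saving(clip_state_dict=None,
                                           vae_state_dict=vae.get_sd(),
                                           clip_vision_state_dict=clip_vision.get_sd())
    # The result is one flat dict with keys like:
    #   model.diffusion_model.<unet key>       (process_unet_state_dict_for_saving)
    #   first_stage_model.<vae key>            (process_vae_state_dict_for_saving)
    #   <clip_vision_prefix><clip vision key>  (process_clip_vision_state_dict_for_saving)
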
comfy/sd.py
@@ -534,7 +534,14 @@ def load_unet(unet_path):
         raise RuntimeError("ERROR: Could not detect model type of: {}".format(unet_path))
     return model

-def save_checkpoint(output_path, model, clip, vae, metadata=None):
-    model_management.load_models_gpu([model, clip.load_model()])
-    sd = model.model.state_dict_for_saving(clip.get_sd(), vae.get_sd())
+def save_checkpoint(output_path, model, clip=None, vae=None, clip_vision=None, metadata=None):
+    clip_sd = None
+    load_models = [model]
+    if clip is not None:
+        load_models.append(clip.load_model())
+        clip_sd = clip.get_sd()
+
+    model_management.load_models_gpu(load_models)
+    clip_vision_sd = clip_vision.get_sd() if clip_vision is not None else None
+    sd = model.model.state_dict_for_saving(clip_sd, vae.get_sd(), clip_vision_sd)
     comfy.utils.save_torch_file(sd, output_path, metadata=metadata)
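
Two hypothetical call sites for the reworked signature. Note that vae.get_sd() is still called unconditionally, so a VAE must be supplied in practice even though the parameter now defaults to None:

    # Hypothetical usage; model, clip, vae and clip_vision are assumed loaded.
    save_checkpoint("sd_merged.safetensors", model, clip=clip, vae=vae)  # classic SD: UNet + text CLIP + VAE
    save_checkpoint("svd_image.safetensors", model, vae=vae, clip_vision=clip_vision)  # image-only SVD: no text encoder
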
comfy/supported_models_base.py
@@ -65,6 +65,12 @@ class BASE:
         replace_prefix = {"": "cond_stage_model."}
         return utils.state_dict_prefix_replace(state_dict, replace_prefix)

+    def process_clip_vision_state_dict_for_saving(self, state_dict):
+        replace_prefix = {}
+        if self.clip_vision_prefix is not None:
+            replace_prefix[""] = self.clip_vision_prefix
+        return utils.state_dict_prefix_replace(state_dict, replace_prefix)
+
     def process_unet_state_dict_for_saving(self, state_dict):
         replace_prefix = {"": "model.diffusion_model."}
         return utils.state_dict_prefix_replace(state_dict, replace_prefix)
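
For saving, the new helper maps the empty prefix to clip_vision_prefix, which prepends it to every key; this mirrors the stripping done on load. A toy illustration, using an assumed prefix value in the style of SVD configs:

    import torch
    from comfy import utils

    t = torch.zeros(3)
    sd = {"vision_model.embeddings.class_embedding": t}
    # {"": prefix} matches every key and prepends the prefix.
    prefix = "conditioner.embedders.0.open_clip.model.visual."  # assumed example value
    sd = utils.state_dict_prefix_replace(sd, {"": prefix})
    # sd == {"conditioner.embedders.0.open_clip.model.visual.vision_model.embeddings.class_embedding": t}
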