Xformers is now properly disabled when --cpu is used.

Added --windows-standalone-build option; currently it only makes
ComfyUI open up in the browser.
comfyanonymous
2023-03-12 15:44:16 -04:00
parent 6d6758e9e4
commit 0f3ba7482f
5 changed files with 34 additions and 12 deletions
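
The --windows-standalone-build behavior described in the message (opening ComfyUI in the browser) presumably lives in main.py, whose hunk is not reproduced below. As a rough sketch only, assuming the server's default 127.0.0.1:8188 address, a launcher could wire the flag up like this; everything except the flag name is hypothetical:

    import sys
    import threading
    import webbrowser

    def open_page_soon(address="127.0.0.1", port=8188):
        # Hypothetical helper: wait briefly for the server to bind,
        # then open the local ComfyUI page in the default browser.
        threading.Timer(1.0, lambda: webbrowser.open(f"http://{address}:{port}")).start()

    if "--windows-standalone-build" in sys.argv:
        open_page_soon()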

comfy/ldm/modules/attention.py

@@ -14,9 +14,8 @@ import model_management
 try:
     import xformers
     import xformers.ops
-    XFORMERS_IS_AVAILBLE = True
 except:
-    XFORMERS_IS_AVAILBLE = False
+    pass

 # CrossAttn precision handling
 import os
@@ -481,7 +480,7 @@ class CrossAttentionPytorch(nn.Module):
         return self.to_out(out)

 import sys
-if XFORMERS_IS_AVAILBLE == False or "--disable-xformers" in sys.argv:
+if model_management.xformers_enabled() == False:
     if "--use-split-cross-attention" in sys.argv:
         print("Using split optimization for cross attention")
         CrossAttention = CrossAttentionDoggettx
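
The effect of this hunk: attention.py no longer consults its own module-level flag; consumers now ask model_management, so the CPU and --disable-xformers cases are decided in one place. A minimal standalone sketch of that pattern, with hypothetical names in place of the real modules:

    # capabilities.py -- hypothetical stand-in for model_management
    try:
        import xformers.ops  # probe the optional dependency once
        _XFORMERS_OK = True
    except ImportError:
        _XFORMERS_OK = False

    def xformers_enabled():
        # Callers never read the raw flag; extra conditions (CPU mode,
        # CLI overrides) can be added here without touching consumers.
        return _XFORMERS_OK

    # consumer.py -- hypothetical stand-in for attention.py
    import capabilities

    if not capabilities.xformers_enabled():
        print("using the pure-pytorch cross attention path")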

comfy/ldm/modules/diffusionmodules/model.py

@@ -12,10 +12,8 @@ import model_management
 try:
     import xformers
     import xformers.ops
-    XFORMERS_IS_AVAILBLE = True
 except:
-    XFORMERS_IS_AVAILBLE = False
-    print("No module 'xformers'. Proceeding without it.")
+    pass

 try:
     OOM_EXCEPTION = torch.cuda.OutOfMemoryError
@@ -315,7 +313,7 @@ class MemoryEfficientCrossAttentionWrapper(MemoryEfficientCrossAttention):

 def make_attn(in_channels, attn_type="vanilla", attn_kwargs=None):
     assert attn_type in ["vanilla", "vanilla-xformers", "memory-efficient-cross-attn", "linear", "none"], f'attn_type {attn_type} unknown'
-    if XFORMERS_IS_AVAILBLE and attn_type == "vanilla":
+    if model_management.xformers_enabled() and attn_type == "vanilla":
         attn_type = "vanilla-xformers"
     print(f"making attention of type '{attn_type}' with {in_channels} in_channels")
     if attn_type == "vanilla":
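
With the change, make_attn upgrades a "vanilla" request to "vanilla-xformers" only when the central check passes. An illustrative call, assuming the module imports cleanly on a CPU run where xformers_enabled() returns False:

    # The request stays on the plain implementation; the message below
    # comes from the print() visible in the hunk above.
    attn = make_attn(512, attn_type="vanilla")
    # making attention of type 'vanilla' with 512 in_channels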

comfy/model_management.py

@@ -31,6 +31,16 @@ try:
 except:
     pass

+try:
+    import xformers
+    import xformers.ops
+    XFORMERS_IS_AVAILBLE = True
+except:
+    XFORMERS_IS_AVAILBLE = False
+
+if "--disable-xformers" in sys.argv:
+    XFORMERS_IS_AVAILBLE = False
+
 if "--cpu" in sys.argv:
     vram_state = CPU
 if "--lowvram" in sys.argv:
@@ -159,6 +169,11 @@ def get_autocast_device(dev):
         return dev.type
     return "cuda"

+def xformers_enabled():
+    if vram_state == CPU:
+        return False
+    return XFORMERS_IS_AVAILBLE
+
 def get_free_memory(dev=None, torch_free_too=False):
     if dev is None:
         dev = get_torch_device()
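
The net effect of the new function: xformers is used only if the import succeeded, --disable-xformers was absent, and the run is not in CPU mode. A quick way to observe the CPU override, assuming comfy/ is on sys.path so the module imports as model_management (test scaffolding, not part of the commit):

    import sys

    # model_management reads sys.argv at import time, so the flag must be
    # appended before the import.
    sys.argv.append("--cpu")

    import model_management

    # vram_state is CPU now, so this is False even when xformers imported fine.
    assert model_management.xformers_enabled() == False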