In the git version of invokeai, switching models or trying to generate an image results in this error, with no stack trace:
Inconsistency detected by ld.so: ../elf/dl-tls.c: 618: _dl_allocate_tls_init: Assertion `listp != NULL' failed!
Looking this up, it appears to be... some kind of glibc issue? 😮 It persists with another version of glibc, though.
openat(AT_FDCWD, "/home/alpha/Storage/AIModels/invokeinstall/models/diffusers/models--stabilityai--sd-vae-ft-mse/snapshots/ad7ac2cf88578c68f660449f60fe9496f35a1cbf/config.json", O_RDONLY|O_CLOEXEC) = 40
newfstatat(40, "", {st_mode=S_IFREG|0644, st_size=547, ...}, AT_EMPTY_PATH) = 0
ioctl(40, TCGETS, 0x7ffd566ea0f0) = -1 ENOTTY (Inappropriate ioctl for device)
lseek(40, 0, SEEK_CUR) = 0
lseek(40, 0, SEEK_CUR) = 0
newfstatat(40, "", {st_mode=S_IFREG|0644, st_size=547, ...}, AT_EMPTY_PATH) = 0
read(40, "{\n \"_class_name\": \"AutoencoderK"..., 548) = 547
read(40, "", 1) = 0
close(40) = 0
brk(0xaa2f3000) = 0xaa2f3000
brk(0xaa773000) = 0xaa773000
mmap(NULL, 1048576, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0) = 0x7f0a3bf00000
openat(AT_FDCWD, "/home/alpha/Storage/AIModels/invokeinstall/models/diffusers/models--stabilityai--sd-vae-ft-mse/snapshots/ad7ac2cf88578c68f660449f60fe9496f35a1cbf/diffusion_pytorch_model.safetensors", O_RDONLY|O_CLOEXEC) = 40
statx(0, NULL, AT_STATX_SYNC_AS_STAT, STATX_ALL, NULL) = -1 EFAULT (Bad address)
statx(40, "", AT_STATX_SYNC_AS_STAT|AT_EMPTY_PATH, STATX_ALL, {stx_mask=STATX_ALL|STATX_MNT_ID, stx_attributes=0, stx_mode=S_IFREG|0644, stx_size=334643276, ...}) = 0
mmap(NULL, 334643276, PROT_READ, MAP_SHARED, 40, 0) = 0x7f0a27e00000
openat(AT_FDCWD, "/home/alpha/Storage/AIModels/invokeinstall/models/diffusers/models--stabilityai--sd-vae-ft-mse/snapshots/ad7ac2cf88578c68f660449f60fe9496f35a1cbf/diffusion_pytorch_model.safetensors", O_RDONLY) = 41
fstat(41, {st_mode=S_IFREG|0644, st_size=334643276, ...}) = 0
mmap(NULL, 334643276, PROT_READ|PROT_WRITE, MAP_PRIVATE, 41, 0) = 0x7f09fc000000
close(41) = 0
munmap(0x7f0a27e00000, 334643276) = 0
close(40) = 0
mmap(NULL, 8392704, PROT_NONE, MAP_PRIVATE|MAP_ANONYMOUS|MAP_STACK, -1, 0) = 0x7f0a3b6ff000
mprotect(0x7f0a3b700000, 8388608, PROT_READ|PROT_WRITE|PROT_EXEC) = 0
writev(2, [{iov_base="Inconsistency detected by ld.so:"..., iov_len=33}, {iov_base="../elf/dl-tls.c", iov_len=15}, {iov_base=": ", iov_len=2}, {iov_base="618", iov_len=3}, {iov_base=": ", iov_len=2}, {iov_base="_dl_allocate_tls_init", iov_len=21}, {iov_base=": ", iov_len=2}, {iov_base="Assertion `", iov_len=11}, {iov_base="listp != NULL", iov_len=13}, {iov_base="' failed!\n", iov_len=10}], 10Inconsistency detected by ld.so: ../elf/dl-tls.c: 618: _dl_allocate_tls_init: Assertion `listp != NULL' failed!
It looks like it's erroring when it tries to use the VAE? The issue persists after deleting the whole VAE folder and letting it redownload.
grad_mode.py(152): self.mode = mode
module.py(807): param_applied = fn(param)
--- modulename: module, funcname: convert
module.py(1127): if convert_to_format is not None and t.dim() in (4, 5):
module.py(1130): return t.to(device, dtype if t.is_floating_point() or t.is_complex() else None, non_blocking)
module.py(806): with torch.no_grad():
--- modulename: grad_mode, funcname: __exit__
grad_mode.py(58): torch.set_grad_enabled(self.prev)
--- modulename: grad_mode, funcname: __init__
grad_mode.py(150): self.prev = torch.is_grad_enabled()
grad_mode.py(151): torch._C._set_grad_enabled(mode)
grad_mode.py(152): self.mode = mode
module.py(808): should_use_set_data = compute_should_use_set_data(param, param_applied)
--- modulename: module, funcname: compute_should_use_set_data
module.py(787): if torch._has_compatible_shallow_copy_type(tensor, tensor_applied):
module.py(796): return not torch.__future__.get_overwrite_module_params_on_conversion()
--- modulename: __future__, funcname: get_overwrite_module_params_on_conversion
__future__.py(21): return _overwrite_module_params_on_conversion
module.py(809): if should_use_set_data:
module.py(810): param.data = param_applied
module.py(811): out_param = param
module.py(818): if param.grad is not None:
module.py(800): for key, param in self._parameters.items():
module.py(829): for key, buf in self._buffers.items():
module.py(833): return self
module.py(783): for module in self.children():
--- modulename: module, funcname: children
module.py(2167): for name, module in self.named_children():
--- modulename: module, funcname: named_children
module.py(2186): for name, module in self._modules.items():
module.py(2187): if module is not None and module not in memo:
module.py(2188): memo.add(module)
module.py(2189): yield name, module
module.py(2168): yield module
module.py(784): module._apply(fn)
--- modulename: module, funcname: _apply
module.py(783): for module in self.children():
--- modulename: module, funcname: children
module.py(2167): for name, module in self.named_children():
--- modulename: module, funcname: named_children
module.py(2185): memo = set()
module.py(2186): for name, module in self._modules.items():
module.py(786): def compute_should_use_set_data(tensor, tensor_applied):
module.py(800): for key, param in self._parameters.items():
module.py(801): if param is None:
module.py(806): with torch.no_grad():
--- modulename: grad_mode, funcname: __init__
grad_mode.py(49): if not torch._jit_internal.is_scripting():
--- modulename: _jit_internal, funcname: is_scripting
_jit_internal.py(1105): return False
grad_mode.py(50): super().__init__()
grad_mode.py(51): self.prev = False
--- modulename: grad_mode, funcname: __enter__
grad_mode.py(54): self.prev = torch.is_grad_enabled()
grad_mode.py(55): torch.set_grad_enabled(False)
--- modulename: grad_mode, funcname: __init__
grad_mode.py(150): self.prev = torch.is_grad_enabled()
grad_mode.py(151): torch._C._set_grad_enabled(mode)
grad_mode.py(152): self.mode = mode
module.py(807): param_applied = fn(param)
--- modulename: module, funcname: convert
module.py(1127): if convert_to_format is not None and t.dim() in (4, 5):
module.py(1130): return t.to(device, dtype if t.is_floating_point() or t.is_complex() else None, non_blocking)
Inconsistency detected by ld.so: ../elf/dl-tls.c: 618: _dl_allocate_tls_init: Assertion `listp != NULL' failed!
I've got some other environment changes to test, but so far this has me stumped.
Is there an existing issue for this?
OS
Linux
GPU
cuda
VRAM
6GB
What happened?
In the git version of invokeai, switching models or trying to generate an image results in this error, with no stack trace:
Looking this up, it appears to be... some kind of glibc issue? 😮 It persists with another version of glibc, though.
These are the last few lines of the Linux `strace` command output:

It looks like it's erroring when it tries to use the VAE? The issue persists after deleting the whole VAE folder and letting it redownload.
The last few lines of the python trace module are less informative:
I've got some other environment changes to test, but so far this has me stumped.