[bug]: Cannot install Flux models
Is there an existing issue for this problem?
- [x] I have searched the existing issues
Install method
Invoke's Launcher
Operating system
Windows
GPU vendor
Nvidia (CUDA)
GPU model
RTX 4060
GPU VRAM
8GB
Version number
v6.9.0
Browser
Launcher's Provided GUI
System Information
{ "version": "6.9.0", "dependencies": { "absl-py" : "2.3.1", "accelerate" : "1.10.1", "annotated-types" : "0.7.0", "anyio" : "4.11.0", "attrs" : "25.4.0", "bidict" : "0.23.1", "bitsandbytes" : "0.48.1", "blake3" : "1.0.8", "certifi" : "2022.12.7", "cffi" : "2.0.0", "charset-normalizer" : "2.1.1", "click" : "8.3.0", "colorama" : "0.4.6", "coloredlogs" : "15.0.1", "compel" : "2.1.1", "contourpy" : "1.3.3", "CUDA" : "12.8", "cycler" : "0.12.1", "Deprecated" : "1.2.18", "diffusers" : "0.33.0", "dnspython" : "2.8.0", "dynamicprompts" : "0.31.0", "einops" : "0.8.1", "fastapi" : "0.118.3", "fastapi-events" : "0.12.2", "filelock" : "3.19.1", "flatbuffers" : "25.9.23", "fonttools" : "4.60.1", "fsspec" : "2025.9.0", "gguf" : "0.17.1", "h11" : "0.16.0", "httptools" : "0.7.1", "huggingface-hub" : "0.35.3", "humanfriendly" : "10.0", "idna" : "3.4", "importlib_metadata" : "7.1.0", "InvokeAI" : "6.9.0", "jax" : "0.7.1", "jaxlib" : "0.7.1", "Jinja2" : "3.1.6", "kiwisolver" : "1.4.9", "MarkupSafe" : "2.1.5", "matplotlib" : "3.10.7", "mediapipe" : "0.10.14", "ml_dtypes" : "0.5.3", "mpmath" : "1.3.0", "networkx" : "3.5", "numpy" : "1.26.3", "onnx" : "1.16.1", "onnxruntime" : "1.19.2", "opencv-contrib-python": "4.11.0.86", "opt_einsum" : "3.4.0", "packaging" : "24.1", "picklescan" : "0.0.31", "pillow" : "11.3.0", "prompt_toolkit" : "3.0.52", "protobuf" : "4.25.8", "psutil" : "7.1.0", "pycparser" : "2.23", "pydantic" : "2.12.2", "pydantic-settings" : "2.11.0", "pydantic_core" : "2.41.4", "pyparsing" : "3.2.5", "PyPatchMatch" : "1.0.2", "pyreadline3" : "3.5.4", "python-dateutil" : "2.9.0.post0", "python-dotenv" : "1.1.1", "python-engineio" : "4.12.3", "python-multipart" : "0.0.20", "python-socketio" : "5.14.2", "PyWavelets" : "1.9.0", "PyYAML" : "6.0.3", "regex" : "2025.9.18", "requests" : "2.28.1", "safetensors" : "0.6.2", "scipy" : "1.16.2", "semver" : "3.0.4", "sentencepiece" : "0.2.0", "setuptools" : "70.2.0", "simple-websocket" : "1.1.0", "six" : "1.17.0", "sniffio" : "1.3.1", "sounddevice" : "0.5.2", "spandrel" : "0.4.1", "starlette" : "0.48.0", "sympy" : "1.14.0", "tokenizers" : "0.22.1", "torch" : "2.7.1+cu128", "torchsde" : "0.2.6", "torchvision" : "0.22.1+cu128", "tqdm" : "4.66.5", "trampoline" : "0.1.2", "transformers" : "4.57.1", "typing-inspection" : "0.4.2", "typing_extensions" : "4.15.0", "urllib3" : "1.26.13", "uvicorn" : "0.37.0", "watchfiles" : "1.1.1", "wcwidth" : "0.2.14", "websockets" : "15.0.1", "wrapt" : "1.17.3", "wsproto" : "1.2.0", "zipp" : "3.19.2" }, "config": { "schema_version": "4.0.2", "legacy_models_yaml_path": null, "host": "127.0.0.1", "port": 9090, "allow_origins": [], "allow_credentials": true, "allow_methods": [""], "allow_headers": [""], "ssl_certfile": null, "ssl_keyfile": null, "log_tokenization": false, "patchmatch": true, "models_dir": "models", "convert_cache_dir": "models\.convert_cache", "download_cache_dir": "models\.download_cache", "legacy_conf_dir": "configs", "db_dir": "databases", "outputs_dir": "outputs", "custom_nodes_dir": "nodes", "style_presets_dir": "style_presets", "workflow_thumbnails_dir": "workflow_thumbnails", "log_handlers": ["console"], "log_format": "color", "log_level": "info", "log_sql": false, "log_level_network": "warning", "use_memory_db": false, "dev_reload": false, "profile_graphs": false, "profile_prefix": null, "profiles_dir": "profiles", "max_cache_ram_gb": null, "max_cache_vram_gb": null, "log_memory_usage": false, "device_working_mem_gb": 3, "enable_partial_loading": true, "keep_ram_copy_of_weights": true, "ram": null, "vram": null, 
"lazy_offload": true, "pytorch_cuda_alloc_conf": null, "device": "auto", "precision": "auto", "sequential_guidance": false, "attention_type": "auto", "attention_slice_size": "auto", "force_tiled_decode": false, "pil_compress_level": 1, "max_queue_size": 10000, "clear_queue_on_startup": false, "allow_nodes": null, "deny_nodes": null, "node_cache_size": 512, "hashing_algorithm": "blake3_single", "remote_api_tokens": null, "scan_models_on_startup": false, "unsafe_disable_picklescan": false, "allow_unknown_models": true }, "set_config_fields": ["enable_partial_loading", "legacy_models_yaml_path"] }
What happened
Every time I try to install a Flux1 safetensors model, I get this error: Process exited with code 3221225477
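For what it's worth, 3221225477 appears to be the decimal form of the Windows status code 0xC0000005 (access violation), which would mean the process is crashing at the native level rather than failing with a Python exception. A quick check:

```python
# Convert the reported exit code to hex to see the underlying Windows NTSTATUS value.
print(hex(3221225477))  # -> 0xc0000005, STATUS_ACCESS_VIOLATION on Windows
```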
What you expected to happen
Being able to import Flux1-related models and LoRAs
How to reproduce the problem
Download the Flux1.krea.dev model and try to add it via the scan-folder option
Additional context
No response
Discord username
No response
Update - I cannot install ANY kind of Flux model, not even from the supported models tab. The download finishes, then nothing happens for hours. In previous versions, adding Flux models took maybe two minutes tops.
Unfortunately, inconsistent out-of-memory errors can happen when you don't meet the minimum requirements for Flux as listed here.
That said, can you try adding the following line to your invokeai.yaml file and seeing if that lets you install? Also, how much RAM (not VRAM) do you have?
hashing_algorithm: random
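For reference, that setting is a top-level key in invokeai.yaml; a minimal sketch is below (the other keys are illustrative, keep whatever your file already contains):

```yaml
# invokeai.yaml (sketch; only hashing_algorithm is the change suggested above)
schema_version: 4.0.2
enable_partial_loading: true
hashing_algorithm: random
```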
I have 32 GB of RAM - I try to close as many programs as I can to free that up. I don't understand what this has to do with out-of-memory errors, because on this exact same computer I've been using FLUX dev in InvokeAI without any issues. But that was back on version 6.5 or so; I had to reinstall, and now the latest version does not allow any kind of FLUX model to be added.
I have added hashing_algorithm: random to the yaml and booted up InvokeAI, but still:
[2025-10-25 20:02:08,942]::[InvokeAI]::INFO --> Started installation of E:\ai_files\flux1-krea-dev_original.safetensors
[2025-10-25 20:02:08,943]::[ModelInstallService]::INFO --> Model install started: E:/ai_files/flux1-krea-dev_original.safetensors
Hashing flux1-krea-dev_original.safetensors: 0%| | 0/1 [00:00<?, ?file/s]
Hashing flux1-krea-dev_original.safetensors: 100%|##########| 1/1 [00:00<00:00, 999.60file/s]
Process exited with code 3221225477
It does not matter what kind of FLUX model I try (quantized, regular, dev, schnell); all of them result in this.
Same problem here with v1.8.1, Windows 11, RTX 4090 (24 GB), 64 GB RAM, >1 TB free disk space, "system managed" page file size. hashing_algorithm: random has no effect. Using "in-place install" works for all but FLUX.1-Fill-dev_flux1-fill-dev.safetensors, which still fails with Process exited with code 3221225477
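One way to rule out a corrupted download independently of Invoke is to read the safetensors header directly with the safetensors library (already present in the dependency list above). The path below is just an example taken from the log and would need to be adjusted:

```python
from safetensors import safe_open

# Example path from the log above; replace with wherever the model file actually lives.
path = r"E:\ai_files\flux1-krea-dev_original.safetensors"

# Opening the file only parses the header, so this is cheap even for 20+ GB files.
# A truncated or corrupted file should raise a Python error here rather than crash the process.
with safe_open(path, framework="pt", device="cpu") as f:
    keys = list(f.keys())
    print(f"{len(keys)} tensors, first few: {keys[:5]}")
```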