
[bug]: Can't use both reference image and regional guidance with Flux Kontext Dev

Open · hippalectryon-0 opened this issue 5 months ago • 0 comments

Is there an existing issue for this problem?

  • [x] I have searched the existing issues

Install method

Invoke's Launcher

Operating system

Linux

GPU vendor

Nvidia (CUDA)

GPU model

No response

GPU VRAM

No response

Version number

6.5.1

Browser

No response

System Information

{ "version": "6.5.1", "dependencies": { "absl-py" : "2.3.1", "accelerate" : "1.10.1", "annotated-types" : "0.7.0", "anyio" : "4.10.0", "attrs" : "25.3.0", "bidict" : "0.23.1", "bitsandbytes" : "0.47.0", "blake3" : "1.0.5", "certifi" : "2022.12.7", "cffi" : "1.17.1", "charset-normalizer" : "2.1.1", "click" : "8.2.1", "colorama" : "0.4.6", "coloredlogs" : "15.0.1", "compel" : "2.1.1", "contourpy" : "1.3.3", "CUDA" : "12.8", "cycler" : "0.12.1", "Deprecated" : "1.2.18", "diffusers" : "0.33.0", "dnspython" : "2.7.0", "dynamicprompts" : "0.31.0", "einops" : "0.8.1", "fastapi" : "0.116.1", "fastapi-events" : "0.12.2", "filelock" : "3.13.1", "flatbuffers" : "25.2.10", "fonttools" : "4.59.2", "fsspec" : "2024.6.1", "gguf" : "0.17.1", "h11" : "0.16.0", "httptools" : "0.6.4", "huggingface-hub" : "0.34.4", "humanfriendly" : "10.0", "idna" : "3.4", "importlib_metadata" : "7.1.0", "InvokeAI" : "6.5.1", "jax" : "0.7.1", "jaxlib" : "0.7.1", "Jinja2" : "3.1.4", "kiwisolver" : "1.4.9", "MarkupSafe" : "2.1.5", "matplotlib" : "3.10.6", "mediapipe" : "0.10.14", "ml_dtypes" : "0.5.3", "mpmath" : "1.3.0", "networkx" : "3.3", "numpy" : "1.26.3", "onnx" : "1.16.1", "onnxruntime" : "1.19.2", "opencv-contrib-python": "4.11.0.86", "opt_einsum" : "3.4.0", "packaging" : "24.1", "picklescan" : "0.0.30", "pillow" : "11.0.0", "prompt_toolkit" : "3.0.52", "protobuf" : "4.25.8", "psutil" : "7.0.0", "pycparser" : "2.22", "pydantic" : "2.11.7", "pydantic-settings" : "2.10.1", "pydantic_core" : "2.33.2", "pyparsing" : "3.2.3", "PyPatchMatch" : "1.0.2", "pyreadline3" : "3.5.4", "python-dateutil" : "2.9.0.post0", "python-dotenv" : "1.1.1", "python-engineio" : "4.12.2", "python-multipart" : "0.0.20", "python-socketio" : "5.13.0", "PyWavelets" : "1.9.0", "PyYAML" : "6.0.2", "regex" : "2025.8.29", "requests" : "2.28.1", "safetensors" : "0.6.2", "scipy" : "1.16.1", "semver" : "3.0.4", "sentencepiece" : "0.2.0", "setuptools" : "70.2.0", "simple-websocket" : "1.1.0", "six" : "1.17.0", "sniffio" : "1.3.1", "sounddevice" : "0.5.2", "spandrel" : "0.4.1", "starlette" : "0.47.3", "sympy" : "1.13.3", "tokenizers" : "0.22.0", "torch" : "2.7.1+cu128", "torchsde" : "0.2.6", "torchvision" : "0.22.1+cu128", "tqdm" : "4.66.5", "trampoline" : "0.1.2", "transformers" : "4.56.0", "typing-inspection" : "0.4.1", "typing_extensions" : "4.12.2", "urllib3" : "1.26.13", "uvicorn" : "0.35.0", "watchfiles" : "1.1.0", "wcwidth" : "0.2.13", "websockets" : "15.0.1", "wrapt" : "1.17.3", "wsproto" : "1.2.0", "zipp" : "3.19.2" }, "config": { "schema_version": "4.0.2", "legacy_models_yaml_path": null, "host": "0.0.0.0", "port": 9090, "allow_origins": [], "allow_credentials": true, "allow_methods": [""], "allow_headers": [""], "ssl_certfile": null, "ssl_keyfile": null, "log_tokenization": false, "patchmatch": true, "models_dir": "models", "convert_cache_dir": "models\.convert_cache", "download_cache_dir": "models\.download_cache", "legacy_conf_dir": "configs", "db_dir": "Z:\AIGEN\INVOKE\databases", "outputs_dir": "Z:\AIGEN\INVOKE\outputs", "custom_nodes_dir": "nodes", "style_presets_dir": "style_presets", "workflow_thumbnails_dir": "workflow_thumbnails", "log_handlers": ["console"], "log_format": "color", "log_level": "info", "log_sql": false, "log_level_network": "warning", "use_memory_db": false, "dev_reload": false, "profile_graphs": false, "profile_prefix": null, "profiles_dir": "profiles", "max_cache_ram_gb": 64, "max_cache_vram_gb": null, "log_memory_usage": false, "device_working_mem_gb": 3, "enable_partial_loading": false, "keep_ram_copy_of_weights": true, 
"ram": null, "vram": null, "lazy_offload": true, "pytorch_cuda_alloc_conf": null, "device": "auto", "precision": "auto", "sequential_guidance": false, "attention_type": "auto", "attention_slice_size": "auto", "force_tiled_decode": false, "pil_compress_level": 1, "max_queue_size": 10000, "clear_queue_on_startup": false, "allow_nodes": null, "deny_nodes": null, "node_cache_size": 512, "hashing_algorithm": "blake3_single", "remote_api_tokens": [ {"url_regex": "civitai.com", "token": "REDACTED"}, {"url_regex": "huggingface.co", "token": "REDACTED"} ], "scan_models_on_startup": false, "unsafe_disable_picklescan": false }, "set_config_fields": [ "legacy_models_yaml_path", "host", "remote_api_tokens", "max_cache_ram_gb", "outputs_dir", "db_dir" ] }

What happened

With Flux.1 Kontext Dev (quantized), when a reference image and regional guidance are used at the same time, generation fails with the following error:

RuntimeError: The expanded size of the tensor (9345) must match the existing size (5289) at non-singleton dimension 3. Target sizes: [1, 24, 9345, 9345]. Tensor sizes: [5289, 5289]
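The shape mismatch suggests that the regional-guidance attention mask is built for the base token sequence (5289 tokens), while the Kontext reference image appends extra image tokens, growing the attention matrix to 9345 x 9345. The following is a minimal, hypothetical sketch (not InvokeAI's actual code; the token counts are taken from the traceback above) showing how broadcasting a mask sized for the shorter sequence onto the longer attention tensor raises exactly this error:

```python
import torch

# Assumed values: 24 attention heads (as in the traceback), a regional mask
# built for the base sequence length, and an attention tensor whose sequence
# length has grown after the Kontext reference-image tokens were appended.
num_heads = 24
base_len = 5289      # tokens the regional-guidance mask was built for
kontext_len = 9345   # tokens after the reference image is appended

regional_mask = torch.zeros(base_len, base_len)                      # [5289, 5289]
attn_scores = torch.zeros(1, num_heads, kontext_len, kontext_len)    # [1, 24, 9345, 9345]

# Raises: "The expanded size of the tensor (9345) must match the existing
# size (5289) at non-singleton dimension 3. Target sizes: [1, 24, 9345, 9345].
# Tensor sizes: [5289, 5289]"
regional_mask.expand(attn_scores.shape)
```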

What you expected to happen

Either for the generation to work, or, if this is expected behavior, for the UI to disable one of the guidance methods while the other is in use.

How to reproduce the problem

No response

Additional context

No response

Discord username

No response

hippalectryon-0 • Sep 05 '25 10:09