How can qwen-image load models from a local directory?
The following approach downloads the model first; it cannot be used with a model that has already been downloaded to a local path:

pipe = QwenImagePipeline.from_pretrained(
    torch_dtype=torch.bfloat16,
    device="cuda",
    model_configs=[
        ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="transformer/diffusion_pytorch_model*.safetensors"),
        ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="text_encoder/model*.safetensors"),
        ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="vae/diffusion_pytorch_model.safetensors"),
    ],
    tokenizer_config=ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="tokenizer/"),
)
ModelConfig(path="models/xxx.safetensors") is provided, but path loads only one file at a time and cannot load a whole directory. There is also local_model_path, but using it raises an error. What is the correct, complete way to write the code for loading a local model?
Pass the path parameter as a list[str], for example:
pipe = QwenImagePipeline.from_pretrained(
    torch_dtype=torch.bfloat16,
    device="cuda",
    model_configs=[
        ModelConfig(path=[
            "models/Qwen/Qwen-Image/transformer/diffusion_pytorch_model-00001-of-00009.safetensors",
            "models/Qwen/Qwen-Image/transformer/diffusion_pytorch_model-00002-of-00009.safetensors",
            "models/Qwen/Qwen-Image/transformer/diffusion_pytorch_model-00003-of-00009.safetensors",
            "models/Qwen/Qwen-Image/transformer/diffusion_pytorch_model-00004-of-00009.safetensors",
            "models/Qwen/Qwen-Image/transformer/diffusion_pytorch_model-00005-of-00009.safetensors",
            "models/Qwen/Qwen-Image/transformer/diffusion_pytorch_model-00006-of-00009.safetensors",
            "models/Qwen/Qwen-Image/transformer/diffusion_pytorch_model-00007-of-00009.safetensors",
            "models/Qwen/Qwen-Image/transformer/diffusion_pytorch_model-00008-of-00009.safetensors",
            "models/Qwen/Qwen-Image/transformer/diffusion_pytorch_model-00009-of-00009.safetensors",
        ]),
        ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="text_encoder/model*.safetensors"),
        ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="vae/diffusion_pytorch_model.safetensors"),
    ],
    tokenizer_config=ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="tokenizer/"),
)
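
If you'd rather not type out every shard path by hand, the list can also be built with glob. A minimal sketch, assuming the same local directory layout as in the snippet above:

import glob
from diffsynth.pipelines.qwen_image import ModelConfig

# Collect the transformer shards and sort them so they are passed in index order.
transformer_shards = sorted(glob.glob(
    "models/Qwen/Qwen-Image/transformer/diffusion_pytorch_model-*-of-00009.safetensors"
))
transformer_config = ModelConfig(path=transformer_shards)

The sorted() call only makes the ordering deterministic; the resulting transformer_config can then be placed in model_configs exactly like the hand-written list.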
@mi804 Hey friend, I'd like to ask about my setup. I used this approach:
import glob
from diffsynth.pipelines.qwen_image import QwenImagePipeline, ModelConfig
from PIL import Image
import torch

pipe = QwenImagePipeline.from_pretrained(
    torch_dtype=torch.bfloat16,
    device="cuda",
    model_configs=[
        ModelConfig(path=[
            '/home/hadoop-automaterials/dolphinfs_hdd_hadoop-automaterials/likaiwen/PythonProjects/Qwen_image_train/model/model-00001-of-00004.safetensors',
            '/home/hadoop-automaterials/dolphinfs_hdd_hadoop-automaterials/likaiwen/PythonProjects/Qwen_image_train/model/model-00002-of-00004.safetensors',
            '/home/hadoop-automaterials/dolphinfs_hdd_hadoop-automaterials/likaiwen/PythonProjects/Qwen_image_train/model/model-00003-of-00004.safetensors',
            '/home/hadoop-automaterials/dolphinfs_hdd_hadoop-automaterials/likaiwen/PythonProjects/Qwen_image_train/model/model-00004-of-00004.safetensors',
        ]),
        ModelConfig(path="/home/hadoop-automaterials/dolphinfs_hdd_hadoop-automaterials/yongzhao41/insert-anything/checkpoints/Qwen-Image/vae/diffusion_pytorch_model.safetensors"),
        ModelConfig(path=[
            '/home/hadoop-automaterials/dolphinfs_hdd_hadoop-automaterials/yongzhao41/insert-anything/checkpoints/Qwen-Image/transformer/diffusion_pytorch_model-00001-of-00009.safetensors',
            '/home/hadoop-automaterials/dolphinfs_hdd_hadoop-automaterials/yongzhao41/insert-anything/checkpoints/Qwen-Image/transformer/diffusion_pytorch_model-00002-of-00009.safetensors',
            '/home/hadoop-automaterials/dolphinfs_hdd_hadoop-automaterials/yongzhao41/insert-anything/checkpoints/Qwen-Image/transformer/diffusion_pytorch_model-00003-of-00009.safetensors',
            '/home/hadoop-automaterials/dolphinfs_hdd_hadoop-automaterials/yongzhao41/insert-anything/checkpoints/Qwen-Image/transformer/diffusion_pytorch_model-00004-of-00009.safetensors',
            '/home/hadoop-automaterials/dolphinfs_hdd_hadoop-automaterials/yongzhao41/insert-anything/checkpoints/Qwen-Image/transformer/diffusion_pytorch_model-00005-of-00009.safetensors',
            '/home/hadoop-automaterials/dolphinfs_hdd_hadoop-automaterials/yongzhao41/insert-anything/checkpoints/Qwen-Image/transformer/diffusion_pytorch_model-00006-of-00009.safetensors',
            '/home/hadoop-automaterials/dolphinfs_hdd_hadoop-automaterials/yongzhao41/insert-anything/checkpoints/Qwen-Image/transformer/diffusion_pytorch_model-00007-of-00009.safetensors',
            '/home/hadoop-automaterials/dolphinfs_hdd_hadoop-automaterials/yongzhao41/insert-anything/checkpoints/Qwen-Image/transformer/diffusion_pytorch_model-00008-of-00009.safetensors',
            '/home/hadoop-automaterials/dolphinfs_hdd_hadoop-automaterials/yongzhao41/insert-anything/checkpoints/Qwen-Image/transformer/diffusion_pytorch_model-00009-of-00009.safetensors',
        ]),
    ],
    tokenizer_config=ModelConfig(path="/home/hadoop-automaterials/dolphinfs_hdd_hadoop-automaterials/yongzhao41/insert-anything/checkpoints/Qwen-Image/tokenizer/"),
)

prompt = "A detailed portrait of a girl underwater, wearing a blue flowing dress, hair gently floating, clear light and shadow, surrounded by bubbles, calm expression, fine details, dreamy and beautiful."
image = pipe(
    prompt, seed=0, num_inference_steps=40,
)
image.save("image.jpg")
I also passed the text_encoder/model*.safetensors shards as a list, but it raised an error:
Loading models from: ['/home/hadoop-automaterials/dolphinfs_hdd_hadoop-automaterials/likaiwen/PythonProjects/Qwen_image_train/model/model-00001-of-00004.safetensors', '/home/hadoop-automaterials/dolphinfs_hdd_hadoop-automaterials/likaiwen/PythonProjects/Qwen_image_train/model/model-00002-of-00004.safetensors', '/home/hadoop-automaterials/dolphinfs_hdd_hadoop-automaterials/likaiwen/PythonProjects/Qwen_image_train/model/model-00003-of-00004.safetensors', '/home/hadoop-automaterials/dolphinfs_hdd_hadoop-automaterials/likaiwen/PythonProjects/Qwen_image_train/model/model-00004-of-00004.safetensors']
    model_name: qwen_image_text_encoder model_class: QwenImageTextEncoder
Traceback (most recent call last):
  File "/mnt/dolphinfs/hdd_pool/docker/user/hadoop-automaterials/likaiwen/PythonProjects/Qwen_image_train/DiffSynth-Studio/lkwtest3.py", line 6, in <module>
    pipe = QwenImagePipeline.from_pretrained(
  File "/mnt/dolphinfs/hdd_pool/docker/user/hadoop-automaterials/likaiwen/PythonProjects/Qwen_image_train/DiffSynth-Studio/diffsynth/pipelines/qwen_image.py", line 332, in from_pretrained
    model_manager.load_model(
  File "/mnt/dolphinfs/hdd_pool/docker/user/hadoop-automaterials/likaiwen/PythonProjects/Qwen_image_train/DiffSynth-Studio/diffsynth/models/model_manager.py", line 411, in load_model
    model_names, models = model_detector.load(
  File "/mnt/dolphinfs/hdd_pool/docker/user/hadoop-automaterials/likaiwen/PythonProjects/Qwen_image_train/DiffSynth-Studio/diffsynth/models/model_manager.py", line 185, in load
    loaded_model_names, loaded_models = load_model_from_single_file(state_dict, model_names, model_classes, model_resource, torch_dtype, device)
  File "/mnt/dolphinfs/hdd_pool/docker/user/hadoop-automaterials/likaiwen/PythonProjects/Qwen_image_train/DiffSynth-Studio/diffsynth/models/model_manager.py", line 76, in load_model_from_single_file
    model.load_state_dict(model_state_dict, assign=True)
  File "/home/hadoop-automaterials/dolphinfs_hdd_hadoop-automaterials/likaiwen/lkwanaconda/envc/qwen_image/lib/python3.10/site-packages/torch/nn/modules/module.py", line 2584, in load_state_dict
    raise RuntimeError(
RuntimeError: Error(s) in loading state_dict for QwenImageTextEncoder:
    Missing key(s) in state_dict: "model.embed_tokens.weight", "model.layers.0.self_attn.q_proj.weight", "model.layers.0.self_attn.q_proj.bias", "model.layers.0.self_attn.k_proj.weight", "model.layers.0.self_attn.k_proj.bias", "model.layers.0.self_attn.v_proj.weight", "model.layers.0.self_attn.v_proj.bias", "model.layers.0.self_attn.o_proj.weight", "model.layers.0.mlp.gate_proj.weight", "model.layers.0.mlp.up_proj.weight", "model.layers.0.mlp.down_proj.weight", "model.layers.0.input_layernorm.weight", "model.layers.0.post_attention_layernorm.weight", "model.layers.1.self_attn.q_proj.weight", "model.layers.1.self_attn.q_proj.bias", "model.layers.1.self_attn.k_proj.weight", "model.layers.1.self_attn.k_proj.bias", "model.layers.1.self_attn.v_proj.weight", "model.layers.1.self_attn.v_proj.bias", "model.layers.1.self_attn.o_proj.weight", "model.layers.1.mlp.gate_proj.weight", "model.layers.1.mlp.up_proj.weight", "model.layers.1.mlp.down_proj.weight", "model.layers.1.input_layernorm.weight", "model.layers.1.post_attention_layernorm.weight", "model.layers.2.self_attn.q_proj.weight", "model.layers.2.self_attn.q_proj.bias", "model.layers.2.self_attn.k_proj.weight", "model.layers.2.self_attn.k_proj.bias", "model.layers.2.self_attn.v_proj.weight", ...

How did you solve this text_encoder problem? I know my weights are fine; the same checkpoint runs correctly with huggingface. Many thanks!
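The RuntimeError above means that the state dict assembled from the four shards contains none of the model.* keys that QwenImageTextEncoder expects, which usually indicates the tensors in those shards are stored under different names (for example, with an extra or missing prefix). This is not a confirmed diagnosis, but a quick way to check is to print the keys actually stored in each shard with the safetensors package:

from safetensors import safe_open

# The four text-encoder shards from the snippet above.
shards = [
    f"/home/hadoop-automaterials/dolphinfs_hdd_hadoop-automaterials/likaiwen/PythonProjects/Qwen_image_train/model/model-0000{i}-of-00004.safetensors"
    for i in range(1, 5)
]

# Print a few tensor names per shard and compare them against the
# "model.embed_tokens.weight" / "model.layers.*" names in the error message.
for shard in shards:
    with safe_open(shard, framework="pt", device="cpu") as f:
        print(shard, "->", list(f.keys())[:5])

If the printed names do not match the expected pattern, the keys would need to be remapped (or the checkpoint re-exported) before this loader will accept them.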
@1Yanxiaolin1 Has this problem been solved?