@@ -710,6 +710,26 @@
             "filename": "pytorch_model.bin",
             "url": "https://huggingface.co/h94/IP-Adapter/resolve/main/sdxl_models/image_encoder/pytorch_model.bin"
         },
+        {
+            "name": "CLIPVision model (IP-Adapter) CLIP-ViT-H-14-laion2B-s32B-b79K",
+            "type": "clip_vision",
+            "base": "SD1.5",
+            "save_path": "clip_vision/SD1.5",
+            "description": "[2.5GB] CLIPVision model (needed for IP-Adapter)",
+            "reference": "https://huggingface.co/h94/IP-Adapter",
+            "filename": "CLIP-ViT-H-14-laion2B-s32B-b79K.safetensors",
+            "url": "https://huggingface.co/h94/IP-Adapter/resolve/main/models/image_encoder/model.safetensors"
+        },
+        {
+            "name": "CLIPVision model (IP-Adapter) CLIP-ViT-bigG-14-laion2B-39B-b160k",
+            "type": "clip_vision",
+            "base": "SDXL",
+            "save_path": "clip_vision/SDXL",
+            "description": "[3.69GB] CLIPVision model (needed for IP-Adapter)",
+            "reference": "https://huggingface.co/h94/IP-Adapter",
+            "filename": "CLIP-ViT-bigG-14-laion2B-39B-b160k.safetensors",
+            "url": "https://huggingface.co/h94/IP-Adapter/resolve/main/sdxl_models/image_encoder/model.safetensors"
+        },
         {
             "name": "stabilityai/control-lora-canny-rank128.safetensors",
             "type": "controlnet",