update DB

Branch: pull/252/head
Author: Dr.Lt.Data, 11 months ago
Commit: 7839d2308c
Changed files:

  1. custom-node-list.json (4 lines changed)
  2. extension-node-map.json (6 lines changed)
  3. model-list.json (20 lines changed)
  4. node_db/new/extension-node-map.json (6 lines changed)
  5. node_db/new/model-list.json (21 lines changed)

custom-node-list.json (4 lines changed)

@@ -2388,13 +2388,13 @@
         },
         {
             "author": "shadowcz007",
-            "title": "comfyui-mixlab-nodes [WIP]",
+            "title": "comfyui-mixlab-nodes",
             "reference": "https://github.com/shadowcz007/comfyui-mixlab-nodes",
             "files": [
                 "https://github.com/shadowcz007/comfyui-mixlab-nodes"
             ],
             "install_type": "git-clone",
-            "description": "Nodes: RandomPrompt, TransparentImage, LoadImageFromPath, Splitting a long image into sections, FaceToMask, AreaToMask, ImagesCrop, ImageCropByAlpha, FeatheredMask, SplitLongMask, EnhanceImage, CLIPSeg, Consistency Decoder Loader, Consistency Decoder Decode, ..."
+            "description": "3D, ScreenShareNode & FloatingVideoNode, SpeechRecognition & SpeechSynthesis, GPT, LoadImagesFromLocal, Layers, Other Nodes, ..."
         },
         {
             "author": "ostris",

extension-node-map.json (6 lines changed)

@@ -2356,7 +2356,9 @@
     "https://github.com/aegis72/aegisflow_utility_nodes": [
         [
             "Aegisflow Image Pass",
             "Aegisflow Latent Pass",
+            "Aegisflow Model Pass",
+            "Aegisflow VAE Pass",
             "Aegisflow controlnet preprocessor bus",
             "Brightness & Contrast_Ally",
             "Gaussian Blur_Ally",
@@ -5046,7 +5048,7 @@
             "VAELoaderConsistencyDecoder"
         ],
         {
-            "title_aux": "comfyui-mixlab-nodes [WIP]"
+            "title_aux": "comfyui-mixlab-nodes"
         }
     ],
     "https://github.com/shiimizu/ComfyUI_smZNodes": [

model-list.json (20 lines changed)

@@ -510,6 +510,26 @@
             "filename": "pytorch_lora_weights.safetensors",
             "url": "https://huggingface.co/latent-consistency/lcm-lora-sdxl/resolve/main/pytorch_lora_weights.safetensors"
         },
+        {
+            "name": "Segmind-Vega",
+            "type": "checkpoint",
+            "base": "segmind-vega",
+            "save_path": "checkpoints/segmind-vega",
+            "description": "The Segmind-Vega Model is a distilled version of the Stable Diffusion XL (SDXL), offering a remarkable 70% reduction in size and an impressive 100% speedup while retaining high-quality text-to-image generation capabilities.",
+            "reference": "https://huggingface.co/segmind/Segmind-Vega",
+            "filename": "segmind-vega.safetensors",
+            "url": "https://huggingface.co/segmind/Segmind-Vega/resolve/main/segmind-vega.safetensors"
+        },
+        {
+            "name": "Segmind-VegaRT - Latent Consistency Model (LCM) LoRA of Segmind-Vega",
+            "type": "lora",
+            "base": "segmind-vega",
+            "save_path": "loras/segmind-vega",
+            "description": "Segmind-VegaRT a distilled consistency adapter for Segmind-Vega that allows to reduce the number of inference steps to only between 2 - 8 steps.",
+            "reference": "https://huggingface.co/segmind/Segmind-VegaRT",
+            "filename": "pytorch_lora_weights.safetensors",
+            "url": "https://huggingface.co/segmind/Segmind-VegaRT/resolve/main/pytorch_lora_weights.safetensors"
+        },
         {
             "name": "Theovercomer8's Contrast Fix (SD2.1)",
             "type": "lora",

node_db/new/extension-node-map.json (6 lines changed)

@@ -2356,7 +2356,9 @@
     "https://github.com/aegis72/aegisflow_utility_nodes": [
         [
             "Aegisflow Image Pass",
             "Aegisflow Latent Pass",
+            "Aegisflow Model Pass",
+            "Aegisflow VAE Pass",
             "Aegisflow controlnet preprocessor bus",
             "Brightness & Contrast_Ally",
             "Gaussian Blur_Ally",
@@ -5046,7 +5048,7 @@
             "VAELoaderConsistencyDecoder"
         ],
         {
-            "title_aux": "comfyui-mixlab-nodes [WIP]"
+            "title_aux": "comfyui-mixlab-nodes"
         }
     ],
     "https://github.com/shiimizu/ComfyUI_smZNodes": [

node_db/new/model-list.json (21 lines changed)

@@ -1,5 +1,26 @@
 {
     "models": [
+        {
+            "name": "Segmind-Vega",
+            "type": "checkpoint",
+            "base": "segmind-vega",
+            "save_path": "checkpoints/segmind-vega",
+            "description": "The Segmind-Vega Model is a distilled version of the Stable Diffusion XL (SDXL), offering a remarkable 70% reduction in size and an impressive 100% speedup while retaining high-quality text-to-image generation capabilities.",
+            "reference": "https://huggingface.co/segmind/Segmind-Vega",
+            "filename": "segmind-vega.safetensors",
+            "url": "https://huggingface.co/segmind/Segmind-Vega/resolve/main/segmind-vega.safetensors"
+        },
+        {
+            "name": "Segmind-VegaRT - Latent Consistency Model (LCM) LoRA of Segmind-Vega",
+            "type": "lora",
+            "base": "segmind-vega",
+            "save_path": "loras/segmind-vega",
+            "description": "Segmind-VegaRT a distilled consistency adapter for Segmind-Vega that allows to reduce the number of inference steps to only between 2 - 8 steps.",
+            "reference": "https://huggingface.co/segmind/Segmind-VegaRT",
+            "filename": "pytorch_lora_weights.safetensors",
+            "url": "https://huggingface.co/segmind/Segmind-VegaRT/resolve/main/pytorch_lora_weights.safetensors"
+        },
         {
             "name": "stabilityai/Stable Zero123",
             "type": "zero123",
