From 577bd48b085566c69951bc2301e7472d10da6d48 Mon Sep 17 00:00:00 2001
From: Alisson Pereira Anjos <117223069+alisson-anjos@users.noreply.github.com>
Date: Thu, 16 May 2024 02:49:19 +0100
Subject: [PATCH] Update custom node name Llava-Describer to Ollama-Describer (#684)

---
 custom-node-list.json | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/custom-node-list.json b/custom-node-list.json
index a3bf2d0..6322f1c 100644
--- a/custom-node-list.json
+++ b/custom-node-list.json
@@ -7408,14 +7408,14 @@
         },
         {
             "author": "alisson-anjos",
-            "title": "ComfyUI-LLaVA-Describer",
-            "id": "llava-describer",
-            "reference": "https://github.com/alisson-anjos/ComfyUI-LLaVA-Describer",
+            "title": "ComfyUI-Ollama-Describer",
+            "id": "ollama-describer",
+            "reference": "https://github.com/alisson-anjos/ComfyUI-Ollama-Describer",
             "files": [
-                "https://github.com/alisson-anjos/ComfyUI-LLaVA-Describer"
+                "https://github.com/alisson-anjos/ComfyUI-Ollama-Describer"
             ],
             "install_type": "git-clone",
-            "description": "This is an extension for ComfyUI to extract descriptions from your images using the multimodal model called LLaVa. The LLaVa model - Large Language and Vision Assistant, although trained on a relatively small dataset, demonstrates exceptional capabilities in understanding images and answering questions about them. This model shows behaviors similar to multimodal models like GPT-4, even when presented with unseen images and instructions."
+            "description": "This is an extension for ComfyUI that makes it possible to use some of the LLM models provided by Ollama, such as Gemma, Llava (multimodal), Llama2, Llama3 or Mistral. The LLaVa model in particular - Large Language and Vision Assistant - although trained on a relatively small dataset, demonstrates exceptional capabilities in understanding images and answering questions about them. This model exhibits behaviors similar to multimodal models such as GPT-4, even when presented with unseen images and instructions."
         },
         {
             "author": "chaosaiart",