From 92eca60ec94d21b271b14eb7c832add963b09173 Mon Sep 17 00:00:00 2001
From: comfyanonymous
Date: Sun, 9 Apr 2023 15:47:35 -0400
Subject: [PATCH] Fix for new transformers version.

---
 comfy/clip_vision.py | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/comfy/clip_vision.py b/comfy/clip_vision.py
index cb29df43..efb2d538 100644
--- a/comfy/clip_vision.py
+++ b/comfy/clip_vision.py
@@ -1,6 +1,7 @@
 from transformers import CLIPVisionModelWithProjection, CLIPVisionConfig, CLIPImageProcessor
 from .utils import load_torch_file, transformers_convert
 import os
+import torch
 
 class ClipVisionModel():
     def __init__(self, json_config):
@@ -20,7 +21,8 @@ class ClipVisionModel():
         self.model.load_state_dict(sd, strict=False)
 
     def encode_image(self, image):
-        inputs = self.processor(images=[image[0]], return_tensors="pt")
+        img = torch.clip((255. * image[0]), 0, 255).round().int()
+        inputs = self.processor(images=[img], return_tensors="pt")
         outputs = self.model(**inputs)
         return outputs
 
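
Note (illustrative, not part of the patch): newer transformers image processors rescale inputs by 1/255 when do_rescale is enabled, so passing float pixel values already in [0, 1] would squash them toward zero; converting to 0..255 integers first, as the patch does, keeps the preprocessing correct. The sketch below demonstrates the same conversion in isolation, assuming ComfyUI's IMAGE convention of float tensors in [0, 1] with shape [batch, height, width, channels]; the tensor size and the bare CLIPImageProcessor() construction are assumptions chosen for the example.

    import torch
    from transformers import CLIPImageProcessor

    # Hypothetical stand-in for a ComfyUI IMAGE tensor: float32 values
    # in [0, 1], shape [batch, height, width, channels].
    image = torch.rand(1, 512, 512, 3)

    # Same conversion as the patch: scale to 0..255 and round to integers
    # so the processor's own 1/255 rescaling maps values back into [0, 1].
    img = torch.clip((255. * image[0]), 0, 255).round().int()

    processor = CLIPImageProcessor()  # default CLIP preprocessing settings
    inputs = processor(images=[img], return_tensors="pt")
    print(inputs["pixel_values"].shape)  # e.g. torch.Size([1, 3, 224, 224])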