
Fix for new transformers version.

commit 92eca60ec9 (pull/460/head)
comfyanonymous committed 2 years ago
1 file changed: comfy/clip_vision.py (4 lines: +3 −1)
@@ -1,6 +1,7 @@
 from transformers import CLIPVisionModelWithProjection, CLIPVisionConfig, CLIPImageProcessor
 from .utils import load_torch_file, transformers_convert
 import os
+import torch

 class ClipVisionModel():
     def __init__(self, json_config):
@@ -20,7 +21,8 @@ class ClipVisionModel():
         self.model.load_state_dict(sd, strict=False)

     def encode_image(self, image):
-        inputs = self.processor(images=[image[0]], return_tensors="pt")
+        img = torch.clip((255. * image[0]), 0, 255).round().int()
+        inputs = self.processor(images=[img], return_tensors="pt")
         outputs = self.model(**inputs)
         return outputs
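
The patch addresses a behavior change in newer transformers releases: CLIPImageProcessor rescales pixel values by 1/255 itself (do_rescale defaults to True), so the 0-1 float tensors ComfyUI passes in would be rescaled a second time. Converting the image to 0-255 integers before calling the processor keeps the rescaling from being applied twice. Below is a minimal standalone sketch of the same conversion; the random tensor standing in for a ComfyUI image batch, and its shape, are assumptions for illustration, not part of the commit.

    import torch
    from transformers import CLIPImageProcessor

    # Hypothetical stand-in for a ComfyUI image batch:
    # float32 values in [0, 1], shape (batch, height, width, channels).
    image = torch.rand(1, 224, 224, 3)

    processor = CLIPImageProcessor()

    # Same conversion as the patch: scale to 0-255, clamp, round, and
    # cast to int, so the processor's own 1/255 rescaling
    # (do_rescale=True by default in newer transformers) applies once.
    img = torch.clip((255. * image[0]), 0, 255).round().int()
    inputs = processor(images=[img], return_tensors="pt")
    print(inputs["pixel_values"].shape)  # torch.Size([1, 3, 224, 224])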
