diff --git a/README.md b/README.md
index 86e0502..1e4a08d 100644
--- a/README.md
+++ b/README.md
@@ -10,6 +10,12 @@ Run Version 2 on Colab, HuggingFace, and Replicate!
+For **Stable Diffusion 2.0** prompting, use the `ViT-H` version:
+
+[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/pharmapsychotic/clip-interrogator/blob/open-clip/clip_interrogator.ipynb) [![Generic badge](https://img.shields.io/badge/🤗-Open%20in%20Spaces-blue.svg)](https://huggingface.co/spaces/fffiloni/CLIP-Interrogator-2)
+
+
+
Version 1 still available in Colab for comparing different CLIP models
[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/pharmapsychotic/clip-interrogator/blob/v1/clip_interrogator.ipynb)
@@ -30,7 +36,6 @@ source ci_env/bin/activate
Install with PIP
```
-pip install -e git+https://github.com/openai/CLIP.git@main#egg=clip
pip install -e git+https://github.com/pharmapsychotic/BLIP.git@lib#egg=blip
pip install clip-interrogator
```
@@ -40,6 +45,10 @@ You can then use it in your script
from PIL import Image
from clip_interrogator import Interrogator, Config
image = Image.open(image_path).convert('RGB')
-ci = Interrogator(Config(clip_model_name="ViT-L/14"))
+ci = Interrogator(Config(clip_model_name="ViT-L-14/openai"))
print(ci.interrogate(image))
```
+
+The CLIP Interrogator uses OpenCLIP, which supports many different pretrained CLIP models. For the best prompts for
+Stable Diffusion 1.X, use `ViT-L-14/openai` as the `clip_model_name`. For Stable Diffusion 2.0, use `ViT-H-14/laion2b_s32b_b79k`.
+
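For context on the new README wording, here is a minimal sketch of how the OpenCLIP-style model names fit together end to end. The image path `example.jpg` is a placeholder, and the model names are the ones recommended in the README text above; treat this as an illustration, not part of the patch.

```
# Rough sketch: listing OpenCLIP models and interrogating an image for SD 2.0.
import open_clip
from PIL import Image
from clip_interrogator import Interrogator, Config

# OpenCLIP pretrained models are (architecture, weights) pairs; the
# clip_model_name strings are simply the two parts joined with '/'.
print(['/'.join(pair) for pair in open_clip.list_pretrained()][:5])

# ViT-L-14/openai targets Stable Diffusion 1.X prompts; ViT-H-14/laion2b_s32b_b79k
# is the recommendation for Stable Diffusion 2.0.
image = Image.open("example.jpg").convert('RGB')  # placeholder image path
ci = Interrogator(Config(clip_model_name="ViT-H-14/laion2b_s32b_b79k"))
print(ci.interrogate(image))
```

Joining each `(architecture, weights)` pair from `open_clip.list_pretrained()` with `/` is what produces names like `ViT-L-14/openai`, which is why the old `ViT-L/14` spelling no longer applies.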
diff --git a/cog.yaml b/cog.yaml
index c9a4bde..a9f9a8e 100644
--- a/cog.yaml
+++ b/cog.yaml
@@ -12,9 +12,9 @@ build:
- "ftfy==6.1.1"
- "torch==1.11.0 --extra-index-url=https://download.pytorch.org/whl/cu113"
- "torchvision==0.12.0 --extra-index-url=https://download.pytorch.org/whl/cu113"
+ - "open_clip_torch==2.7.0"
run:
- pip install -e git+https://github.com/pharmapsychotic/BLIP.git@lib#egg=blip
- - pip install -e git+https://github.com/openai/CLIP.git@main#egg=clip
- mkdir -p /root/.cache/clip && wget --output-document "/root/.cache/clip/ViT-L-14.pt" "https://openaipublic.azureedge.net/clip/models/b8cca3fd41ae0c99ba7e8951adf17d267cdb84cd88be6f7c2e0eca1737a03836/ViT-L-14.pt"
predict: "predict.py:Predictor"
diff --git a/predict.py b/predict.py
index c085797..4452b90 100644
--- a/predict.py
+++ b/predict.py
@@ -9,7 +9,7 @@ from clip_interrogator import Interrogator, Config
class Predictor(BasePredictor):
def setup(self):
- config = Config(device="cuda:0", clip_model_name='ViT-L/14')
+ config = Config(device="cuda:0", clip_model_name='ViT-L-14/openai')
self.ci = Interrogator(config)
def predict(self, image: Path = Input(description="Input image")) -> str:
diff --git a/requirements.txt b/requirements.txt
index 8bfd9cd..735e90b 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -2,4 +2,5 @@ torch
torchvision
Pillow
requests
-tqdm
\ No newline at end of file
+tqdm
+open_clip_torch
\ No newline at end of file
diff --git a/run_cli.py b/run_cli.py
index 453abc6..efdd18b 100755
--- a/run_cli.py
+++ b/run_cli.py
@@ -1,7 +1,7 @@
#!/usr/bin/env python3
import argparse
-import clip
import csv
+import open_clip
import os
import requests
import torch
@@ -19,7 +19,7 @@ def inference(ci, image, mode):
def main():
parser = argparse.ArgumentParser()
- parser.add_argument('-c', '--clip', default='ViT-L/14', help='name of CLIP model to use')
+ parser.add_argument('-c', '--clip', default='ViT-L-14/openai', help='name of CLIP model to use')
parser.add_argument('-f', '--folder', help='path to folder of images')
parser.add_argument('-i', '--image', help='image file or url')
parser.add_argument('-m', '--mode', default='best', help='best, classic, or fast')
@@ -34,9 +34,10 @@ def main():
exit(1)
# validate clip model name
- if args.clip not in clip.available_models():
+ models = ['/'.join(x) for x in open_clip.list_pretrained()]
+ if args.clip not in models:
print(f"Could not find CLIP model {args.clip}!")
- print(f" available models: {clip.available_models()}")
+ print(f" available models: {models}")
exit(1)
# generate a nice prompt
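As a standalone illustration of the validation change above, the same membership check can be exercised on its own. `requested` is just an example value mirroring the new CLI default, not a new flag or helper in the patch.

```
# Sketch of the model-name check run_cli.py now performs against OpenCLIP.
import open_clip

requested = 'ViT-L-14/openai'  # example value; matches the new --clip default
models = ['/'.join(pair) for pair in open_clip.list_pretrained()]
if requested not in models:
    print(f"Could not find CLIP model {requested}!")
    print(f"    available models: {models}")
else:
    print(f"{requested} is available")
```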