
Update more stuff for open_clip switch

pull/18/head
pharmapsychotic 2 years ago
commit e3c1a4df84
1. README.md (13 changes)
2. cog.yaml (2 changes)
3. predict.py (2 changes)
4. requirements.txt (3 changes)
5. run_cli.py (9 changes)

README.md (13 changes)

@@ -10,6 +10,12 @@ Run Version 2 on Colab, HuggingFace, and Replicate!
+<br>
+For **Stable Diffusion 2.0** prompting use the `ViT-H` version:
+[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/pharmapsychotic/clip-interrogator/blob/open-clip/clip_interrogator.ipynb) [![Generic badge](https://img.shields.io/badge/🤗-Open%20in%20Spaces-blue.svg)](https://huggingface.co/spaces/fffiloni/CLIP-Interrogator-2)
+<br>
+Version 1 still available in Colab for comparing different CLIP models
+[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/pharmapsychotic/clip-interrogator/blob/v1/clip_interrogator.ipynb)
@@ -30,7 +36,6 @@ source ci_env/bin/activate
Install with PIP
```
-pip install -e git+https://github.com/openai/CLIP.git@main#egg=clip
pip install -e git+https://github.com/pharmapsychotic/BLIP.git@lib#egg=blip
pip install clip-interrogator
```
@@ -40,6 +45,10 @@ You can then use it in your script
from PIL import Image
from clip_interrogator import Interrogator, Config
image = Image.open(image_path).convert('RGB')
-ci = Interrogator(Config(clip_model_name="ViT-L/14"))
+ci = Interrogator(Config(clip_model_name="ViT-L-14/openai"))
print(ci.interrogate(image))
```
+CLIP Interrogator uses OpenCLIP which supports many different pretrained CLIP models. For the best prompts for
+Stable Diffusion 1.X use `ViT-L-14/openai` for clip_model_name. For Stable Diffusion 2.0 use `ViT-H-14/laion2b_s32b_b79k`
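
As an illustration of the model-name guidance above (an editor's aside, not part of the diff): both names come straight from the README text and use the same `Interrogator`/`Config` API shown in the snippet; the image path is a placeholder.
```
from PIL import Image
from clip_interrogator import Config, Interrogator

image = Image.open("example.jpg").convert("RGB")  # placeholder image path

# Stable Diffusion 1.x: OpenAI ViT-L weights
ci = Interrogator(Config(clip_model_name="ViT-L-14/openai"))

# Stable Diffusion 2.0: LAION ViT-H weights
# ci = Interrogator(Config(clip_model_name="ViT-H-14/laion2b_s32b_b79k"))

print(ci.interrogate(image))
```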

cog.yaml (2 changes)

@@ -12,9 +12,9 @@ build:
    - "ftfy==6.1.1"
    - "torch==1.11.0 --extra-index-url=https://download.pytorch.org/whl/cu113"
    - "torchvision==0.12.0 --extra-index-url=https://download.pytorch.org/whl/cu113"
+    - "open_clip_torch==2.7.0"
  run:
    - pip install -e git+https://github.com/pharmapsychotic/BLIP.git@lib#egg=blip
-    - pip install -e git+https://github.com/openai/CLIP.git@main#egg=clip
    - mkdir -p /root/.cache/clip && wget --output-document "/root/.cache/clip/ViT-L-14.pt" "https://openaipublic.azureedge.net/clip/models/b8cca3fd41ae0c99ba7e8951adf17d267cdb84cd88be6f7c2e0eca1737a03836/ViT-L-14.pt"
predict: "predict.py:Predictor"
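
An editor's aside, not part of the commit: cog.yaml now installs `open_clip_torch` from PyPI (pinned to 2.7.0) rather than the OpenAI CLIP git package. A tiny sanity check, assuming the version attribute exposed by open_clip_torch:
```
import open_clip

# The installed package should match the "open_clip_torch==2.7.0" pin above.
print(open_clip.__version__)
assert open_clip.__version__ == "2.7.0"
```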

predict.py (2 changes)

@@ -9,7 +9,7 @@ from clip_interrogator import Interrogator, Config
class Predictor(BasePredictor):
    def setup(self):
-        config = Config(device="cuda:0", clip_model_name='ViT-L/14')
+        config = Config(device="cuda:0", clip_model_name='ViT-L-14/openai')
        self.ci = Interrogator(config)

    def predict(self, image: Path = Input(description="Input image")) -> str:
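
For context (an editor's sketch, not part of the diff): `Predictor` is the Cog entrypoint declared in cog.yaml (`predict: "predict.py:Predictor"`). Outside of Cog it could be exercised roughly like this, assuming a CUDA device and a placeholder local image:
```
from cog import Path
from predict import Predictor

predictor = Predictor()
predictor.setup()                                       # builds the Interrogator on cuda:0
prompt = predictor.predict(image=Path("example.jpg"))   # placeholder image file
print(prompt)
```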

requirements.txt (3 changes)

@@ -2,4 +2,5 @@ torch
torchvision
Pillow
requests
-tqdm
+tqdm
+open_clip_torch

run_cli.py (9 changes)

@@ -1,7 +1,7 @@
#!/usr/bin/env python3
import argparse
-import clip
import csv
+import open_clip
import os
import requests
import torch
@@ -19,7 +19,7 @@ def inference(ci, image, mode):
def main():
    parser = argparse.ArgumentParser()
-    parser.add_argument('-c', '--clip', default='ViT-L/14', help='name of CLIP model to use')
+    parser.add_argument('-c', '--clip', default='ViT-L-14/openai', help='name of CLIP model to use')
    parser.add_argument('-f', '--folder', help='path to folder of images')
    parser.add_argument('-i', '--image', help='image file or url')
    parser.add_argument('-m', '--mode', default='best', help='best, classic, or fast')
@@ -34,9 +34,10 @@
        exit(1)

    # validate clip model name
-    if args.clip not in clip.available_models():
+    models = ['/'.join(x) for x in open_clip.list_pretrained()]
+    if args.clip not in models:
        print(f"Could not find CLIP model {args.clip}!")
-        print(f" available models: {clip.available_models()}")
+        print(f" available models: {models}")
        exit(1)

    # generate a nice prompt
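
A short aside (not part of the diff) on the name format the new validation builds: `open_clip.list_pretrained()` returns `(architecture, pretrained_tag)` tuples, so joining each tuple with `/` yields names like the `ViT-L-14/openai` default above.
```
import open_clip

models = ['/'.join(x) for x in open_clip.list_pretrained()]
print(models[:5])                              # "architecture/pretrained_tag" strings
print('ViT-L-14/openai' in models)             # the CLI default should be in the list
print('ViT-H-14/laion2b_s32b_b79k' in models)  # the SD 2.0 model named in the README
```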
