From 55fe80c74ca6a389dc9234eb31494a438779d8e1 Mon Sep 17 00:00:00 2001
From: pharmapsychotic
Date: Thu, 5 Jan 2023 15:51:52 -0600
Subject: [PATCH 1/3] Simplify install further

Big ups to @justindujardin for proper syntax to get git dependency into requirements in way that pip will accept it! :D
---
 README.md                     | 7 +++----
 clip_interrogator/__init__.py | 2 +-
 requirements.txt              | 3 ++-
 setup.py                      | 2 +-
 4 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/README.md b/README.md
index d9df433..a0f79dc 100644
--- a/README.md
+++ b/README.md
@@ -35,15 +35,14 @@ Install with PIP
 # install torch with GPU support for example:
 pip3 install torch torchvision --extra-index-url https://download.pytorch.org/whl/cu117
 
-# install clip-interrogator and blip
-pip install clip-interrogator==0.3.3
-pip install git+https://github.com/pharmapsychotic/BLIP.git
+# install clip-interrogator
+pip install clip-interrogator==0.3.4
 ```
 
 You can then use it in your script
 ```python
 from PIL import Image
-from clip_interrogator import Interrogator, Config
+from clip_interrogator import Config, Interrogator
 image = Image.open(image_path).convert('RGB')
 ci = Interrogator(Config(clip_model_name="ViT-L-14/openai"))
 print(ci.interrogate(image))
diff --git a/clip_interrogator/__init__.py b/clip_interrogator/__init__.py
index 925f76a..a8f6aab 100644
--- a/clip_interrogator/__init__.py
+++ b/clip_interrogator/__init__.py
@@ -1,4 +1,4 @@
 from .clip_interrogator import Interrogator, Config
 
-__version__ = '0.3.3'
+__version__ = '0.3.4'
 __author__ = 'pharmapsychotic'
\ No newline at end of file
diff --git a/requirements.txt b/requirements.txt
index 735e90b..3a66935 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -3,4 +3,5 @@ torchvision
 Pillow
 requests
 tqdm
-open_clip_torch
\ No newline at end of file
+open_clip_torch
+blip @ git+https://github.com/pharmapsychotic/BLIP.git
\ No newline at end of file
diff --git a/setup.py b/setup.py
index 8201892..6554a13 100644
--- a/setup.py
+++ b/setup.py
@@ -5,7 +5,7 @@ from setuptools import setup, find_packages
 
 setup(
     name="clip-interrogator",
-    version="0.3.3",
+    version="0.3.4",
     license='MIT',
     author='pharmapsychotic',
     author_email='me@pharmapsychotic.com',

From 65c560ffacf0cc01bac5be770ef8f611168b83d0 Mon Sep 17 00:00:00 2001
From: pharmapsychotic
Date: Thu, 5 Jan 2023 16:33:27 -0600
Subject: [PATCH 2/3] Reuse existing blip-vit package on pypi

---
 README.md                     | 2 +-
 clip_interrogator/__init__.py | 2 +-
 requirements.txt              | 2 +-
 setup.py                      | 2 +-
 4 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/README.md b/README.md
index a0f79dc..0ffda8a 100644
--- a/README.md
+++ b/README.md
@@ -36,7 +36,7 @@ Install with PIP
 pip3 install torch torchvision --extra-index-url https://download.pytorch.org/whl/cu117
 
 # install clip-interrogator
-pip install clip-interrogator==0.3.4
+pip install clip-interrogator==0.3.5
 ```
 
 You can then use it in your script
diff --git a/clip_interrogator/__init__.py b/clip_interrogator/__init__.py
index a8f6aab..7e92186 100644
--- a/clip_interrogator/__init__.py
+++ b/clip_interrogator/__init__.py
@@ -1,4 +1,4 @@
 from .clip_interrogator import Interrogator, Config
 
-__version__ = '0.3.4'
+__version__ = '0.3.5'
 __author__ = 'pharmapsychotic'
\ No newline at end of file
diff --git a/requirements.txt b/requirements.txt
index 3a66935..ced3937 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -4,4 +4,4 @@ Pillow
 requests
 tqdm
 open_clip_torch
-blip @ git+https://github.com/pharmapsychotic/BLIP.git
\ No newline at end of file
+blip-vit
\ No newline at end of file
diff --git a/setup.py b/setup.py
index 6554a13..efe11d7 100644
--- a/setup.py
+++ b/setup.py
@@ -5,7 +5,7 @@ from setuptools import setup, find_packages
 
 setup(
     name="clip-interrogator",
-    version="0.3.4",
+    version="0.3.5",
     license='MIT',
     author='pharmapsychotic',
     author_email='me@pharmapsychotic.com',

From 99c8d45e86d7310d3b91b023ce7a78f70b972236 Mon Sep 17 00:00:00 2001
From: pharmapsychotic
Date: Thu, 5 Jan 2023 19:44:38 -0600
Subject: [PATCH 3/3] Make the BLIP model configurable, can set config.blip_model_type now to 'base' or 'large'

---
 clip_interrogator/clip_interrogator.py | 13 ++++++++-----
 1 file changed, 8 insertions(+), 5 deletions(-)

diff --git a/clip_interrogator/clip_interrogator.py b/clip_interrogator/clip_interrogator.py
index f3f3d6c..a634436 100644
--- a/clip_interrogator/clip_interrogator.py
+++ b/clip_interrogator/clip_interrogator.py
@@ -16,6 +16,10 @@ from torchvision.transforms.functional import InterpolationMode
 from tqdm import tqdm
 from typing import List
 
+BLIP_MODELS = {
+    'base': 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_caption_capfilt_large.pth',
+    'large': 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_large_caption.pth'
+}
 
 @dataclass
 class Config:
@@ -27,7 +31,7 @@ class Config:
     # blip settings
     blip_image_eval_size: int = 384
     blip_max_length: int = 32
-    blip_model_url: str = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_large_caption.pth'
+    blip_model_type: str = 'large' # choose between 'base' or 'large'
     blip_num_beams: int = 8
     blip_offload: bool = False
 
@@ -39,11 +43,10 @@ class Config:
     cache_path: str = 'cache'
     chunk_size: int = 2048
     data_path: str = os.path.join(os.path.dirname(__file__), 'data')
-    device: str = 'cuda' if torch.cuda.is_available() else 'cpu'
+    device: str = ("mps" if torch.backends.mps.is_available() else "cuda" if torch.cuda.is_available() else "cpu")
     flavor_intermediate_count: int = 2048
     quiet: bool = False # when quiet progress bars are not shown
 
-
 class Interrogator():
     def __init__(self, config: Config):
         self.config = config
@@ -56,9 +59,9 @@ class Interrogator():
             configs_path = os.path.join(os.path.dirname(blip_path), 'configs')
             med_config = os.path.join(configs_path, 'med_config.json')
             blip_model = blip_decoder(
-                pretrained=config.blip_model_url,
+                pretrained=BLIP_MODELS[config.blip_model_type],
                 image_size=config.blip_image_eval_size,
-                vit='large',
+                vit=config.blip_model_type,
                 med_config=med_config
             )
             blip_model.eval()
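
A minimal usage sketch of the `blip_model_type` option introduced in PATCH 3/3, not part of the patch series itself. The `Config` fields come from the diffs above; the image path is a placeholder.

```python
from PIL import Image
from clip_interrogator import Config, Interrogator

# 'base' selects the smaller BLIP captioning checkpoint from BLIP_MODELS;
# 'large' (the default) selects the larger, higher-quality one.
config = Config(clip_model_name="ViT-L-14/openai", blip_model_type="base")
ci = Interrogator(config)

image = Image.open("example.jpg").convert("RGB")  # placeholder image path
print(ci.interrogate(image))
```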