Browse Source

Merge branch 'main' of github.com:pharmapsychotic/clip-interrogator into negative

pull/40/head
pharmapsychotic 2 years ago
parent
commit
392113de9a
  1. 7
      README.md
  2. 2
      clip_interrogator/__init__.py
  3. 13
      clip_interrogator/clip_interrogator.py
  4. 1
      requirements.txt
  5. 2
      setup.py

7
README.md

@@ -35,15 +35,14 @@ Install with PIP
# install torch with GPU support for example: # install torch with GPU support for example:
pip3 install torch torchvision --extra-index-url https://download.pytorch.org/whl/cu117 pip3 install torch torchvision --extra-index-url https://download.pytorch.org/whl/cu117
# install clip-interrogator and blip # install clip-interrogator
pip install clip-interrogator==0.3.3 pip install clip-interrogator==0.3.5
pip install git+https://github.com/pharmapsychotic/BLIP.git
``` ```
You can then use it in your script You can then use it in your script
```python ```python
from PIL import Image from PIL import Image
from clip_interrogator import Interrogator, Config from clip_interrogator import Config, Interrogator
image = Image.open(image_path).convert('RGB') image = Image.open(image_path).convert('RGB')
ci = Interrogator(Config(clip_model_name="ViT-L-14/openai")) ci = Interrogator(Config(clip_model_name="ViT-L-14/openai"))
print(ci.interrogate(image)) print(ci.interrogate(image))

2
clip_interrogator/__init__.py

@@ -1,4 +1,4 @@
from .clip_interrogator import Interrogator, Config from .clip_interrogator import Interrogator, Config
__version__ = '0.3.3' __version__ = '0.3.5'
__author__ = 'pharmapsychotic' __author__ = 'pharmapsychotic'

13
clip_interrogator/clip_interrogator.py

@@ -16,6 +16,10 @@ from torchvision.transforms.functional import InterpolationMode
from tqdm import tqdm from tqdm import tqdm
from typing import List from typing import List
BLIP_MODELS = {
'base': 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_caption_capfilt_large.pth',
'large': 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_large_caption.pth'
}
@dataclass @dataclass
class Config: class Config:
@@ -27,7 +31,7 @@ class Config:
# blip settings # blip settings
blip_image_eval_size: int = 384 blip_image_eval_size: int = 384
blip_max_length: int = 32 blip_max_length: int = 32
blip_model_url: str = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_large_caption.pth' blip_model_type: str = 'large' # choose between 'base' or 'large'
blip_num_beams: int = 8 blip_num_beams: int = 8
blip_offload: bool = False blip_offload: bool = False
@@ -39,11 +43,10 @@ class Config:
cache_path: str = 'cache' cache_path: str = 'cache'
chunk_size: int = 2048 chunk_size: int = 2048
data_path: str = os.path.join(os.path.dirname(__file__), 'data') data_path: str = os.path.join(os.path.dirname(__file__), 'data')
device: str = 'cuda' if torch.cuda.is_available() else 'cpu' device: str = ("mps" if torch.backends.mps.is_available() else "cuda" if torch.cuda.is_available() else "cpu")
flavor_intermediate_count: int = 2048 flavor_intermediate_count: int = 2048
quiet: bool = False # when quiet progress bars are not shown quiet: bool = False # when quiet progress bars are not shown
class Interrogator(): class Interrogator():
def __init__(self, config: Config): def __init__(self, config: Config):
self.config = config self.config = config
@@ -56,9 +59,9 @@ class Interrogator():
configs_path = os.path.join(os.path.dirname(blip_path), 'configs') configs_path = os.path.join(os.path.dirname(blip_path), 'configs')
med_config = os.path.join(configs_path, 'med_config.json') med_config = os.path.join(configs_path, 'med_config.json')
blip_model = blip_decoder( blip_model = blip_decoder(
pretrained=config.blip_model_url, pretrained=BLIP_MODELS[config.blip_model_type],
image_size=config.blip_image_eval_size, image_size=config.blip_image_eval_size,
vit='large', vit=config.blip_model_type,
med_config=med_config med_config=med_config
) )
blip_model.eval() blip_model.eval()

1
requirements.txt

@@ -4,3 +4,4 @@ Pillow
requests requests
tqdm tqdm
open_clip_torch open_clip_torch
blip-vit

2
setup.py

@@ -5,7 +5,7 @@ from setuptools import setup, find_packages
setup( setup(
name="clip-interrogator", name="clip-interrogator",
version="0.3.3", version="0.3.5",
license='MIT', license='MIT',
author='pharmapsychotic', author='pharmapsychotic',
author_email='me@pharmapsychotic.com', author_email='me@pharmapsychotic.com',

Loading…
Cancel
Save