From 506e67b9483c13cb9675548065fa975f1bc81550 Mon Sep 17 00:00:00 2001
From: Sprites20 <49236696+sprites20@users.noreply.github.com>
Date: Tue, 29 Nov 2022 00:45:01 +0800
Subject: [PATCH] 5.8 GB

---
 clip_interrogator/clip_interrogator.py     |  6 ++--
 clip_interrogator/clip_interrogator.py.bak |  6 ++--
 detect.py                                  | 36 ++++++++++++++++++---
 detect.py.bak                              | 37 +++++++++++++++++++---
 4 files changed, 71 insertions(+), 14 deletions(-)

diff --git a/clip_interrogator/clip_interrogator.py b/clip_interrogator/clip_interrogator.py
index 31094ba..8185dfa 100644
--- a/clip_interrogator/clip_interrogator.py
+++ b/clip_interrogator/clip_interrogator.py
@@ -27,12 +27,12 @@ class Config:
     # blip settings
     blip_image_eval_size: int = 384
     blip_max_length: int = 32
-    blip_model_url: str = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_large_caption.pth'
+    blip_model_url: str = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_14M.pth'
     blip_num_beams: int = 8
     blip_offload: bool = False
 
     # clip settings
-    clip_model_name: str = 'ViT-H-14/laion2b_s32b_b79k'
+    clip_model_name: str = 'ViT-B-32/openai'
     clip_model_path: str = None
 
     # interrogator settings
@@ -58,7 +58,7 @@ class Interrogator():
             blip_model = blip_decoder(
                 pretrained=config.blip_model_url,
                 image_size=config.blip_image_eval_size,
-                vit='large',
+                vit='base',
                 med_config=med_config
             )
             blip_model.eval()
diff --git a/clip_interrogator/clip_interrogator.py.bak b/clip_interrogator/clip_interrogator.py.bak
index 4efaa33..fcc818e 100644
--- a/clip_interrogator/clip_interrogator.py.bak
+++ b/clip_interrogator/clip_interrogator.py.bak
@@ -27,12 +27,12 @@ class Config:
     # blip settings
     blip_image_eval_size: int = 384
     blip_max_length: int = 32
-    blip_model_url: str = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_large_caption.pth'
+    blip_model_url: str = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_14M.pth'
     blip_num_beams: int = 8
     blip_offload: bool = False
 
     # clip settings
-    clip_model_name: str = 'CLIP-ViT-g-14-laion2B-s12B-b42K'
+    clip_model_name: str = 'ViT-B-32/laion2b_s34b_b79K'
     clip_model_path: str = None
 
     # interrogator settings
@@ -58,7 +58,7 @@ class Interrogator():
             blip_model = blip_decoder(
                 pretrained=config.blip_model_url,
                 image_size=config.blip_image_eval_size,
-                vit='large',
+                vit='base',
                 med_config=med_config
             )
             blip_model.eval()
diff --git a/detect.py b/detect.py
index 6a0f2d8..b2a5323 100644
--- a/detect.py
+++ b/detect.py
@@ -1,5 +1,33 @@
 from PIL import Image
-from clip_interrogator import Interrogator, Config
-image = Image.open("C:/Users/NakaMura/Desktop/2163670-bigthumbnail.jpg").convert('RGB')
-ci = Interrogator(Config(clip_model_name="ViT-B-32/openai"))
-print(ci.interrogate(image))
\ No newline at end of file
+#from clip_interrogator import Interrogator, Config
+img = Image.open("C:/Users/NakaMura/Desktop/2163670-bigthumbnail.jpg").convert('RGB')
+#ci = Interrogator(Config(clip_model_name="ViT-B-32/openai"))
+#print(ci.interrogate(image))
+
+import sys
+sys.path.append('src/blip')
+sys.path.append('clip-interrogator')
+
+from clip_interrogator import Config, Interrogator
+
+config = Config()
+config.blip_num_beams = 64
+config.blip_offload = False
+config.chunk_size = 2048
+config.flavor_intermediate_count = 2048
+
+ci = Interrogator(config)
+
+def inference(image, mode, clip_model_name, best_max_flavors=32):
+    if clip_model_name != ci.config.clip_model_name:
+        ci.config.clip_model_name = clip_model_name
+        ci.load_clip_model()
+    image = image.convert('RGB')
+    if mode == 'best':
+        return ci.interrogate(image, max_flavors=int(best_max_flavors))
+    elif mode == 'classic':
+        return ci.interrogate_classic(image)
+    else:
+        return ci.interrogate_fast(image)
+
+print(inference(img, "fast", clip_model_name="ViT-B-32/openai"))
\ No newline at end of file
diff --git a/detect.py.bak b/detect.py.bak
index 48951a1..9906860 100644
--- a/detect.py.bak
+++ b/detect.py.bak
@@ -1,5 +1,34 @@
 from PIL import Image
-from clip_interrogator import Interrogator, Config
-image = Image.open("C:/Users/NakaMura/Desktop/2163670-bigthumbnail.jpg").convert('RGB')
-ci = Interrogator(Config(clip_model_name="ViT-B-16/openai"))
-print(ci.interrogate(image))
\ No newline at end of file
+#from clip_interrogator import Interrogator, Config
+
+#ci = Interrogator(Config(clip_model_name="ViT-B-32/openai"))
+#print(ci.interrogate(image))
+
+import sys
+sys.path.append('src/blip')
+sys.path.append('clip-interrogator')
+
+from clip_interrogator import Config, Interrogator
+
+config = Config()
+config.blip_num_beams = 64
+config.blip_offload = False
+config.chunk_size = 2048
+config.flavor_intermediate_count = 2048
+
+ci = Interrogator(config)
+
+def inference(image, mode, clip_model_name, best_max_flavors=32):
+    if clip_model_name != ci.config.clip_model_name:
+        ci.config.clip_model_name = clip_model_name
+        ci.load_clip_model()
+    image = image.convert('RGB')
+    if mode == 'best':
+        return ci.interrogate(image, max_flavors=int(best_max_flavors))
+    elif mode == 'classic':
+        return ci.interrogate_classic(image)
+    else:
+        return ci.interrogate_fast(image)
+
+img = Image.open("C:/Users/NakaMura/Desktop/2163670-bigthumbnail.jpg").convert('RGB')
+print(inference(img, "fast", clip_model_name="ViT-B-32/openai"))
\ No newline at end of file
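
Note (not part of the patch): the changes above swap the BLIP 'large' captioner and ViT-H/ViT-g CLIP weights for the smaller BLIP 'base' checkpoint and ViT-B-32 CLIP, which is presumably what brings the footprint down to roughly the 5.8 GB named in the subject line. Below is a minimal sketch of how the inference() helper added to detect.py could be exercised across its three modes; it assumes it runs in the same script after the definitions above, and "example.jpg" is a placeholder path.

# Sketch only: exercise the inference() helper from the patched detect.py.
# Assumes this runs after the patched detect.py definitions; "example.jpg"
# is a placeholder path, not a file shipped with the patch.
from PIL import Image

sample = Image.open("example.jpg").convert('RGB')

for mode in ("fast", "classic", "best"):
    # Per the patch: "best" dispatches to ci.interrogate() capped by
    # best_max_flavors, "classic" to ci.interrogate_classic(), and any
    # other value to ci.interrogate_fast().
    print(mode, inference(sample, mode, clip_model_name="ViT-B-32/openai", best_max_flavors=16))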