diff --git a/clip_interrogator/clip_interrogator.py.bak b/clip_interrogator/clip_interrogator.py.bak
index fcc818e..661198c 100644
--- a/clip_interrogator/clip_interrogator.py.bak
+++ b/clip_interrogator/clip_interrogator.py.bak
@@ -27,12 +27,12 @@ class Config:
     # blip settings
     blip_image_eval_size: int = 384
     blip_max_length: int = 32
-    blip_model_url: str = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_14M.pth'
+    blip_model_url: str = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_large.pth'
     blip_num_beams: int = 8
     blip_offload: bool = False
 
     # clip settings
-    clip_model_name: str = 'ViT-B-32/laion2b_s34b_b79K'
+    clip_model_name: str = 'ViT-B-32/openai'
     clip_model_path: str = None
 
     # interrogator settings
diff --git a/detect.py b/detect.py
index b2a5323..0218dc2 100644
--- a/detect.py
+++ b/detect.py
@@ -1,6 +1,7 @@
 from PIL import Image
 #from clip_interrogator import Interrogator, Config
-img = Image.open("C:/Users/NakaMura/Desktop/2163670-bigthumbnail.jpg").convert('RGB')
+#@title Setup
+import os, subprocess
 #ci = Interrogator(Config(clip_model_name="ViT-B-32/openai"))
 #print(ci.interrogate(image))
 
@@ -10,6 +11,25 @@ sys.path.append('clip-interrogator')
 
 from clip_interrogator import Config, Interrogator
 
+# download cache files
+"""
+print("Download preprocessed cache files...")
+CACHE_URLS = [
+    #'https://huggingface.co/pharma/ci-preprocess/raw/main/ViT-L-14_openai_artists.pkl',
+    #'https://huggingface.co/pharma/ci-preprocess/raw/main/ViT-L-14_openai_flavors.pkl',
+    #'https://huggingface.co/pharma/ci-preprocess/raw/main/ViT-L-14_openai_mediums.pkl',
+    #'https://huggingface.co/pharma/ci-preprocess/raw/main/ViT-L-14_openai_movements.pkl',
+    #'https://huggingface.co/pharma/ci-preprocess/raw/main/ViT-L-14_openai_trendings.pkl',
+    #'https://huggingface.co/pharma/ci-preprocess/resolve/main/ViT-H-14_laion2b_s32b_b79k_artists.pkl',
+    #'https://huggingface.co/pharma/ci-preprocess/resolve/main/ViT-H-14_laion2b_s32b_b79k_flavors.pkl',
+    #'https://huggingface.co/pharma/ci-preprocess/resolve/main/ViT-H-14_laion2b_s32b_b79k_mediums.pkl',
+    #'https://huggingface.co/pharma/ci-preprocess/resolve/main/ViT-H-14_laion2b_s32b_b79k_movements.pkl',
+    #'https://huggingface.co/pharma/ci-preprocess/resolve/main/ViT-H-14_laion2b_s32b_b79k_trendings.pkl',
+]
+os.makedirs('cache', exist_ok=True)
+for url in CACHE_URLS:
+    print(subprocess.run(['wget', url, '-P', 'cache'], stdout=subprocess.PIPE).stdout.decode('utf-8'))
+"""
 config = Config()
 config.blip_num_beams = 64
 config.blip_offload = False
@@ -29,5 +49,41 @@ def inference(image, mode, clip_model_name, best_max_flavors=32):
         return ci.interrogate_classic(image)
     else:
         return ci.interrogate_fast(image)
-
-print(inference(img, "fast", clip_model_name="ViT-B-32/openai"))
\ No newline at end of file
+
+from PIL import Image
+#from clip_interrogator import Interrogator, Config
+
+#ci = Interrogator(Config(clip_model_name="ViT-B-32/openai"))
+#print(ci.interrogate(image))
+
+import sys
+sys.path.append('src/blip')
+sys.path.append('clip-interrogator')
+
+from clip_interrogator import Config, Interrogator
+
+config = Config()
+config.blip_num_beams = 64
+config.blip_offload = False
+config.chunk_size = 2048
+config.flavor_intermediate_count = 2048
+
+ci = Interrogator(config)
+
+def inference(image, mode, clip_model_name, best_max_flavors=16):
+    if clip_model_name != ci.config.clip_model_name:
+        ci.config.clip_model_name = clip_model_name
+        ci.load_clip_model()
+    image = image.convert('RGB')
+    if mode == 'best':
+        return ci.interrogate(image, max_flavors=int(best_max_flavors))
+    elif mode == 'classic':
+        return ci.interrogate_classic(image)
+    else:
+        return ci.interrogate_fast(image)
+
+img = Image.open("C:/Users/NakaMura/Desktop/Screenshot 2022-11-27 180640.jpg").convert('RGB')
+print(inference(img, "fast", clip_model_name="ViT-B-32/openai"))
+
+img = Image.open("C:/Users/NakaMura/Desktop/Screenshot 2022-11-27 175414.jpg").convert('RGB')
+print(inference(img, "best", clip_model_name="ViT-B-32/openai"))
\ No newline at end of file
diff --git a/detect.py.bak b/detect.py.bak
index 9906860..7b3e221 100644
--- a/detect.py.bak
+++ b/detect.py.bak
@@ -1,5 +1,57 @@
 from PIL import Image
 #from clip_interrogator import Interrogator, Config
+#@title Setup
+import os, subprocess
+#ci = Interrogator(Config(clip_model_name="ViT-B-32/openai"))
+#print(ci.interrogate(image))
+
+import sys
+sys.path.append('src/blip')
+sys.path.append('clip-interrogator')
+
+from clip_interrogator import Config, Interrogator
+
+# download cache files
+"""
+print("Download preprocessed cache files...")
+CACHE_URLS = [
+    #'https://huggingface.co/pharma/ci-preprocess/raw/main/ViT-L-14_openai_artists.pkl',
+    #'https://huggingface.co/pharma/ci-preprocess/raw/main/ViT-L-14_openai_flavors.pkl',
+    #'https://huggingface.co/pharma/ci-preprocess/raw/main/ViT-L-14_openai_mediums.pkl',
+    #'https://huggingface.co/pharma/ci-preprocess/raw/main/ViT-L-14_openai_movements.pkl',
+    #'https://huggingface.co/pharma/ci-preprocess/raw/main/ViT-L-14_openai_trendings.pkl',
+    #'https://huggingface.co/pharma/ci-preprocess/resolve/main/ViT-H-14_laion2b_s32b_b79k_artists.pkl',
+    #'https://huggingface.co/pharma/ci-preprocess/resolve/main/ViT-H-14_laion2b_s32b_b79k_flavors.pkl',
+    #'https://huggingface.co/pharma/ci-preprocess/resolve/main/ViT-H-14_laion2b_s32b_b79k_mediums.pkl',
+    #'https://huggingface.co/pharma/ci-preprocess/resolve/main/ViT-H-14_laion2b_s32b_b79k_movements.pkl',
+    #'https://huggingface.co/pharma/ci-preprocess/resolve/main/ViT-H-14_laion2b_s32b_b79k_trendings.pkl',
+]
+os.makedirs('cache', exist_ok=True)
+for url in CACHE_URLS:
+    print(subprocess.run(['wget', url, '-P', 'cache'], stdout=subprocess.PIPE).stdout.decode('utf-8'))
+"""
+config = Config()
+config.blip_num_beams = 64
+config.blip_offload = False
+config.chunk_size = 2048
+config.flavor_intermediate_count = 2048
+
+ci = Interrogator(config)
+
+def inference(image, mode, clip_model_name, best_max_flavors=32):
+    if clip_model_name != ci.config.clip_model_name:
+        ci.config.clip_model_name = clip_model_name
+        ci.load_clip_model()
+    image = image.convert('RGB')
+    if mode == 'best':
+        return ci.interrogate(image, max_flavors=int(best_max_flavors))
+    elif mode == 'classic':
+        return ci.interrogate_classic(image)
+    else:
+        return ci.interrogate_fast(image)
+
+from PIL import Image
+#from clip_interrogator import Interrogator, Config
 #ci = Interrogator(Config(clip_model_name="ViT-B-32/openai"))
 #print(ci.interrogate(image))
 
@@ -30,5 +82,8 @@ def inference(image, mode, clip_model_name, best_max_flavors=32):
     else:
         return ci.interrogate_fast(image)
 
-img = Image.open("C:/Users/NakaMura/Desktop/2163670-bigthumbnail.jpg").convert('RGB')
-print(inference(img, "fast", clip_model_name="ViT-B-32/openai"))
\ No newline at end of file
+img = Image.open("C:/Users/NakaMura/Desktop/Screenshot 2022-11-27 180640.jpg").convert('RGB')
+print(inference(img, "fast", clip_model_name="ViT-B-32/openai"))
+
+img = Image.open("C:/Users/NakaMura/Desktop/Screenshot 2022-11-27 175414.jpg").convert('RGB')
+print(inference(img, "best", clip_model_name="ViT-B-32/openai"))
\ No newline at end of file
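
For reference, the inference helper added above dispatches on mode: 'best' calls ci.interrogate with max_flavors, 'classic' calls ci.interrogate_classic, and any other value falls through to ci.interrogate_fast. A minimal driver sketch, assuming the setup from the new detect.py has already run; 'example.jpg' is a placeholder path, not a file from this change:

    from PIL import Image

    # Any RGB image works; the hard-coded Desktop paths in detect.py are machine-specific.
    image = Image.open('example.jpg').convert('RGB')

    # Exercise all three modes with the same OpenAI ViT-B-32 checkpoint.
    for mode in ('fast', 'classic', 'best'):
        print(mode, '->', inference(image, mode, clip_model_name='ViT-B-32/openai'))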