Browse Source

6 GB

pull/22/head
Sprites20 2 years ago
parent
commit
99304049e9
  1. 4
      clip_interrogator/clip_interrogator.py.bak
  2. 58
      detect.py
  3. 57
      detect.py.bak

4
clip_interrogator/clip_interrogator.py.bak

@ -27,12 +27,12 @@ class Config:
# blip settings # blip settings
blip_image_eval_size: int = 384 blip_image_eval_size: int = 384
blip_max_length: int = 32 blip_max_length: int = 32
blip_model_url: str = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_14M.pth' blip_model_url: str = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_large.pth'
blip_num_beams: int = 8 blip_num_beams: int = 8
blip_offload: bool = False blip_offload: bool = False
# clip settings # clip settings
clip_model_name: str = 'ViT-B-32/laion2b_s34b_b79K' clip_model_name: str = 'ViT-B-32/openai'
clip_model_path: str = None clip_model_path: str = None
# interrogator settings # interrogator settings

58
detect.py

@ -1,6 +1,7 @@
from PIL import Image from PIL import Image
#from clip_interrogator import Interrogator, Config #from clip_interrogator import Interrogator, Config
img = Image.open("C:/Users/NakaMura/Desktop/2163670-bigthumbnail.jpg").convert('RGB') #@title Setup
import os, subprocess
#ci = Interrogator(Config(clip_model_name="ViT-B-32/openai")) #ci = Interrogator(Config(clip_model_name="ViT-B-32/openai"))
#print(ci.interrogate(image)) #print(ci.interrogate(image))
@ -10,6 +11,25 @@ sys.path.append('clip-interrogator')
from clip_interrogator import Config, Interrogator from clip_interrogator import Config, Interrogator
# download cache files
# NOTE(review): the entire download step below is disabled — it is wrapped in a
# bare triple-quoted string, so at runtime it is a no-op expression statement.
# The URLs inside are additionally commented out, so even if re-enabled the
# CACHE_URLS list would be empty. Presumably kept as a quick toggle for
# pre-populating the clip-interrogator 'cache' directory — TODO confirm intent.
"""
print("Download preprocessed cache files...")
CACHE_URLS = [
#'https://huggingface.co/pharma/ci-preprocess/raw/main/ViT-L-14_openai_artists.pkl',
#'https://huggingface.co/pharma/ci-preprocess/raw/main/ViT-L-14_openai_flavors.pkl',
#'https://huggingface.co/pharma/ci-preprocess/raw/main/ViT-L-14_openai_mediums.pkl',
#'https://huggingface.co/pharma/ci-preprocess/raw/main/ViT-L-14_openai_movements.pkl',
#'https://huggingface.co/pharma/ci-preprocess/raw/main/ViT-L-14_openai_trendings.pkl',
#'https://huggingface.co/pharma/ci-preprocess/resolve/main/ViT-H-14_laion2b_s32b_b79k_artists.pkl',
#'https://huggingface.co/pharma/ci-preprocess/resolve/main/ViT-H-14_laion2b_s32b_b79k_flavors.pkl',
#'https://huggingface.co/pharma/ci-preprocess/resolve/main/ViT-H-14_laion2b_s32b_b79k_mediums.pkl',
#'https://huggingface.co/pharma/ci-preprocess/resolve/main/ViT-H-14_laion2b_s32b_b79k_movements.pkl',
#'https://huggingface.co/pharma/ci-preprocess/resolve/main/ViT-H-14_laion2b_s32b_b79k_trendings.pkl',
]
os.makedirs('cache', exist_ok=True)
for url in CACHE_URLS:
print(subprocess.run(['wget', url, '-P', 'cache'], stdout=subprocess.PIPE).stdout.decode('utf-8'))
"""
config = Config() config = Config()
config.blip_num_beams = 64 config.blip_num_beams = 64
config.blip_offload = False config.blip_offload = False
@ -30,4 +50,40 @@ def inference(image, mode, clip_model_name, best_max_flavors=32):
else: else:
return ci.interrogate_fast(image) return ci.interrogate_fast(image)
# --- Script setup: make the vendored BLIP / clip-interrogator packages importable,
# --- then build a single module-level Interrogator used by inference() below.
from PIL import Image
#from clip_interrogator import Interrogator, Config
#ci = Interrogator(Config(clip_model_name="ViT-B-32/openai"))
#print(ci.interrogate(image))
import sys
# Both repos are expected as sibling checkouts relative to the working
# directory — presumably cloned by an accompanying setup step; TODO confirm.
sys.path.append('src/blip')
sys.path.append('clip-interrogator')
from clip_interrogator import Config, Interrogator
config = Config()
# Wider beam search / bigger chunks than the library defaults — quality over
# speed settings; no offloading of the BLIP model between calls.
config.blip_num_beams = 64
config.blip_offload = False
config.chunk_size = 2048
config.flavor_intermediate_count = 2048
ci = Interrogator(config)
def inference(image, mode, clip_model_name, best_max_flavors=16):
    """Interrogate *image* with the module-level ``ci`` and return the prompt.

    Parameters
    ----------
    image : PIL.Image.Image
        Input picture; converted to RGB before interrogation.
    mode : str
        ``'best'`` or ``'classic'``; anything else falls through to the
        fast interrogation path.
    clip_model_name : str
        CLIP model identifier; a mismatch with the current config triggers
        a model reload.
    best_max_flavors : int, optional
        Flavor cap forwarded (as int) to ``ci.interrogate`` in 'best' mode.
    """
    # Reload the CLIP model only when the requested one differs from the
    # one currently configured on the shared Interrogator instance.
    if ci.config.clip_model_name != clip_model_name:
        ci.config.clip_model_name = clip_model_name
        ci.load_clip_model()

    rgb = image.convert('RGB')
    if mode == 'best':
        return ci.interrogate(rgb, max_flavors=int(best_max_flavors))
    if mode == 'classic':
        return ci.interrogate_classic(rgb)
    return ci.interrogate_fast(rgb)
img = Image.open("C:/Users/NakaMura/Desktop/Screenshot 2022-11-27 180640.jpg").convert('RGB')
print(inference(img, "fast", clip_model_name="ViT-B-32/openai")) print(inference(img, "fast", clip_model_name="ViT-B-32/openai"))
# NOTE(review): hard-coded absolute Windows path to a developer desktop —
# this only runs on the author's machine; consider a CLI argument instead.
img = Image.open("C:/Users/NakaMura/Desktop/Screenshot 2022-11-27 175414.jpg").convert('RGB')
print(inference(img, "best", clip_model_name="ViT-B-32/openai"))

57
detect.py.bak

@ -1,5 +1,57 @@
from PIL import Image from PIL import Image
#from clip_interrogator import Interrogator, Config #from clip_interrogator import Interrogator, Config
#@title Setup
# --- Backup-file variant of the detect.py setup: imports, sys.path wiring for
# --- the vendored repos, and construction of the shared Interrogator instance.
import os, subprocess
#ci = Interrogator(Config(clip_model_name="ViT-B-32/openai"))
#print(ci.interrogate(image))
import sys
# BLIP and clip-interrogator are expected as local checkouts relative to the
# working directory — presumably prepared by an earlier setup step; TODO confirm.
sys.path.append('src/blip')
sys.path.append('clip-interrogator')
from clip_interrogator import Config, Interrogator
# download cache files
# NOTE(review): the block below is disabled (bare triple-quoted string → no-op
# at runtime), and every URL inside it is also commented out.
"""
print("Download preprocessed cache files...")
CACHE_URLS = [
#'https://huggingface.co/pharma/ci-preprocess/raw/main/ViT-L-14_openai_artists.pkl',
#'https://huggingface.co/pharma/ci-preprocess/raw/main/ViT-L-14_openai_flavors.pkl',
#'https://huggingface.co/pharma/ci-preprocess/raw/main/ViT-L-14_openai_mediums.pkl',
#'https://huggingface.co/pharma/ci-preprocess/raw/main/ViT-L-14_openai_movements.pkl',
#'https://huggingface.co/pharma/ci-preprocess/raw/main/ViT-L-14_openai_trendings.pkl',
#'https://huggingface.co/pharma/ci-preprocess/resolve/main/ViT-H-14_laion2b_s32b_b79k_artists.pkl',
#'https://huggingface.co/pharma/ci-preprocess/resolve/main/ViT-H-14_laion2b_s32b_b79k_flavors.pkl',
#'https://huggingface.co/pharma/ci-preprocess/resolve/main/ViT-H-14_laion2b_s32b_b79k_mediums.pkl',
#'https://huggingface.co/pharma/ci-preprocess/resolve/main/ViT-H-14_laion2b_s32b_b79k_movements.pkl',
#'https://huggingface.co/pharma/ci-preprocess/resolve/main/ViT-H-14_laion2b_s32b_b79k_trendings.pkl',
]
os.makedirs('cache', exist_ok=True)
for url in CACHE_URLS:
print(subprocess.run(['wget', url, '-P', 'cache'], stdout=subprocess.PIPE).stdout.decode('utf-8'))
"""
config = Config()
# Quality-oriented overrides of the library defaults; BLIP stays resident.
config.blip_num_beams = 64
config.blip_offload = False
config.chunk_size = 2048
config.flavor_intermediate_count = 2048
ci = Interrogator(config)
def inference(image, mode, clip_model_name, best_max_flavors=32):
    """Run the shared ``ci`` Interrogator over *image*, returning prompt text.

    ``mode`` selects the interrogation strategy: ``'best'`` (capped at
    ``best_max_flavors`` flavors), ``'classic'``, or — for any other value —
    the fast path. Requesting a ``clip_model_name`` different from the one
    currently configured reloads the CLIP model first.
    """
    # Lazy model swap: only pay the reload cost on an actual change.
    if ci.config.clip_model_name != clip_model_name:
        ci.config.clip_model_name = clip_model_name
        ci.load_clip_model()

    rgb = image.convert('RGB')
    if mode == 'best':
        return ci.interrogate(rgb, max_flavors=int(best_max_flavors))
    if mode == 'classic':
        return ci.interrogate_classic(rgb)
    return ci.interrogate_fast(rgb)
from PIL import Image
#from clip_interrogator import Interrogator, Config
#ci = Interrogator(Config(clip_model_name="ViT-B-32/openai")) #ci = Interrogator(Config(clip_model_name="ViT-B-32/openai"))
#print(ci.interrogate(image)) #print(ci.interrogate(image))
@ -30,5 +82,8 @@ def inference(image, mode, clip_model_name, best_max_flavors=32):
else: else:
return ci.interrogate_fast(image) return ci.interrogate_fast(image)
img = Image.open("C:/Users/NakaMura/Desktop/2163670-bigthumbnail.jpg").convert('RGB') img = Image.open("C:/Users/NakaMura/Desktop/Screenshot 2022-11-27 180640.jpg").convert('RGB')
print(inference(img, "fast", clip_model_name="ViT-B-32/openai")) print(inference(img, "fast", clip_model_name="ViT-B-32/openai"))
img = Image.open("C:/Users/NakaMura/Desktop/Screenshot 2022-11-27 175414.jpg").convert('RGB')
print(inference(img, "best", clip_model_name="ViT-B-32/openai"))
Loading…
Cancel
Save