pull/22/head
Sprites20 committed 2 years ago
commit 506e67b948

4 changed files:
  1. clip_interrogator/clip_interrogator.py (6)
  2. clip_interrogator/clip_interrogator.py.bak (6)
  3. detect.py (36)
  4. detect.py.bak (37)
clip_interrogator/clip_interrogator.py (6)

@@ -27,12 +27,12 @@ class Config:
     # blip settings
     blip_image_eval_size: int = 384
     blip_max_length: int = 32
-    blip_model_url: str = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_large_caption.pth'
+    blip_model_url: str = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_14M.pth'
     blip_num_beams: int = 8
     blip_offload: bool = False
 
     # clip settings
-    clip_model_name: str = 'ViT-H-14/laion2b_s32b_b79k'
+    clip_model_name: str = 'ViT-B-32/openai'
     clip_model_path: str = None
 
     # interrogator settings
@@ -58,7 +58,7 @@ class Interrogator():
             blip_model = blip_decoder(
                 pretrained=config.blip_model_url,
                 image_size=config.blip_image_eval_size,
-                vit='large',
+                vit='base',
                 med_config=med_config
             )
             blip_model.eval()
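For context, this change points the default Config at smaller checkpoints: the BLIP captioner drops from the large caption model to the base 14M weights (vit='base'), and the default CLIP model drops from ViT-H-14/laion2b_s32b_b79k to ViT-B-32/openai. Below is a minimal sketch of selecting the same models explicitly through Config rather than relying on the defaults; the image path is a placeholder, and the URL and model names are taken from the diff above.

from PIL import Image
from clip_interrogator import Config, Interrogator

# Explicitly request the smaller models that this commit makes the defaults.
config = Config()
config.blip_model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_14M.pth'
config.clip_model_name = 'ViT-B-32/openai'

ci = Interrogator(config)
image = Image.open('example.jpg').convert('RGB')  # placeholder path
print(ci.interrogate(image))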

clip_interrogator/clip_interrogator.py.bak (6)

@@ -27,12 +27,12 @@ class Config:
     # blip settings
     blip_image_eval_size: int = 384
     blip_max_length: int = 32
-    blip_model_url: str = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_large_caption.pth'
+    blip_model_url: str = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_14M.pth'
     blip_num_beams: int = 8
     blip_offload: bool = False
 
     # clip settings
-    clip_model_name: str = 'CLIP-ViT-g-14-laion2B-s12B-b42K'
+    clip_model_name: str = 'ViT-B-32/laion2b_s34b_b79K'
     clip_model_path: str = None
 
     # interrogator settings
@@ -58,7 +58,7 @@ class Interrogator():
             blip_model = blip_decoder(
                 pretrained=config.blip_model_url,
                 image_size=config.blip_image_eval_size,
-                vit='large',
+                vit='base',
                 med_config=med_config
             )
             blip_model.eval()

detect.py (36)

@@ -1,5 +1,33 @@
 from PIL import Image
-from clip_interrogator import Interrogator, Config
-image = Image.open("C:/Users/NakaMura/Desktop/2163670-bigthumbnail.jpg").convert('RGB')
-ci = Interrogator(Config(clip_model_name="ViT-B-32/openai"))
-print(ci.interrogate(image))
+#from clip_interrogator import Interrogator, Config
+img = Image.open("C:/Users/NakaMura/Desktop/2163670-bigthumbnail.jpg").convert('RGB')
+#ci = Interrogator(Config(clip_model_name="ViT-B-32/openai"))
+#print(ci.interrogate(image))
+
+import sys
+sys.path.append('src/blip')
+sys.path.append('clip-interrogator')
+
+from clip_interrogator import Config, Interrogator
+
+config = Config()
+config.blip_num_beams = 64
+config.blip_offload = False
+config.chunk_size = 2048
+config.flavor_intermediate_count = 2048
+
+ci = Interrogator(config)
+
+def inference(image, mode, clip_model_name, best_max_flavors=32):
+    if clip_model_name != ci.config.clip_model_name:
+        ci.config.clip_model_name = clip_model_name
+        ci.load_clip_model()
+    image = image.convert('RGB')
+    if mode == 'best':
+        return ci.interrogate(image, max_flavors=int(best_max_flavors))
+    elif mode == 'classic':
+        return ci.interrogate_classic(image)
+    else:
+        return ci.interrogate_fast(image)
+
+print(inference(img, "fast", clip_model_name="ViT-B-32/openai"))
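As a usage note, the inference() helper added here reloads the CLIP model only when the requested clip_model_name differs from the one already configured, then dispatches on mode: 'best' (with a flavor budget), 'classic', or the fast path otherwise. A short sketch of the other two modes, as lines one might append to the end of detect.py; the flavor count of 16 is an arbitrary illustrative value.

# 'best' mode accepts a max flavor count; 'classic' takes no extra arguments.
print(inference(img, "classic", clip_model_name="ViT-B-32/openai"))
print(inference(img, "best", clip_model_name="ViT-B-32/openai", best_max_flavors=16))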

detect.py.bak (37)

@@ -1,5 +1,34 @@
 from PIL import Image
-from clip_interrogator import Interrogator, Config
-image = Image.open("C:/Users/NakaMura/Desktop/2163670-bigthumbnail.jpg").convert('RGB')
-ci = Interrogator(Config(clip_model_name="ViT-B-16/openai"))
-print(ci.interrogate(image))
+#from clip_interrogator import Interrogator, Config
+#ci = Interrogator(Config(clip_model_name="ViT-B-32/openai"))
+#print(ci.interrogate(image))
+
+import sys
+sys.path.append('src/blip')
+sys.path.append('clip-interrogator')
+
+from clip_interrogator import Config, Interrogator
+
+config = Config()
+config.blip_num_beams = 64
+config.blip_offload = False
+config.chunk_size = 2048
+config.flavor_intermediate_count = 2048
+
+ci = Interrogator(config)
+
+def inference(image, mode, clip_model_name, best_max_flavors=32):
+    if clip_model_name != ci.config.clip_model_name:
+        ci.config.clip_model_name = clip_model_name
+        ci.load_clip_model()
+    image = image.convert('RGB')
+    if mode == 'best':
+        return ci.interrogate(image, max_flavors=int(best_max_flavors))
+    elif mode == 'classic':
+        return ci.interrogate_classic(image)
+    else:
+        return ci.interrogate_fast(image)
+
+img = Image.open("C:/Users/NakaMura/Desktop/2163670-bigthumbnail.jpg").convert('RGB')
+
+print(inference(img, "fast", clip_model_name="ViT-B-32/openai"))