From 8b689592aaf4986468bcb1a10caf34ddec67f5b7 Mon Sep 17 00:00:00 2001
From: pharmapsychotic
Date: Mon, 28 Nov 2022 13:13:38 -0600
Subject: [PATCH] Free Colab is too much of a potato to handle model swapping
 so just pick it in the setup cell.

---
 clip_interrogator.ipynb | 24 ++++++++++++------------
 1 file changed, 12 insertions(+), 12 deletions(-)

diff --git a/clip_interrogator.ipynb b/clip_interrogator.ipynb
index 9822422..809ab9c 100644
--- a/clip_interrogator.ipynb
+++ b/clip_interrogator.ipynb
@@ -63,7 +63,10 @@
     "\n",
     "setup()\n",
     "\n",
-    "# download cache files\n",
+    "\n",
+    "clip_model_name = 'ViT-L-14/openai' #@param [\"ViT-L-14/openai\", \"ViT-H-14/laion2b_s32b_b79k\"]\n",
+    "\n",
+    "\n",
     "print(\"Download preprocessed cache files...\")\n",
     "CACHE_URLS = [\n",
     "    'https://huggingface.co/pharma/ci-preprocess/resolve/main/ViT-L-14_openai_artists.pkl',\n",
@@ -71,6 +74,7 @@
     "    'https://huggingface.co/pharma/ci-preprocess/resolve/main/ViT-L-14_openai_mediums.pkl',\n",
     "    'https://huggingface.co/pharma/ci-preprocess/resolve/main/ViT-L-14_openai_movements.pkl',\n",
     "    'https://huggingface.co/pharma/ci-preprocess/resolve/main/ViT-L-14_openai_trendings.pkl',\n",
+    "] if clip_model_name == 'ViT-L-14/openai' else [\n",
     "    'https://huggingface.co/pharma/ci-preprocess/resolve/main/ViT-H-14_laion2b_s32b_b79k_artists.pkl',\n",
     "    'https://huggingface.co/pharma/ci-preprocess/resolve/main/ViT-H-14_laion2b_s32b_b79k_flavors.pkl',\n",
     "    'https://huggingface.co/pharma/ci-preprocess/resolve/main/ViT-H-14_laion2b_s32b_b79k_mediums.pkl',\n",
@@ -81,6 +85,7 @@
     "for url in CACHE_URLS:\n",
     "    print(subprocess.run(['wget', url, '-P', 'cache'], stdout=subprocess.PIPE).stdout.decode('utf-8'))\n",
     "\n",
+    "\n",
     "import sys\n",
     "sys.path.append('src/blip')\n",
     "sys.path.append('clip-interrogator')\n",
@@ -91,16 +96,12 @@
     "config = Config()\n",
     "config.blip_num_beams = 64\n",
     "config.blip_offload = False\n",
-    "config.chunk_size = 2048\n",
-    "config.flavor_intermediate_count = 2048\n",
-    "\n",
+    "config.clip_model_name = clip_model_name\n",
     "ci = Interrogator(config)\n",
     "\n",
-    "def inference(image, mode, clip_model_name, best_max_flavors=32):\n",
-    "    if clip_model_name != ci.config.clip_model_name:\n",
-    "        ci.config.clip_model_name = clip_model_name\n",
-    "        ci.load_clip_model()\n",
-    "    ci.config.flavor_intermediate_count = 2048 if clip_model_name == \"ViT-L-14/openai\" else 1024\n",
+    "def inference(image, mode, best_max_flavors=32):\n",
+    "    ci.config.chunk_size = 2048 if ci.config.clip_model_name == \"ViT-L-14/openai\" else 1024\n",
+    "    ci.config.flavor_intermediate_count = 2048 if ci.config.clip_model_name == \"ViT-L-14/openai\" else 1024\n",
     "    image = image.convert('RGB')\n",
     "    if mode == 'best':\n",
     "        return ci.interrogate(image, max_flavors=int(best_max_flavors))\n",
@@ -161,8 +162,7 @@
     "    \n",
     "inputs = [\n",
     "    gr.inputs.Image(type='pil'),\n",
-    "    gr.Radio(['best', 'classic', 'fast'], label='', value='best'),\n",
-    "    gr.Dropdown([\"ViT-L-14/openai\", \"ViT-H-14/laion2b_s32b_b79k\"], value='ViT-L-14/openai', label='CLIP Model'),\n",
+    "    gr.Radio(['best', 'fast'], label='', value='best'),\n",
     "    gr.Number(value=16, label='best mode max flavors'),\n",
     "]\n",
     "outputs = [\n",
@@ -175,7 +175,7 @@
     "    outputs, \n",
     "    allow_flagging=False,\n",
     ")\n",
-    "io.launch()\n"
+    "io.launch(debug=False)\n"
    ]
   },
   {
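
Note: taken together, the hunks collapse the old per-request model swap into a
single up-front choice in the setup cell. Below is a minimal standalone sketch
of the resulting flow, using the model names, cache URLs, and 2048/1024 sizing
shown in the diff; BASE, TABLES, prefix, and is_vit_l are illustrative helpers,
not identifiers from the patch.

    import subprocess

    # Chosen once in the setup cell; in Colab this line is a #@param dropdown.
    clip_model_name = 'ViT-L-14/openai'

    # Only the cache files for the chosen model are fetched, never both sets.
    BASE = 'https://huggingface.co/pharma/ci-preprocess/resolve/main'
    TABLES = ['artists', 'flavors', 'mediums', 'movements', 'trendings']
    prefix = ('ViT-L-14_openai' if clip_model_name == 'ViT-L-14/openai'
              else 'ViT-H-14_laion2b_s32b_b79k')
    CACHE_URLS = [f'{BASE}/{prefix}_{t}.pkl' for t in TABLES]
    for url in CACHE_URLS:
        subprocess.run(['wget', url, '-P', 'cache'], stdout=subprocess.PIPE)

    # inference() then sizes its working set from the now-fixed model: the
    # larger ViT-H model drops chunk_size and flavor_intermediate_count from
    # 2048 to 1024, presumably to fit free-tier Colab memory.
    is_vit_l = clip_model_name == 'ViT-L-14/openai'
    chunk_size = 2048 if is_vit_l else 1024
    flavor_intermediate_count = 2048 if is_vit_l else 1024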