@@ -6,7 +6,7 @@
"id": "3jm8RYrLqvzz"
},
"source": [
"# CLIP Interrogator 2 by [@pharmapsychotic](https://twitter.com/pharmapsychotic) \n",
"# CLIP Interrogator 2.1 by [@pharmapsychotic](https://twitter.com/pharmapsychotic) \n",
"\n",
"<br>\n",
"\n",
@@ -70,25 +70,34 @@
"import torch\n",
"from clip_interrogator import Interrogator, Config\n",
"\n",
"ci = Interrogator(Config())\n"
"ci = Interrogator(Config())\n",
"\n",
"def inference(image, mode):\n",
" image = image.convert('RGB')\n",
" if mode == 'best':\n",
" return ci.interrogate(image)\n",
" elif mode == 'classic':\n",
" return ci.interrogate_classic(image)\n",
" else:\n",
" return ci.interrogate_fast(image)\n"
]
},
{
"cell_type": "code",
"execution_count": 9,
    "execution_count": 4,
"metadata": {
"cellView": "form",
"colab": {
"base_uri": "https://localhost:8080/",
"height": 677
},
"cellView": "form",
"id": "Pf6qkFG6MPRj",
    "outputId": "5f959af5-f6dd-43f2-f8df-8331a422d317"
    "outputId": "8d542b56-8be7-453d-bf27-d0540a774c7d"
},
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"output_type": "stream",
"text": [
"Colab notebook detected. To show errors in colab notebook, set `debug=True` in `launch()`\n",
"\n",
@@ -99,64 +108,29 @@
]
},
{
"output_type": "display_data",
"data": {
"application/javascript": "(async (port, path, width, height, cache, element) => {\n if (!google.colab.kernel.accessAllowed && !cache) {\n return;\n }\n element.appendChild(document.createTextNode(''));\n const url = await google.colab.kernel.proxyPort(port, {cache});\n\n const external_link = document.createElement('div');\n external_link.innerHTML = `\n <div style=\"font-family: monospace; margin-bottom: 0.5rem\">\n Running on <a href=${new URL(path, url).toString()} target=\"_blank\">\n https://localhost:${port}${path}\n </a>\n </div>\n `;\n element.appendChild(external_link);\n\n const iframe = document.createElement('iframe');\n iframe.src = new URL(path, url).toString();\n iframe.height = height;\n iframe.allow = \"autoplay; camera; microphone; clipboard-read; clipboard-write;\"\n iframe.width = width;\n iframe.style.border = 0;\n element.appendChild(iframe);\n })(7860, \"/\", \"100%\", 500, false, window.element)",
"text/plain": [
"<IPython.core.display.Javascript object>"
],
"application/javascript": [
"(async (port, path, width, height, cache, element) => {\n",
" if (!google.colab.kernel.accessAllowed && !cache) {\n",
" return;\n",
" }\n",
" element.appendChild(document.createTextNode(''));\n",
" const url = await google.colab.kernel.proxyPort(port, {cache});\n",
"\n",
" const external_link = document.createElement('div');\n",
" external_link.innerHTML = `\n",
" <div style=\"font-family: monospace; margin-bottom: 0.5rem\">\n",
" Running on <a href=${new URL(path, url).toString()} target=\"_blank\">\n",
" https://localhost:${port}${path}\n",
" </a>\n",
" </div>\n",
" `;\n",
" element.appendChild(external_link);\n",
"\n",
" const iframe = document.createElement('iframe');\n",
" iframe.src = new URL(path, url).toString();\n",
" iframe.height = height;\n",
" iframe.allow = \"autoplay; camera; microphone; clipboard-read; clipboard-write;\"\n",
" iframe.width = width;\n",
" iframe.style.border = 0;\n",
" element.appendChild(iframe);\n",
" })(7866, \"/\", \"100%\", 500, false, window.element)"
]
},
"metadata": {}
"metadata": {},
"output_type": "display_data"
},
{
"output_type": "execute_result",
"data": {
"text/plain": [
       "(<gradio.routes.App at 0x7f6f06fc3450>, 'http://127.0.0.1:7866/', None)"
"(<gradio.routes.App at 0x7f894e553710>, 'http://127.0.0.1:7860/', None)"
]
},
"execution_count": 4,
"metadata": {},
"execution_count": 9
"output_type": "execute_result"
}
],
"source": [
"#@title Run!\n",
"\n",
"def inference(image, mode):\n",
" image = image.convert('RGB')\n",
" if mode == 'best':\n",
" return ci.interrogate(image)\n",
" elif mode == 'classic':\n",
" return ci.interrogate_classic(image)\n",
" else:\n",
" return ci.interrogate_fast(image)\n",
" \n",
"#@title Image to prompt! 🖼️ -> 📝\n",
" \n",
"inputs = [\n",
" gr.inputs.Image(type='pil'),\n",
" gr.Radio(['best', 'classic', 'fast'], label='', value='best'),\n",
@@ -173,6 +147,58 @@
")\n",
"io.launch()\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"cellView": "form",
"id": "OGmvkzITN4Hz"
},
"outputs": [],
"source": [
"#@title Batch process a folder of images 📁 -> 📝\n",
"\n",
"#@markdown This will generate prompts for every image in a folder and save results to desc.csv in the same folder.\n",
"#@markdown You can use the generated csv in the [Stable Diffusion Finetuning](https://colab.research.google.com/drive/1vrh_MUSaAMaC5tsLWDxkFILKJ790Z4Bl?usp=sharing)\n",
"#@markdown notebook or use it to train a different model or just print it out for fun. \n",
"#@markdown If you make something cool, I'd love to see it! Tag me on twitter or something. 😀\n",
"\n",
"import csv\n",
"import os\n",
"from IPython.display import display\n",
"from PIL import Image\n",
"from tqdm import tqdm\n",
"\n",
"folder_path = \"/content/my_images\" #@param {type:\"string\"}\n",
"mode = 'best' #@param [\"best\",\"classic\", \"fast\"]\n",
"\n",
"\n",
"files = [f for f in os.listdir(folder_path) if f.endswith('.jpg') or f.endswith('.png')] if os.path.exists(folder_path) else []\n",
"prompts = []\n",
"for file in files:\n",
" image = Image.open(os.path.join(folder_path, file)).convert('RGB')\n",
" prompt = inference(image, mode)\n",
" prompts.append(prompt)\n",
"\n",
" thumb = image.copy()\n",
" thumb.thumbnail([256, 256])\n",
" display(thumb)\n",
"\n",
" print(prompt)\n",
"\n",
"if len(prompts):\n",
" csv_path = os.path.join(folder_path, 'desc.csv')\n",
" with open(csv_path, 'w', encoding='utf-8', newline='') as f:\n",
" w = csv.writer(f, quoting=csv.QUOTE_MINIMAL)\n",
" w.writerow(['image', 'prompt'])\n",
" for file, prompt in zip(files, prompts):\n",
" w.writerow([file, prompt])\n",
"\n",
" print(f\"\\n\\n\\n\\nGenerated {len(prompts)} and saved to {csv_path}, enjoy!\")\n",
"else:\n",
" print(f\"Sorry, I couldn't find any images in {folder_path}\")\n"
]
}
],
"metadata": {
@@ -207,4 +233,4 @@
},
"nbformat": 4,
"nbformat_minor": 0
}
}