{
"nbformat": 4,
"nbformat_minor": 0,
"metadata": {
"colab": {
"provenance": [],
"gpuType": "T4"
},
"kernelspec": {
"name": "python3",
"display_name": "Python 3"
},
"language_info": {
"name": "python"
},
"accelerator": "GPU"
},
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"metadata": {
"id": "m7rU-pjX3Y1O"
},
"outputs": [],
"source": [
"%%capture\n",
"!pip install gradio transformers==4.30.2 pillow\n",
"!pip install torch torchvision hf_xet timm==1.0.10\n",
"!pip install flash-attn --no-build-isolation"
]
},
{
"cell_type": "code",
"source": [
"import gradio as gr\n",
"import torch\n",
"from PIL import Image\n",
"from transformers import AutoProcessor, AutoModelForCausalLM\n",
"\n",
"device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n",
"vision_language_model = AutoModelForCausalLM.from_pretrained('microsoft/Florence-2-base', trust_remote_code=True).to(device).eval()\n",
"vision_language_processor = AutoProcessor.from_pretrained('microsoft/Florence-2-base', trust_remote_code=True)\n",
"\n",
"def describe_image(uploaded_image):\n",
" \"\"\"\n",
" Generates a detailed description of the input image.\n",
"\n",
" Args:\n",
" uploaded_image (PIL.Image.Image or numpy.ndarray): The image to describe.\n",
"\n",
" Returns:\n",
" str: A detailed textual description of the image.\n",
" \"\"\"\n",
" if not isinstance(uploaded_image, Image.Image):\n",
" uploaded_image = Image.fromarray(uploaded_image)\n",
"\n",
" inputs = vision_language_processor(text=\"<MORE_DETAILED_CAPTION>\", images=uploaded_image, return_tensors=\"pt\").to(device)\n",
" with torch.no_grad():\n",
" generated_ids = vision_language_model.generate(\n",
" input_ids=inputs[\"input_ids\"],\n",
" pixel_values=inputs[\"pixel_values\"],\n",
" max_new_tokens=1024,\n",
" early_stopping=False,\n",
" do_sample=False,\n",
" num_beams=3,\n",
" )\n",
" generated_text = vision_language_processor.batch_decode(generated_ids, skip_special_tokens=False)[0]\n",
" processed_description = vision_language_processor.post_process_generation(\n",
" generated_text,\n",
" task=\"<MORE_DETAILED_CAPTION>\",\n",
" image_size=(uploaded_image.width, uploaded_image.height)\n",
" )\n",
" image_description = processed_description[\"<MORE_DETAILED_CAPTION>\"]\n",
" print(\"\\nImage description generated!:\", image_description)\n",
" return image_description\n",
"\n",
"image_description_interface = gr.Interface(\n",
" fn=describe_image,\n",
" inputs=gr.Image(label=\"Upload Image\"),\n",
" outputs=gr.Textbox(label=\"Generated Caption\", lines=4, show_copy_button=True),\n",
" live=False,\n",
")\n",
"\n",
"image_description_interface.launch(debug=True, ssr_mode=False)"
],
"metadata": {
"id": "kW4MjaOs3c9E"
},
"execution_count": null,
"outputs": []
}
]
}