1 changed file with 302 additions and 0 deletions
@@ -0,0 +1,302 @@
{
"nbformat": 4,
"nbformat_minor": 0,
"metadata": {
"colab": {
"provenance": [],
"gpuType": "T4"
},
"kernelspec": {
"name": "python3",
"display_name": "Python 3"
},
"language_info": {
"name": "python"
},
"accelerator": "GPU"
},
"cells": [
{
"cell_type": "markdown",
"source": [
"# Create meeting minutes from an audio file\n",
"\n",
"I downloaded a recording of a Denver City Council meeting and selected a portion for us to transcribe. You can download it here: \n",
"https://drive.google.com/file/d/1N_kpSojRR5RYzupz6nqM8hMSoEF_R7pU/view?usp=sharing\n",
"\n",
"If you'd rather work with the original data, the HuggingFace dataset is [here](https://huggingface.co/datasets/huuuyeah/meetingbank) and the audio can be downloaded [here](https://huggingface.co/datasets/huuuyeah/MeetingBank_Audio/tree/main).\n",
"\n",
"The goal of this project is to use the audio to generate meeting minutes, including action items.\n",
"\n",
"For this project, you can either use the Denver meeting recording, or you can record something of your own!\n",
"\n",
"## Please note:\n",
"\n",
"When you run the pip installs in the first cell below, you might see the error quoted here. It sounds quite severe, but it can be safely ignored and doesn't affect anything else in this project.\n",
"\n",
"\n",
"> ERROR: pip's dependency resolver does not currently take into account all the packages that are installed. This behaviour is the source of the following dependency conflicts.\n",
"gcsfs 2024.10.0 requires fsspec==2024.10.0, but you have fsspec 2024.9.0 which is incompatible.\n",
"\n"
],
"metadata": {
"id": "It89APiAtTUF"
}
},
{
"cell_type": "code",
"source": [
"!pip install -q requests torch bitsandbytes transformers sentencepiece accelerate openai httpx==0.27.2 gradio"
],
"metadata": {
"id": "f2vvgnFpHpID"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "FW8nl3XRFrz0"
},
"outputs": [],
"source": [
"# imports\n",
"\n",
"import os\n",
"import requests\n",
"from openai import OpenAI\n",
"from google.colab import drive\n",
"from huggingface_hub import login\n",
"from google.colab import userdata\n",
"from transformers import AutoTokenizer, AutoModelForCausalLM, TextStreamer, BitsAndBytesConfig\n",
"import torch\n",
"import gradio as gr"
]
},
{
"cell_type": "code",
"source": [
"# Constants\n",
"\n",
"AUDIO_MODEL = \"whisper-1\"\n",
"LLAMA = \"meta-llama/Meta-Llama-3.1-8B-Instruct\""
],
"metadata": {
"id": "q3D1_T0uG_Qh"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
"# New capability - connect this Colab to my Google Drive\n",
"# See immediately below this for instructions to obtain denver_extract.mp3\n",
"\n",
"drive.mount(\"/content/drive\")\n",
"audio_filename = \"/content/drive/MyDrive/llms/denver_extract.mp3\""
],
"metadata": {
"id": "Es9GkQ0FGCMt"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "markdown",
"source": [
"# Download denver_extract.mp3\n",
"\n",
"You can either use the same file as me, the extract from the Denver City Council meeting, or you can try your own.\n",
"\n",
"If you want to use the same file as me, please download my extract here and put it on your Google Drive: \n",
"https://drive.google.com/file/d/1N_kpSojRR5RYzupz6nqM8hMSoEF_R7pU/view?usp=sharing\n"
],
"metadata": {
"id": "HTl3mcjyzIEE"
}
},
{
"cell_type": "code",
"source": [
"# Sign in to HuggingFace Hub\n",
"\n",
"hf_token = userdata.get('HF_TOKEN')\n",
"login(hf_token, add_to_git_credential=True)"
],
"metadata": {
"id": "xYW8kQYtF-3L"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
"# Sign in to OpenAI using Secrets in Colab\n",
"\n",
"openai_api_key = userdata.get('OPENAI_API_KEY')\n",
"openai = OpenAI(api_key=openai_api_key)"
],
"metadata": {
"id": "qP6OB2OeGC2C"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
"# Initialize Llama model and tokenizer\n",
"\n",
"quant_config = BitsAndBytesConfig(\n",
"    load_in_4bit=True,\n",
"    bnb_4bit_use_double_quant=True,\n",
"    bnb_4bit_compute_dtype=torch.bfloat16,\n",
"    bnb_4bit_quant_type=\"nf4\"\n",
")\n",
"\n",
"tokenizer = AutoTokenizer.from_pretrained(LLAMA)\n",
"tokenizer.pad_token = tokenizer.eos_token\n",
"\n",
"model = AutoModelForCausalLM.from_pretrained(\n",
"    LLAMA,\n",
"    device_map=\"auto\",\n",
"    quantization_config=quant_config\n",
")"
],
"metadata": {
"id": "hgQBeIYUyaqj"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
"# Generate meeting minutes\n",
"\n",
"def generate_minutes(transcription, model, tokenizer, progress=gr.Progress()):\n",
"    progress(0.6, desc=\"Generating meeting minutes from transcript...\")\n",
"\n",
"    system_message = \"You are an assistant that produces minutes of meetings from transcripts, with summary, key discussion points, takeaways and action items with owners, in markdown.\"\n",
"    user_prompt = f\"Below is an extract transcript of a meeting. Please write minutes in markdown, including a summary with attendees, location and date; discussion points; takeaways; and action items with owners.\\n{transcription}\"\n",
"\n",
"    messages = [\n",
"        {\"role\": \"system\", \"content\": system_message},\n",
"        {\"role\": \"user\", \"content\": user_prompt}\n",
"    ]\n",
"\n",
"    inputs = tokenizer.apply_chat_template(messages, return_tensors=\"pt\").to(\"cuda\")\n",
"    outputs = model.generate(inputs, max_new_tokens=2000)\n",
"    response = tokenizer.decode(outputs[0])\n",
"\n",
"    # Clean up the response, keep only the minutes\n",
"    progress(0.9, desc=\"Cleaning and formatting minutes...\")\n",
"    response = response.split(\"<|end_header_id|>\")[-1].strip().replace(\"<|eot_id|>\",\"\")\n",
"\n",
"    return response"
],
"metadata": {
"id": "u9aFA7tjy3Ri"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
"# Transcribe the uploaded audio file using OpenAI's Whisper model\n",
"\n",
"def transcribe_audio(audio_path, progress=gr.Progress()):\n",
"    progress(0.3, desc=\"Creating transcript from audio...\")\n",
"\n",
"    try:\n",
"        with open(audio_path, \"rb\") as audio_file:\n",
"            transcription = openai.audio.transcriptions.create(\n",
"                model=AUDIO_MODEL,\n",
"                file=audio_file,\n",
"                response_format=\"text\"\n",
"            )\n",
"        return transcription\n",
"    except Exception as e:\n",
"        return f\"Error during transcription: {str(e)}\""
],
"metadata": {
"id": "OEuqR90Vy4AZ"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
"# Process the uploaded audio file, transcribe it, and generate meeting minutes\n",
"\n",
"def process_upload(audio_file, progress=gr.Progress()):\n",
"    progress(0.1, desc=\"Starting process...\")\n",
"\n",
"    if audio_file is None:\n",
"        return \"Please upload an audio file.\"\n",
"\n",
"    try:\n",
"        # Check file format\n",
"        if not str(audio_file).lower().endswith('.mp3'):\n",
"            return \"Please upload an MP3 file.\"\n",
"\n",
"        # Get transcription\n",
"        transcription = transcribe_audio(audio_file)\n",
"        if transcription.startswith(\"Error\"):\n",
"            return transcription\n",
"\n",
"        # Generate minutes\n",
"        minutes = generate_minutes(transcription, model, tokenizer)\n",
"        progress(1.0, desc=\"Process complete!\")\n",
"        return minutes\n",
"\n",
"    except Exception as e:\n",
"        return f\"Error processing file: {str(e)}\""
],
"metadata": {
"id": "lmdsy2iDy5d7"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
"# Create Gradio interface\n",
"\n",
"interface = gr.Interface(\n",
"    fn=process_upload,\n",
"    inputs=gr.Audio(type=\"filepath\", label=\"Upload MP3 File\", format=\"mp3\"),\n",
"    outputs=gr.Markdown(label=\"Meeting Minutes\", min_height=60),\n",
"    title=\"Meeting Minutes Generator\",\n",
"    description=\"Upload an MP3 recording of your meeting to get AI-generated meeting minutes. This process may take a few minutes.\",\n",
"    flagging_mode=\"never\"\n",
")"
],
"metadata": {
"id": "k2U2bWtey7Yo"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
"# Launch Gradio interface\n",
"\n",
"interface.launch()"
],
"metadata": {
"id": "X3JbzRNRy9oG"
},
"execution_count": null,
"outputs": []
}
]
}