From d2c1200c230bb246bcb378c508d215388e741f8a Mon Sep 17 00:00:00 2001
From: thisforcode
Date: Mon, 7 Apr 2025 06:21:55 +0530
Subject: [PATCH] Day1: Grafana Dashboard Metrics Summarizer

---
 .../day1-dashboard-metrics-summary.ipynb      | 183 ++++++++++++++++++
 .../mock_grafana_dashboard.json               |  95 +++++++++
 2 files changed, 278 insertions(+)
 create mode 100644 week1/community-contributions/mock-dashboard-summary/day1-dashboard-metrics-summary.ipynb
 create mode 100644 week1/community-contributions/mock-dashboard-summary/mock_grafana_dashboard.json

diff --git a/week1/community-contributions/mock-dashboard-summary/day1-dashboard-metrics-summary.ipynb b/week1/community-contributions/mock-dashboard-summary/day1-dashboard-metrics-summary.ipynb
new file mode 100644
index 0000000..c91070d
--- /dev/null
+++ b/week1/community-contributions/mock-dashboard-summary/day1-dashboard-metrics-summary.ipynb
@@ -0,0 +1,183 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "id": "de3b5d4c",
+   "metadata": {},
+   "source": [
+    "# 🧠 Grafana Dashboard Summarizer\n",
+    "Simulates reading a Grafana dashboard JSON export and summarizes its panels using GPT (or plain Python logic)."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "0abf3aaf",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import os\n",
+    "from dotenv import load_dotenv\n",
+    "from IPython.display import Markdown, display\n",
+    "from openai import OpenAI\n",
+    "import json\n",
+    "import pandas as pd"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "ad82ca65",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Load the mock dashboard export and list its panels\n",
+    "with open(\"mock_grafana_dashboard.json\", \"r\") as f:\n",
+    "    data = json.load(f)\n",
+    "\n",
+    "dashboard = data[\"dashboard\"]\n",
+    "panels = dashboard[\"panels\"]\n",
+    "print(f\"Dashboard Title: {dashboard['title']}\")\n",
+    "print(f\"Total Panels: {len(panels)}\\n\")\n",
+    "for p in panels:\n",
+    "    print(f\"- {p['title']} ({p['type']})\")\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "1bf45c0f",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Extract each panel's title, type, unit and thresholds into a DataFrame\n",
+    "panel_data = []\n",
+    "for p in panels:\n",
+    "    defaults = p.get(\"fieldConfig\", {}).get(\"defaults\", {})\n",
+    "    thresholds = defaults.get(\"thresholds\", {}).get(\"steps\", [])\n",
+    "    panel_data.append({\n",
+    "        \"Title\": p[\"title\"],\n",
+    "        \"Type\": p[\"type\"],\n",
+    "        \"Unit\": defaults.get(\"unit\", \"N/A\"),\n",
+    "        \"Thresholds\": thresholds\n",
+    "    })\n",
+    "\n",
+    "df = pd.DataFrame(panel_data)\n",
+    "df\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "90b67133",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Build the user prompt from the panel table\n",
+    "summary_prompt = f\"\"\"\n",
+    "You are a helpful assistant summarizing a system monitoring dashboard.\n",
+    "\n",
+    "Dashboard: {dashboard['title']}\n",
+    "Panels:\n",
+    "\"\"\"\n",
+    "for _, row in df.iterrows():\n",
+    "    summary_prompt += f\"- {row['Title']} [{row['Type']}] - Unit: {row['Unit']}, Thresholds: {row['Thresholds']}\\n\"\n",
+    "\n",
+    "print(summary_prompt)\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "69a4208c",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Load the API key from .env and sanity-check its format\n",
+    "load_dotenv(override=True)\n",
+    "api_key = os.getenv('OPENAI_API_KEY')\n",
+    "\n",
+    "if not api_key:\n",
+    "    print(\"No API key was found - please head over to the troubleshooting notebook in this folder to identify & fix!\")\n",
+    "elif not
 api_key.startswith(\"sk-proj-\"):\n",
+    "    print(\"An API key was found, but it doesn't start with sk-proj-; please check you're using the right key - see troubleshooting notebook\")\n",
+    "elif api_key.strip() != api_key:\n",
+    "    print(\"An API key was found, but it looks like it might have space or tab characters at the start or end - please remove them - see troubleshooting notebook\")\n",
+    "else:\n",
+    "    print(\"API key found and looks good so far!\")\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "2eee5a32",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Create the OpenAI client (reads OPENAI_API_KEY from the environment)\n",
+    "openai = OpenAI()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "660eedb7",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "def summarize(prompt):\n",
+    "    # Ask GPT for a summary of the dashboard described in the prompt\n",
+    "    response = openai.chat.completions.create(\n",
+    "        model=\"gpt-4o-mini\",\n",
+    "        messages=[\n",
+    "            {\"role\": \"system\", \"content\": \"You are a Grafana dashboard summarizer.\"},\n",
+    "            {\"role\": \"user\", \"content\": prompt}\n",
+    "        ]\n",
+    "    )\n",
+    "    return response.choices[0].message.content"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "55f57d56",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "summary = summarize(summary_prompt)\n",
+    "display(Markdown(summary))"
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "arunllms",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.11.11"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
diff --git a/week1/community-contributions/mock-dashboard-summary/mock_grafana_dashboard.json b/week1/community-contributions/mock-dashboard-summary/mock_grafana_dashboard.json
new file mode 100644
index 0000000..1218c82
--- /dev/null
+++ b/week1/community-contributions/mock-dashboard-summary/mock_grafana_dashboard.json
@@ -0,0 +1,95 @@
+{
+  "dashboard": {
+    "id": null,
+    "uid": "mock-sys-metrics",
+    "title": "Mock System Metrics Dashboard",
+    "timezone": "browser",
+    "panels": [
+      {
+        "id": 1,
+        "title": "CPU Usage (%)",
+        "type": "stat",
+        "datasource": "MockData",
+        "targets": [],
+        "fieldConfig": {
+          "defaults": {
+            "unit": "percent",
+            "thresholds": {
+              "mode": "percentage",
+              "steps": [
+                { "color": "green", "value": null },
+                { "color": "orange", "value": 70 },
+                { "color": "red", "value": 90 }
+              ]
+            }
+          }
+        },
+        "options": {
+          "reduceOptions": {
+            "calcs": ["mean"]
+          }
+        }
+      },
+      {
+        "id": 2,
+        "title": "Memory Usage",
+        "type": "gauge",
+        "datasource": "MockData",
+        "fieldConfig": {
+          "defaults": {
+            "unit": "bytes",
+            "max": 16e9,
+            "thresholds": {
+              "steps": [
+                { "color": "green", "value": null },
+                { "color": "orange", "value": 12e9 },
+                { "color": "red", "value": 14e9 }
+              ]
+            }
+          }
+        }
+      },
+      {
+        "id": 3,
+        "title": "Disk Read Errors",
+        "type": "stat",
+        "datasource": "MockData",
+        "fieldConfig": {
+          "defaults": {
+            "unit": "short",
+            "thresholds": {
+              "steps": [
+                { "color": "green", "value": null },
+                { "color": "orange", "value": 5 },
+                { "color": "red", "value": 10 }
+              ]
+            }
+          }
+        }
+      },
+      {
+        "id": 4,
+        "title": "GPU Usage (%)",
+        "type": "gauge",
+        "datasource": "MockData",
+        "fieldConfig": {
+          "defaults": {
+            "unit": "percent",
+            "thresholds": {
+              "steps": [
+                {
"color": "green", "value": null }, + { "color": "orange", "value": 75 }, + { "color": "red", "value": 90 } + ] + } + } + } + } + ], + "schemaVersion": 30, + "version": 1, + "refresh": "30s" + }, + "overwrite": true + } + \ No newline at end of file