diff --git a/week1/community-contributions/Day 1_Code_generation_llm.ipynb b/week1/community-contributions/Day 1_Code_generation_llm.ipynb index 1736c5f..02d458a 100644 --- a/week1/community-contributions/Day 1_Code_generation_llm.ipynb +++ b/week1/community-contributions/Day 1_Code_generation_llm.ipynb @@ -7,14 +7,13 @@ "metadata": {}, "outputs": [], "source": [ - "# imports\n", - "\n", "import os\n", "import requests\n", "from dotenv import load_dotenv\n", "from bs4 import BeautifulSoup\n", "from IPython.display import Markdown, display\n", - "from openai import OpenAI\n" + "import openai \n", + "import ollama " ] }, { @@ -48,7 +47,8 @@ "metadata": {}, "outputs": [], "source": [ - "openai = OpenAI()" + "# Initialize OpenAI\n", + "openai_client = openai.OpenAI(api_key=api_key)" ] }, { @@ -73,6 +73,29 @@ " return f\"Error: {e}\"" ] }, + { + "cell_type": "code", + "execution_count": null, + "id": "421c4ebe-7017-4ac8-b4d1-7837e1a68223", + "metadata": {}, + "outputs": [], + "source": [ + "# Function to ask Ollama\n", + "def ask_ollama(prompt):\n", + " \"\"\" send a prompt to ollama and return the response \"\"\"\n", + " try:\n", + " response = ollama.chat(\n", + " model=\"llama3.2\",\n", + " messages=[\n", + " {\"role\": \"system\", \"content\": \"You are an advanced AI assistant specialized in software development. You generate complete, optimized, and well-documented code for any requested approach, ensuring best practices, efficiency, and scalability. You provide explanations alongside the code, highlighting important concepts and potential improvements.\"},\n", + " {\"role\": \"user\", \"content\": prompt}\n", + " ]\n", + " )\n", + " return response['message']['content']\n", + " except Exception as e:\n", + " return f\"Ollama Error: {e}\" " ] }, { "cell_type": "code", "execution_count": null, @@ -89,9 +112,13 @@ " print(\"Goodbye!\")\n", " break\n", "\n", - " response = ask_ai(user_input)\n", - "# Display output in Markdown format\n", - " display(Markdown(response))" + " print(\"\\n **OpenAI Response:**\")\n", + " openai_response = ask_ai(user_input)\n", + " display(Markdown(openai_response))\n", + "\n", + " print(\"\\n **Ollama Response:**\")\n", + " ollama_response = ask_ollama(user_input)\n", + " display(Markdown(ollama_response))" ] }, {