
Added my contributions to community-contributions

pull/88/head
hushyangqing 4 months ago
commit 7c14d0cfd4

193
week1/community-contributions/week1 EXERCISE.ipynb

@@ -13,180 +13,100 @@
},
{
"cell_type": "code",
"execution_count": 52,
"execution_count": null,
"id": "c1070317-3ed9-4659-abe3-828943230e03",
"metadata": {},
"outputs": [],
"source": [
"# imports\n",
"import os\n",
"import requests\n",
"import json \n",
"import ollama\n",
"from dotenv import load_dotenv\n",
"from IPython.display import Markdown, display, update_display\n",
"from openai import OpenAI\n",
"import ollama\n"
"from openai import OpenAI"
]
},
{
"cell_type": "code",
"execution_count": 22,
"execution_count": null,
"id": "4a456906-915a-4bfd-bb9d-57e505c5093f",
"metadata": {},
"outputs": [],
"source": [
"# constants\n",
"\n",
"load_dotenv(override=True)\n",
"api_key = os.getenv('OPENAI_API_KEY')\n",
"\n",
"if api_key and api_key.startswith('sk-proj-') and len(api_key)>10:\n",
" print(\"API key looks good so far\")\n",
"else:\n",
" print(\"There might be a problem with your API key? Please visit the troubleshooting notebook!\")\n",
"\n",
"MODEL_GPT = 'gpt-4o-mini'\n",
"MODEL_LLAMA = 'llama3.2'\n",
"OLLAMA_API = \"http://localhost:11434/api/chat\"\n"
]
},
{
"cell_type": "code",
"execution_count": 29,
"id": "0bb65a08-9090-434a-b99d-5659a370cfbc",
"metadata": {},
"outputs": [],
"source": [
"# Prompts\n",
"\n",
"system_prompt = \"You are a tutor and helps with the user questions in detail with markdown respond with key point \\\n",
"considering the recent development around the world, keep the response in most appropriate tone \\n\"\n",
"\n",
"system_prompt += \"Some of Examples are\"\n",
"system_prompt += \"\"\"\n",
"{\"question\": \"1+1?\", \"response\": \"2\"},\n",
"{\"question\": \"why we shouls learn LLM Models?\", \"response\": \" Learning about Large Language Models (LLMs) is important because they are a rapidly evolving technology with the potential to significantly impact various industries, offering advanced capabilities in text generation, translation, information retrieval, and more, which can be valuable for professionals across diverse fields, allowing them to enhance their work and gain a competitive edge by understanding and utilizing these powerful language processing tools.\\ \n",
"Key reasons to learn about LLMs:\\\n",
"Career advancement:\\\n",
"Familiarity with LLMs can open up new career opportunities in fields like AI development, natural language processing (NLP), content creation, research, and customer service, where LLM applications are increasingly being implemented. \\\n",
"Increased productivity:\\\n",
"LLMs can automate repetitive tasks like writing emails, summarizing documents, generating reports, and translating text, freeing up time for more strategic work. \\\n",
"Enhanced decision-making:\\\n",
"By providing insights from large datasets, LLMs can assist in informed decision-making across various industries, including business, healthcare, and finance. \\\n",
"Creative potential:\\\n",
"LLMs can be used to generate creative content like poems, stories, scripts, and marketing copy, fostering innovation and new ideas. \\\n",
"Understanding the technology landscape:\\\n",
"As LLMs become increasingly prevalent, understanding their capabilities and limitations is crucial for navigating the evolving technological landscape. \\\n",
"What is a large language model (LLM)? - Cloudflare\\\n",
"A large language model (LLM) is a type of artificial intelligence (AI) program that can recognize and generate text, among other t...\\\n",
" \"},\n",
"{\"question\": \"what is the future of AI?\", \"response\": \"AI is predicted to grow increasingly pervasive as technology develops, revolutionising sectors including healthcare, banking, and transportation\"},\n",
"\"\"\"\n"
"openai = OpenAI()"
]
},
{
"cell_type": "code",
"execution_count": 13,
"execution_count": null,
"id": "a8d7923c-5f28-4c30-8556-342d7c8497c1",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"API key looks good so far\n"
]
}
],
"outputs": [],
"source": [
"# set up environment\n",
"load_dotenv()\n",
"api_key = os.getenv('OPENAI_API_KEY')\n",
"system_prompt = \"You are an assistant that analyzes a technical question \\\n",
"and responds with a short, clear, structured explanation. Response in markdown\"\n",
"\n",
"if api_key and api_key.startswith('sk-proj-') and len(api_key)>10:\n",
" print(\"API key looks good so far\")\n",
"else:\n",
" print(\"There might be a problem with your API key? Please visit the troubleshooting notebook!\")\n",
" \n",
"MODEL = 'gpt-4o-mini'\n",
"openai = OpenAI()"
"def get_user_prompt(question):\n",
" user_prompt = f\"You are looking at a technical questions as following: \\n\"\n",
" user_prompt += question\n",
" user_prompt += f\"\\nPlease response answer with a logical explanation\"\n",
" return user_prompt"
]
},
{
"cell_type": "code",
"execution_count": 15,
"execution_count": null,
"id": "3f0d0137-52b0-47a8-81a8-11a90a010798",
"metadata": {},
"outputs": [],
"source": [
"# here is the question; type over this to ask something new\n",
"\n",
"user_question = \"\"\"\n",
"How important it is for a Data Engineers to learn LLM, Considering the evolution of AI now a days?.\n",
"question = \"\"\"\n",
"Please explain what this code does and why:\n",
"yield from {book.get(\"author\") for book in books if book.get(\"author\")}\n",
"\"\"\""
]
},
{
"cell_type": "code",
"execution_count": 19,
"execution_count": null,
"id": "60ce7000-a4a5-4cce-a261-e75ef45063b4",
"metadata": {},
"outputs": [
{
"data": {
"text/markdown": [
"{\"question\": \"How important is it for Data Engineers to learn LLMs?\", \"response\": \"The importance of Data Engineers learning about Large Language Models (LLMs) cannot be overstated, especially given the rapid evolution of AI and its applications across various domains. Here's why this knowledge is essential:\n",
"\n",
"### Key Reasons for Data Engineers to Learn about LLMs:\n",
"\n",
"1. **Integration of AI in Data Pipelines:**\n",
" - As organizations increasingly adopt AI-driven solutions, Data Engineers will need to integrate LLMs into data pipelines for tasks such as text processing, feature extraction, and sentiment analysis.\n",
"\n",
"2. **Understanding Data Requirements:**\n",
" - LLMs require substantial and specific datasets for optimal performance. Knowledge of these requirements will help Data Engineers curate, preprocess, and manage data more effectively.\n",
"\n",
"3. **Enhanced Data Quality:**\n",
" - Data Engineers play a crucial role in ensuring data quality. Understanding LLMs can guide them in implementing effective validation checks and enhancing the data used for training these models.\n",
"\n",
"4. **Collaboration with Data Scientists:**\n",
" - Data Engineers are essential collaborators with Data Scientists. A solid grasp of LLMs will enable them to facilitate better communication and cooperation in model deployment and optimization.\n",
"\n",
"5. **Innovation in Product Development:**\n",
" - Familiarity with LLMs will enable Data Engineers to contribute innovative ideas for new products or features that leverage language processing capabilities, leading to enhanced user experiences.\n",
"\n",
"6. **Staying Current with Industry Trends:**\n",
" - The AI landscape is rapidly changing. Learning about LLMs keeps Data Engineers abreast of current trends and technologies, ensuring they remain competitive in the job market and valuable to their organizations.\n",
"\n",
"7. **Ethical and Responsible AI:**\n",
" - Understanding LLMs involves awareness of their ethical considerations, such as bias and misuse. Data Engineers can advocate for responsible AI practices within their organizations by being educated on these issues.\n",
"\n",
"8. **Scalability Considerations:**\n",
" - Data Engineers will need to design systems that can scale efficiently, especially when dealing with the substantial computational resources required for training and deploying LLMs.\n",
"\n",
"### Conclusion:\n",
"In summary, learning about LLMs is crucial for Data Engineers as it not only enhances their skill set but also positions them to contribute meaningfully to AI initiatives within their organizations. Embracing this knowledge will ultimately drive innovation and efficiency in their data-driven projects.\"}"
],
"text/plain": [
"<IPython.core.display.Markdown object>"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"outputs": [],
"source": [
"# Get gpt-4o-mini to answer, with streaming\n",
"def ask_tutor(question):\n",
"def stream_QA(question):\n",
" stream = openai.chat.completions.create(\n",
" model=MODEL_GPT,\n",
" messages=[\n",
" {\"role\": \"system\", \"content\": question},\n",
" {\"role\": \"user\", \"content\": system_prompt}\n",
" ],\n",
" model= MODEL_GPT,\n",
" messages = [\n",
" {\"role\": \"system\", \"content\": system_prompt},\n",
" {\"role\": \"user\", \"content\": get_user_prompt(question)}\n",
" ],\n",
" stream=True\n",
" )\n",
" \n",
"\n",
" response = \"\"\n",
" display_handle = display(Markdown(\"\"), display_id=True)\n",
" for chunk in stream:\n",
" response += chunk.choices[0].delta.content or ''\n",
" response = response.replace(\"```\",\"\").replace(\"markdown\", \"\")\n",
" update_display(Markdown(response), display_id=display_handle.display_id)\n",
" response = response.replace(\"```\", \"\").replace(\"markdown\", \"\")\n",
" update_display(Markdown(response), display_id = display_handle.display_id)\n",
"\n",
"# call the gpt-4o-mini to answer with streaming\n",
"ask_tutor(user_question)"
"stream_QA(question)"
]
},
{
@@ -197,31 +117,24 @@
"outputs": [],
"source": [
"# Get Llama 3.2 to answer\n",
"messages = [\n",
" {\"role\": \"user\", \"content\": user_question}\n",
"]\n",
"HEADERS = {\"Content-Type\": \"application/json\"}\n",
"payload = {\n",
" \"model\": MODEL_LLAMA,\n",
" \"messages\": messages,\n",
" \"stream\": True\n",
" }\n",
"\n",
"response = ollama.chat(model=MODEL_LLAMA, messages=messages)\n",
"reply = response['message']['content']\n",
"display(Markdown(reply))\n",
"def ollama_QA(question):\n",
" response = ollama.chat(model=MODEL_LLAMA, messages = [\n",
" {\"role\": \"system\", \"content\": system_prompt},\n",
" {\"role\": \"user\", \"content\": get_user_prompt(question)}\n",
" ])\n",
" return Markdown(response['message']['content'])\n",
"\n",
"# # Process the response stream\n",
"# for line in response.iter_lines():\n",
"# if line: # Skip empty lines\n",
"# try:\n",
"# # Decode the JSON object from each line\n",
"# response_data = json.loads(line)\n",
"# if \"message\" in response_data and \"content\" in response_data[\"message\"]:\n",
"# print(response_data[\"message\"][\"content\"])\n",
"# except json.JSONDecodeError as e:\n",
"# print(f\"Failed to decode JSON: {e}\")\n"
"ollama_QA(question)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "6b484100-e5cf-40db-827c-d5618b154654",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
