From the Udemy course on LLM engineering.
https://www.udemy.com/course/llm-engineering-master-ai-and-large-language-models
You can not select more than 25 topics
Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.
273 lines
8.2 KiB
{ |
|
"cells": [ |
|
{ |
|
"cell_type": "markdown", |
|
"id": "fad31e32-2e42-42ae-ae63-c15d90292839", |
|
"metadata": {}, |
|
"source": [ |
|
"# First Project\n", |
|
"Ollama -> Summary\n", |
|
"huggingface_hub -> \"facebook/m2m100_418M\" for translation" |
|
] |
|
}, |
|
{

 "cell_type": "code",

 "execution_count": null,

 "id": "5fb79a20-a455-4d27-91a1-91958af786c1",

 "metadata": {},

 "outputs": [],

 "source": [

  "# Use %pip (not !pip) so the packages install into this kernel's environment\n",

  "%pip install transformers datasets torch\n",

  "%pip install huggingface_hub"

 ]

},
|
{ |
|
"cell_type": "code", |
|
"execution_count": null, |
|
"id": "e95ac7f2-5192-4f83-acf3-61df30cd3109", |
|
"metadata": {}, |
|
"outputs": [], |
|
"source": [ |
|
"# imports\n", |
|
"import requests\n", |
|
"from bs4 import BeautifulSoup\n", |
|
"import json\n", |
|
"import ollama" |
|
] |
|
}, |
|
{

 "cell_type": "code",

 "execution_count": null,

 "id": "12276d74-0e79-4e66-9135-1c9d1a80b943",

 "metadata": {},

 "outputs": [],

 "source": [

  "class Website:\n",

  "    \"\"\"Fetch a URL and expose its title and visible body text.\"\"\"\n",

  "    def __init__(self, url):\n",

  "        self.url = url\n",

  "        response = requests.get(url)\n",

  "        response.raise_for_status()  # fail fast on HTTP errors instead of parsing an error page\n",

  "        soup = BeautifulSoup(response.content, 'html.parser')\n",

  "        self.title = soup.title.string if soup.title else \"No title found\"\n",

  "        if soup.body:\n",

  "            # Strip non-content tags before extracting the text\n",

  "            for irrelevant in soup.body([\"script\", \"style\", \"img\", \"input\"]):\n",

  "                irrelevant.decompose()\n",

  "            self.text = soup.body.get_text(separator=\"\\n\", strip=True)\n",

  "        else:\n",

  "            # Pages without a <body> (e.g. frames-only) previously crashed here\n",

  "            self.text = \"\"\n",

  "\n",

  "huggingface_url = \"https://huggingface.co/learn/ml-for-3d-course\"\n",

  "huggingface_website = Website(huggingface_url)\n",

  "\n",

  "huggingface_data = {\n",

  "    \"title\": huggingface_website.title,\n",

  "    \"text\": huggingface_website.text\n",

  "}\n",

  "print(huggingface_data)\n",

  "\n",

  "# Persist the scraped data for the translation cells below\n",

  "with open('ml_for_3d_course_data.json', 'w') as f:\n",

  "    json.dump(huggingface_data, f)\n"

 ]

},
|
{

 "cell_type": "code",

 "execution_count": null,

 "id": "7d74c85c-3e09-4514-bde4-4cafc4910c52",

 "metadata": {},

 "outputs": [],

 "source": [

  "# Text scraped in the previous cell\n",

  "huggingface_text = huggingface_data['text']\n",

  "\n",

  "# Ask the local Ollama model to summarize the page text\n",

  "response_summary = ollama.chat(\n",

  "    model=\"llama3.2:latest\",\n",

  "    messages=[{\"role\": \"user\", \"content\": f\"Summarize the following text: {huggingface_text}\"}]\n",

  ")\n",

  "\n",

  "# Use consistent attribute access; avoid dumping the whole response object into the output\n",

  "summary_huggingface_text = response_summary.message.content\n",

  "print(\"Summary Text:\", summary_huggingface_text)\n"

 ]

},
|
{

 "cell_type": "markdown",

 "id": "d13764d5-cb76-46c5-bbe6-d132b31a9ea6",

 "metadata": {},

 "source": [

  "## HuggingFace Translation"

 ]

},
|
{

 "cell_type": "code",

 "execution_count": null,

 "id": "08405038-4115-487f-9efc-de58572453c1",

 "metadata": {},

 "outputs": [],

 "source": [

  "# Reuse the Website class defined earlier instead of redefining it here;\n",

  "# a second definition would silently shadow the first.\n",

  "url = \"https://huggingface.co/learn/ml-for-3d-course\"\n",

  "website = Website(url)\n",

  "print(website.title)\n",

  "print(website.text[:1000])  # preview only, not the full page text\n",

  "\n",

  "data = {\n",

  "    \"title\": website.title,\n",

  "    \"text\": website.text\n",

  "}\n",

  "\n",

  "# Persist for the translation cells below\n",

  "with open('ml_for_3d_course_data.json', 'w') as f:\n",

  "    json.dump(data, f)\n"

 ]

},
|
{

 "cell_type": "code",

 "execution_count": null,

 "id": "0632352f-4b16-4125-83bf-f3cc3aabd659",

 "metadata": {},

 "outputs": [],

 "source": [

  "# Bare expression on the last line uses Jupyter's rich display instead of print()\n",

  "data"

 ]

},
|
{

 "cell_type": "code",

 "execution_count": null,

 "id": "a85f8625-725d-4d7f-8cb7-8da4276f81cf",

 "metadata": {},

 "outputs": [],

 "source": [

  "# Use %pip so the package installs into this kernel's environment\n",

  "%pip install sacremoses"

 ]

},
|
{

 "cell_type": "code",

 "execution_count": null,

 "id": "c800cea4-f4a4-4e41-9637-31ff11afb256",

 "metadata": {},

 "outputs": [],

 "source": [

  "import json\n",

  "from transformers import M2M100ForConditionalGeneration, M2M100Tokenizer\n",

  "\n",

  "# Load the M2M100 model and tokenizer\n",

  "model_name = \"facebook/m2m100_418M\"\n",

  "model = M2M100ForConditionalGeneration.from_pretrained(model_name)\n",

  "tokenizer = M2M100Tokenizer.from_pretrained(model_name)\n",

  "\n",

  "# Load the JSON file saved by the scraping cell\n",

  "with open('ml_for_3d_course_data.json', 'r') as f:\n",

  "    data = json.load(f)\n",

  "\n",

  "text = data[\"text\"]\n",

  "\n",

  "# Translate English -> Korean\n",

  "source_lang = \"en\"\n",

  "target_lang = \"ko\"\n",

  "\n",

  "# M2M100 only needs src_lang on the tokenizer; the target language is\n",

  "# selected at generation time via forced_bos_token_id below.\n",

  "tokenizer.src_lang = source_lang\n",

  "\n",

  "# Split the text into chunks of at most 512 CHARACTERS. This is only a\n",

  "# rough proxy for the model's 512-TOKEN limit; the tokenizer call below\n",

  "# still truncates any chunk that exceeds 512 tokens.\n",

  "max_input_length = 512\n",

  "chunks = [text[i:i+max_input_length] for i in range(0, len(text), max_input_length)]\n",

  "print(f\"Translating {len(chunks)} chunks\")  # avoid dumping the full text into the output\n",

  "\n",

  "translated_chunks = []\n",

  "\n",

  "# Translate each chunk independently\n",

  "for chunk in chunks:\n",

  "    encoded = tokenizer(chunk, return_tensors=\"pt\", padding=True, truncation=True, max_length=512)\n",

  "\n",

  "    # Force the decoder to start in the target language (Korean)\n",

  "    generated_tokens = model.generate(**encoded, forced_bos_token_id=tokenizer.get_lang_id(target_lang), max_length=512)\n",

  "\n",

  "    translated_text = tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)[0]\n",

  "    translated_chunks.append(translated_text)\n",

  "\n",

  "# Recombine the translated chunks\n",

  "final_translated_text = ' '.join(translated_chunks)\n",

  "print(\"Translated Text:\", final_translated_text)\n"

 ]

},
|
{

 "cell_type": "code",

 "execution_count": null,

 "id": "ffe0f264-a588-422f-a6e1-b60504d1e02c",

 "metadata": {},

 "outputs": [],

 "source": [

  "import json\n",

  "import requests\n",

  "\n",

  "# Ollama's HTTP API listens on port 11434 (not 11411) and exposes\n",

  "# /api/generate. Ollama cannot serve HF seq2seq checkpoints such as\n",

  "# facebook/m2m100_418M, so translation is requested by prompting a\n",

  "# model Ollama actually serves.\n",

  "ollama_url = \"http://localhost:11434/api/generate\"\n",

  "\n",

  "# Load the saved JSON file\n",

  "with open('ml_for_3d_course_data.json', 'r') as f:\n",

  "    data = json.load(f)\n",

  "\n",

  "# Extract the text to translate\n",

  "course_text = data[\"text\"]\n",

  "\n",

  "# Source and target languages for the translation prompt\n",

  "source_language = \"en\"\n",

  "target_language = \"ko\"\n",

  "\n",

  "# Build the request payload for Ollama's generate endpoint\n",

  "payload = {\n",

  "    \"model\": \"llama3.2:latest\",\n",

  "    \"prompt\": f\"Translate the following text from {source_language} to {target_language}:\\n\\n{course_text}\",\n",

  "    \"stream\": False\n",

  "}\n",

  "\n",

  "# Call the API\n",

  "response = requests.post(ollama_url, json=payload)\n",

  "\n",

  "# Check the response; /api/generate returns the text under the 'response' key\n",

  "if response.status_code == 200:\n",

  "    translated_course_text = response.json().get(\"response\", \"Translation failed\")\n",

  "    print(\"Translated Course Text:\", translated_course_text)\n",

  "else:\n",

  "    print(f\"Error {response.status_code}: {response.text}\")\n"

 ]

},
|
} |
|
], |
|
"metadata": { |
|
"kernelspec": { |
|
"display_name": "Python 3 (ipykernel)", |
|
"language": "python", |
|
"name": "python3" |
|
}, |
|
"language_info": { |
|
"codemirror_mode": { |
|
"name": "ipython", |
|
"version": 3 |
|
}, |
|
"file_extension": ".py", |
|
"mimetype": "text/x-python", |
|
"name": "python", |
|
"nbconvert_exporter": "python", |
|
"pygments_lexer": "ipython3", |
|
"version": "3.11.11" |
|
} |
|
}, |
|
"nbformat": 4, |
|
"nbformat_minor": 5 |
|
}
|
|
|