From the Udemy course on LLM Engineering:
https://www.udemy.com/course/llm-engineering-master-ai-and-large-language-models
{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "# imports\n",
    "\n",
    "import os\n",
    "import glob\n",
    "from dotenv import load_dotenv\n",
    "import gradio as gr\n",
    "# import gemini\n",
    "import google.generativeai"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "metadata": {},
   "outputs": [],
   "source": [
    "# imports for langchain\n",
    "\n",
    "from langchain.document_loaders import DirectoryLoader, TextLoader\n",
    "from langchain.text_splitter import CharacterTextSplitter\n",
    "from langchain.schema import Document\n",
    "# from langchain_openai import OpenAIEmbeddings, ChatOpenAI\n",
    "from langchain_chroma import Chroma\n",
    "from langchain_google_genai import GoogleGenerativeAIEmbeddings, ChatGoogleGenerativeAI\n",
    "import numpy as np\n",
    "from sklearn.manifold import TSNE\n",
    "import plotly.graph_objects as go\n",
    "from langchain.memory import ConversationBufferMemory\n",
    "from langchain.chains import ConversationalRetrievalChain"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "# price is a factor for our company, so we're going to use a low cost model\n",
    "\n",
    "MODEL = \"gemini-1.5-flash\"\n",
    "db_name = \"vector_db\""
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Load environment variables in a file called .env\n",
    "\n",
    "load_dotenv()\n",
    "os.environ['GOOGLE_API_KEY'] = os.getenv('GOOGLE_API_KEY', 'your-key-if-not-using-env')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "google.generativeai.configure()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Read in documents using LangChain's loaders\n",
    "# Take everything in all the sub-folders of our knowledgebase\n",
    "\n",
    "folders = glob.glob(\"knowledge-base/*\")\n",
    "\n",
    "# With thanks to CG and Jon R, students on the course, for this fix needed for some users\n",
    "text_loader_kwargs = {'encoding': 'utf-8'}\n",
    "# If that doesn't work, some Windows users might need to uncomment the next line instead\n",
    "# text_loader_kwargs={'autodetect_encoding': True}\n",
    "\n",
    "documents = []\n",
    "for folder in folders:\n",
    "    doc_type = os.path.basename(folder)\n",
    "    loader = DirectoryLoader(folder, glob=\"**/*.md\", loader_cls=TextLoader, loader_kwargs=text_loader_kwargs)\n",
    "    folder_docs = loader.load()\n",
    "    for doc in folder_docs:\n",
    "        doc.metadata[\"doc_type\"] = doc_type\n",
    "        documents.append(doc)"
   ]
  },
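  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Optional sanity check (a minimal sketch added here, not prescribed by the course): tally how many documents were loaded from each sub-folder before chunking."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Illustrative only: count loaded documents per doc_type\n",
    "from collections import Counter\n",
    "\n",
    "print(f\"Loaded {len(documents)} documents in total\")\n",
    "print(Counter(doc.metadata[\"doc_type\"] for doc in documents))"
   ]
  },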
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Created a chunk of size 1088, which is longer than the specified 1000\n"
     ]
    }
   ],
   "source": [
    "text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=200)\n",
    "chunks = text_splitter.split_documents(documents)"
   ]
  },
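  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Optional peek at one chunk (a minimal sketch): each chunk is a LangChain `Document` carrying `page_content` plus the `metadata` we attached, including `doc_type`."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Illustrative only: inspect the first chunk produced by the splitter\n",
    "print(chunks[0].metadata)\n",
    "print(chunks[0].page_content[:300])"
   ]
  },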
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "123"
      ]
     },
     "execution_count": 9,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "len(chunks)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Document types found: company, contracts, employees, products\n"
     ]
    }
   ],
   "source": [
    "doc_types = set(chunk.metadata['doc_type'] for chunk in chunks)\n",
    "print(f\"Document types found: {', '.join(doc_types)}\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Vectorstore created with 123 documents\n"
     ]
    }
   ],
   "source": [
    "embeddings = GoogleGenerativeAIEmbeddings(model=\"models/embedding-001\")\n",
    "\n",
    "# Check if a Chroma Datastore already exists - if so, delete the collection to start from scratch\n",
    "\n",
    "if os.path.exists(db_name):\n",
    "    Chroma(persist_directory=db_name, embedding_function=embeddings).delete_collection()\n",
    "\n",
    "# Create our Chroma vectorstore!\n",
    "\n",
    "vectorstore = Chroma.from_documents(documents=chunks, embedding=embeddings, persist_directory=db_name)\n",
    "print(f\"Vectorstore created with {vectorstore._collection.count()} documents\")"
   ]
  },
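  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Optional extra (a minimal sketch): we can query the vector store directly with `similarity_search` to see which chunks come back for a question, before wiring up the full chain. The query string below is just an example."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Illustrative only: fetch the 3 chunks most similar to an example question\n",
    "sample_query = \"Who is the CEO of Insurellm?\"  # example query\n",
    "for doc in vectorstore.similarity_search(sample_query, k=3):\n",
    "    print(doc.metadata['doc_type'], '-', doc.page_content[:80])"
   ]
  },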
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "The vectors have 768 dimensions\n"
     ]
    }
   ],
   "source": [
    "# Get one vector and find how many dimensions it has\n",
    "\n",
    "collection = vectorstore._collection\n",
    "sample_embedding = collection.get(limit=1, include=[\"embeddings\"])[\"embeddings\"][0]\n",
    "dimensions = len(sample_embedding)\n",
    "print(f\"The vectors have {dimensions:,} dimensions\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Prework\n",
    "\n",
    "result = collection.get(include=['embeddings', 'documents', 'metadatas'])\n",
    "vectors = np.array(result['embeddings'])\n",
    "documents = result['documents']\n",
    "doc_types = [metadata['doc_type'] for metadata in result['metadatas']]\n",
    "colors = [['blue', 'green', 'red', 'orange'][['products', 'employees', 'contracts', 'company'].index(t)] for t in doc_types]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# We humans find it easier to visualize things in 2D!\n",
    "# Reduce the dimensionality of the vectors to 2D using t-SNE\n",
    "# (t-distributed stochastic neighbor embedding)\n",
    "\n",
    "tsne = TSNE(n_components=2, random_state=42)\n",
    "reduced_vectors = tsne.fit_transform(vectors)\n",
    "\n",
    "# Create the 2D scatter plot\n",
    "fig = go.Figure(data=[go.Scatter(\n",
    "    x=reduced_vectors[:, 0],\n",
    "    y=reduced_vectors[:, 1],\n",
    "    mode='markers',\n",
    "    marker=dict(size=5, color=colors, opacity=0.8),\n",
    "    text=[f\"Type: {t}<br>Text: {d[:100]}...\" for t, d in zip(doc_types, documents)],\n",
    "    hoverinfo='text'\n",
    ")])\n",
    "\n",
    "fig.update_layout(\n",
    "    title='2D Chroma Vector Store Visualization',\n",
    "    scene=dict(xaxis_title='x', yaxis_title='y'),\n",
    "    width=800,\n",
    "    height=600,\n",
    "    margin=dict(r=20, b=10, l=10, t=40)\n",
    ")\n",
    "\n",
    "fig.show()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Let's try 3D!\n",
    "\n",
    "tsne = TSNE(n_components=3, random_state=42)\n",
    "reduced_vectors = tsne.fit_transform(vectors)\n",
    "\n",
    "# Create the 3D scatter plot\n",
    "fig = go.Figure(data=[go.Scatter3d(\n",
    "    x=reduced_vectors[:, 0],\n",
    "    y=reduced_vectors[:, 1],\n",
    "    z=reduced_vectors[:, 2],\n",
    "    mode='markers',\n",
    "    marker=dict(size=5, color=colors, opacity=0.8),\n",
    "    text=[f\"Type: {t}<br>Text: {d[:100]}...\" for t, d in zip(doc_types, documents)],\n",
    "    hoverinfo='text'\n",
    ")])\n",
    "\n",
    "fig.update_layout(\n",
    "    title='3D Chroma Vector Store Visualization',\n",
    "    scene=dict(xaxis_title='x', yaxis_title='y', zaxis_title='z'),\n",
    "    width=900,\n",
    "    height=700,\n",
    "    margin=dict(r=20, b=10, l=10, t=40)\n",
    ")\n",
    "\n",
    "fig.show()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "RAG pipeline using LangChain"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "C:\\Users\\GANESH\\AppData\\Local\\Temp\\ipykernel_524\\4130109764.py:5: LangChainDeprecationWarning:\n",
      "\n",
      "Please see the migration guide at: https://python.langchain.com/docs/versions/migrating_memory/\n",
      "\n"
     ]
    }
   ],
   "source": [
    "# create a new Chat with ChatGoogleGenerativeAI\n",
    "llm = ChatGoogleGenerativeAI(model=MODEL, temperature=0.7)\n",
    "\n",
    "# set up the conversation memory for the chat\n",
    "memory = ConversationBufferMemory(memory_key='chat_history', return_messages=True)\n",
    "\n",
    "# the retriever is an abstraction over the VectorStore that will be used during RAG\n",
    "retriever = vectorstore.as_retriever()\n",
    "\n",
    "# putting it together: set up the conversation chain with the Gemini LLM, the vector store and memory\n",
    "conversation_chain = ConversationalRetrievalChain.from_llm(llm=llm, retriever=retriever, memory=memory)"
   ]
  },
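  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Optional extra (a minimal sketch): to debug the RAG step, ask the retriever directly which chunks it would hand to the LLM for a question. This assumes the classic `get_relevant_documents` API; newer LangChain versions use `retriever.invoke` instead. The question string is just an example."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Illustrative only: see what the retriever fetches for an example question\n",
    "docs = retriever.get_relevant_documents(\"What does Insurellm sell?\")\n",
    "for doc in docs:\n",
    "    print(doc.metadata['doc_type'], '-', doc.page_content[:80])"
   ]
  },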
  {
   "cell_type": "code",
   "execution_count": 20,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Insurellm is an insurance technology company with 200 employees and over 300 clients worldwide. They offer four software products, including Homellm, a portal for home insurance companies that integrates with existing platforms and offers a customer portal for policy management. Their pricing model is based on provider size and customization needs.\n"
     ]
    }
   ],
   "source": [
    "query = \"Can you describe Insurellm in a few sentences\"\n",
    "result = conversation_chain.invoke({\"question\": query})\n",
    "print(result[\"answer\"])"
   ]
  },
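  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Optional extra (a minimal sketch): because the chain keeps a ConversationBufferMemory, a follow-up question can say \"it\" and still resolve to Insurellm. The follow-up wording is just an example."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Illustrative only: a follow-up question that relies on the chat history held in memory\n",
    "followup = \"What products does it offer?\"\n",
    "result = conversation_chain.invoke({\"question\": followup})\n",
    "print(result[\"answer\"])"
   ]
  },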
  {
   "cell_type": "code",
   "execution_count": 21,
   "metadata": {},
   "outputs": [],
   "source": [
    "# set up a new conversation memory for the chat\n",
    "memory = ConversationBufferMemory(memory_key='chat_history', return_messages=True)\n",
    "\n",
    "# putting it together: set up the conversation chain with the Gemini LLM, the vector store and memory\n",
    "conversation_chain = ConversationalRetrievalChain.from_llm(llm=llm, retriever=retriever, memory=memory)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Gradio User Interface"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 22,
   "metadata": {},
   "outputs": [],
   "source": [
    "def chat(message, history):\n",
    "    result = conversation_chain.invoke({\"question\": message})\n",
    "    return result[\"answer\"]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 23,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "* Running on local URL: http://127.0.0.1:7860\n",
      "\n",
      "To create a public link, set `share=True` in `launch()`.\n"
     ]
    },
    {
     "data": {
      "text/html": [
       "<div><iframe src=\"http://127.0.0.1:7860/\" width=\"100%\" height=\"500\" allow=\"autoplay; camera; microphone; clipboard-read; clipboard-write;\" frameborder=\"0\" allowfullscreen></iframe></div>"
      ],
      "text/plain": [
       "<IPython.core.display.HTML object>"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    }
   ],
   "source": [
    "view = gr.ChatInterface(chat, type=\"messages\").launch(inbrowser=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "llms",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.11.11"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}