From 4fec4fcf761e5492ae12ed59244ea84a57dccc53 Mon Sep 17 00:00:00 2001
From: dinorrusso <36603919+dinorrusso@users.noreply.github.com>
Date: Sun, 5 Jan 2025 15:33:42 -0500
Subject: [PATCH] Added week5 day5 RAG example using Ollama, all local

---
 .../day 5 - ollama_rag_1.ipynb | 224 ++++++++++++++++++
 1 file changed, 224 insertions(+)
 create mode 100644 week5/community-contributions/day 5 - ollama_rag_1.ipynb

diff --git a/week5/community-contributions/day 5 - ollama_rag_1.ipynb b/week5/community-contributions/day 5 - ollama_rag_1.ipynb
new file mode 100644
index 0000000..18f002f
--- /dev/null
+++ b/week5/community-contributions/day 5 - ollama_rag_1.ipynb
@@ -0,0 +1,224 @@
+{
+ "cells": [
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# # Document loading, retrieval methods and text splitting\n",
+    "# !pip install -qU langchain langchain_community\n",
+    "\n",
+    "# # Local vector store via Chroma\n",
+    "# !pip install -qU langchain_chroma\n",
+    "\n",
+    "# # Local inference and embeddings via Ollama\n",
+    "# !pip install -qU langchain_ollama\n",
+    "\n",
+    "# # Web Loader\n",
+    "# !pip install -qU beautifulsoup4\n",
+    "\n",
+    "# # Pull the embedding model first\n",
+    "# !ollama pull nomic-embed-text\n",
+    "\n",
+    "# # PDF loading\n",
+    "# !pip install -qU pypdf"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Imports\n",
+    "import os\n",
+    "import glob\n",
+    "from dotenv import load_dotenv\n",
+    "import gradio as gr\n",
+    "from langchain_community.document_loaders import PyPDFLoader, DirectoryLoader\n",
+    "from langchain_text_splitters import CharacterTextSplitter, RecursiveCharacterTextSplitter\n",
+    "from langchain_chroma import Chroma\n",
+    "from langchain_ollama import OllamaEmbeddings\n",
+    "from langchain_ollama import ChatOllama\n",
+    "from langchain_core.output_parsers import StrOutputParser\n",
+    "from langchain_core.prompts import ChatPromptTemplate\n",
+    "from langchain_core.runnables import RunnablePassthrough"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Read in documents using LangChain's loaders\n",
+    "# Take everything in all the sub-folders of our knowledge base\n",
+    "\n",
+    "folders = glob.glob(\"Manuals/*\")\n",
+    "\n",
+    "def add_metadata(doc, doc_type):\n",
+    "    doc.metadata[\"doc_type\"] = doc_type\n",
+    "    return doc\n",
+    "\n",
+    "documents = []\n",
+    "for folder in folders:\n",
+    "    doc_type = os.path.basename(folder)\n",
+    "    loader = DirectoryLoader(folder, glob=\"**/*.pdf\", loader_cls=PyPDFLoader)\n",
+    "    folder_docs = loader.load()\n",
+    "    documents.extend([add_metadata(doc, doc_type) for doc in folder_docs])\n",
+    "\n",
+    "text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=200)\n",
+    "chunks = text_splitter.split_documents(documents)\n",
+    "\n",
+    "print(f\"Total number of chunks: {len(chunks)}\")\n",
+    "print(f\"Document types found: {set(doc.metadata['doc_type'] for doc in documents)}\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Put the chunks of data into a vector store that associates a vector embedding with each chunk\n",
+    "# Chroma is a popular open-source vector database based on SQLite\n",
+    "DB_NAME = \"vector_db\"\n",
+    "\n",
+    "embeddings = OllamaEmbeddings(model=\"nomic-embed-text\")\n",
+    "\n",
+    "# Delete the collection if it already exists\n",
+    "if os.path.exists(DB_NAME):\n",
+    "    Chroma(persist_directory=DB_NAME, embedding_function=embeddings).delete_collection()\n",
+    "\n",
+    "# Create the vectorstore\n",
+    "vectorstore = Chroma.from_documents(documents=chunks, embedding=embeddings, persist_directory=DB_NAME)\n",
+    "print(f\"Vectorstore created with {vectorstore._collection.count()} documents\")"
+   ]
+  },
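+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Optional sanity check (a sketch added here, not part of the original flow): pull one stored embedding back out of Chroma to confirm the vector dimension. `nomic-embed-text` typically produces 768-dimensional vectors."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# A sketch: fetch a single record with its embedding from the underlying Chroma collection\n",
+    "sample = vectorstore._collection.get(limit=1, include=[\"embeddings\"])\n",
+    "print(f\"Each chunk is embedded into {len(sample['embeddings'][0])} dimensions\")"
+   ]
+  },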
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Run a quick test: similarity_search returns the 4 most similar chunks by default\n",
+    "question = \"What kind of grill is the Spirit II?\"\n",
+    "docs = vectorstore.similarity_search(question)\n",
+    "len(docs)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "docs[0]"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Create a new chat with Ollama\n",
+    "from langchain.memory import ConversationBufferMemory\n",
+    "from langchain.chains import ConversationalRetrievalChain\n",
+    "\n",
+    "MODEL = \"llama3.2:latest\"\n",
+    "llm = ChatOllama(temperature=0.7, model=MODEL)\n",
+    "\n",
+    "# Set up the conversation memory for the chat\n",
+    "memory = ConversationBufferMemory(memory_key='chat_history', return_messages=True)\n",
+    "\n",
+    "# The retriever is an abstraction over the vector store that will be used during RAG\n",
+    "retriever = vectorstore.as_retriever()\n",
+    "\n",
+    "# Putting it together: set up the conversation chain with the Llama 3.2 LLM, the vector store and memory\n",
+    "conversation_chain = ConversationalRetrievalChain.from_llm(llm=llm, retriever=retriever, memory=memory)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Let's try a simple question\n",
+    "\n",
+    "query = \"How do I change the water bottle?\"\n",
+    "result = conversation_chain.invoke({\"question\": query})\n",
+    "print(result[\"answer\"])"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 15,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Set up a fresh conversation memory for the chat\n",
+    "memory = ConversationBufferMemory(memory_key='chat_history', return_messages=True)\n",
+    "\n",
+    "# Putting it together: set up the conversation chain with the LLM, the vector store and memory\n",
+    "conversation_chain = ConversationalRetrievalChain.from_llm(llm=llm, retriever=retriever, memory=memory)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 16,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Wrapping that in a function\n",
+    "\n",
+    "def chat(question, history):\n",
+    "    result = conversation_chain.invoke({\"question\": question})\n",
+    "    return result[\"answer\"]"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Now we will bring this up in Gradio using the Chat interface\n",
+    "\n",
+    "A quick and easy way to prototype a chat with an LLM"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# And in Gradio:\n",
+    "\n",
+    "view = gr.ChatInterface(chat, type=\"messages\").launch(inbrowser=True)"
+   ]
+  },
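+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Optional tuning idea (a sketch, not part of the original flow): if answers seem to miss relevant context, one simple knob is how many chunks the retriever returns per query; the `k=8` below is an arbitrary illustration."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# A sketch: widen retrieval from the default 4 chunks to 8 per query, then rebuild the chain\n",
+    "retriever = vectorstore.as_retriever(search_kwargs={\"k\": 8})\n",
+    "\n",
+    "# Fresh memory so earlier answers don't leak into the re-tuned chain\n",
+    "memory = ConversationBufferMemory(memory_key='chat_history', return_messages=True)\n",
+    "conversation_chain = ConversationalRetrievalChain.from_llm(llm=llm, retriever=retriever, memory=memory)"
+   ]
+  }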
"nbformat_minor": 2 +}