From 44ab29f5482c09dfcfce589bc4a9e4794d235007 Mon Sep 17 00:00:00 2001 From: Gharke Date: Thu, 2 May 2024 16:06:38 -0500 Subject: [PATCH] reorg --- .obsidian/app.json | 1 + .obsidian/appearance.json | 3 + .obsidian/core-plugins-migration.json | 30 ++++ .obsidian/core-plugins.json | 20 +++ .obsidian/workspace.json | 160 +++++++++++++++++ ...Lama3 Locally with Ollama and OpenwebUI.md | 145 +++++++++++++++ Generative AI/Setup Local LLM.md | 17 ++ .../Speech to Text with Whisper AI.md | 5 + Linux/Install Docker on Ubuntu.md | 4 + Linux/Other Linux based servers.md | 132 ++++++++++++++ ...allation.md => Stand-up a Linux Server.md} | 169 +++--------------- Linux/ssh.md | 1 + Networking/NGINX Proxy Manager.md | 20 +++ Networking/Self Hosting Web Sites.md | 9 + 14 files changed, 567 insertions(+), 149 deletions(-) create mode 100644 .obsidian/app.json create mode 100644 .obsidian/appearance.json create mode 100644 .obsidian/core-plugins-migration.json create mode 100644 .obsidian/core-plugins.json create mode 100644 .obsidian/workspace.json create mode 100644 Generative AI/How to Run LLama3 Locally with Ollama and OpenwebUI.md create mode 100644 Generative AI/Setup Local LLM.md create mode 100644 Generative AI/Speech to Text with Whisper AI.md create mode 100644 Linux/Install Docker on Ubuntu.md create mode 100644 Linux/Other Linux based servers.md rename Linux/{linux-installation.md => Stand-up a Linux Server.md} (57%) create mode 100644 Networking/NGINX Proxy Manager.md create mode 100644 Networking/Self Hosting Web Sites.md diff --git a/.obsidian/app.json b/.obsidian/app.json new file mode 100644 index 0000000..9e26dfe --- /dev/null +++ b/.obsidian/app.json @@ -0,0 +1 @@ +{} \ No newline at end of file diff --git a/.obsidian/appearance.json b/.obsidian/appearance.json new file mode 100644 index 0000000..c8c365d --- /dev/null +++ b/.obsidian/appearance.json @@ -0,0 +1,3 @@ +{ + "accentColor": "" +} \ No newline at end of file diff --git a/.obsidian/core-plugins-migration.json b/.obsidian/core-plugins-migration.json new file mode 100644 index 0000000..436f43c --- /dev/null +++ b/.obsidian/core-plugins-migration.json @@ -0,0 +1,30 @@ +{ + "file-explorer": true, + "global-search": true, + "switcher": true, + "graph": true, + "backlink": true, + "canvas": true, + "outgoing-link": true, + "tag-pane": true, + "properties": false, + "page-preview": true, + "daily-notes": true, + "templates": true, + "note-composer": true, + "command-palette": true, + "slash-command": false, + "editor-status": true, + "bookmarks": true, + "markdown-importer": false, + "zk-prefixer": false, + "random-note": false, + "outline": true, + "word-count": true, + "slides": false, + "audio-recorder": false, + "workspaces": false, + "file-recovery": true, + "publish": false, + "sync": false +} \ No newline at end of file diff --git a/.obsidian/core-plugins.json b/.obsidian/core-plugins.json new file mode 100644 index 0000000..9405bfd --- /dev/null +++ b/.obsidian/core-plugins.json @@ -0,0 +1,20 @@ +[ + "file-explorer", + "global-search", + "switcher", + "graph", + "backlink", + "canvas", + "outgoing-link", + "tag-pane", + "page-preview", + "daily-notes", + "templates", + "note-composer", + "command-palette", + "editor-status", + "bookmarks", + "outline", + "word-count", + "file-recovery" +] \ No newline at end of file diff --git a/.obsidian/workspace.json b/.obsidian/workspace.json new file mode 100644 index 0000000..6605dcf --- /dev/null +++ b/.obsidian/workspace.json @@ -0,0 +1,160 @@ +{ + "main": { + "id": "c25544db87a8a7c0", 
+ "type": "split", + "children": [ + { + "id": "1b2d65b5a78a5614", + "type": "tabs", + "children": [ + { + "id": "5d2a67ec854cad2d", + "type": "leaf", + "state": { + "type": "empty", + "state": {} + } + } + ] + } + ], + "direction": "vertical" + }, + "left": { + "id": "32906295cceee2a6", + "type": "split", + "children": [ + { + "id": "1eb5274bb2a6aa60", + "type": "tabs", + "children": [ + { + "id": "361dbb7b2261d3c5", + "type": "leaf", + "state": { + "type": "file-explorer", + "state": { + "sortOrder": "alphabetical" + } + } + }, + { + "id": "29ed2969d0abc83f", + "type": "leaf", + "state": { + "type": "search", + "state": { + "query": "", + "matchingCase": false, + "explainSearch": false, + "collapseAll": false, + "extraContext": false, + "sortOrder": "alphabetical" + } + } + }, + { + "id": "81799efee2f3eb2b", + "type": "leaf", + "state": { + "type": "bookmarks", + "state": {} + } + } + ] + } + ], + "direction": "horizontal", + "width": 300 + }, + "right": { + "id": "10e5d151cc735035", + "type": "split", + "children": [ + { + "id": "c1ff032b3010bcb5", + "type": "tabs", + "children": [ + { + "id": "6e82f906bf19d638", + "type": "leaf", + "state": { + "type": "backlink", + "state": { + "collapseAll": false, + "extraContext": false, + "sortOrder": "alphabetical", + "showSearch": false, + "searchQuery": "", + "backlinkCollapsed": false, + "unlinkedCollapsed": true + } + } + }, + { + "id": "e7c74e549dffa78e", + "type": "leaf", + "state": { + "type": "outgoing-link", + "state": { + "linksCollapsed": false, + "unlinkedCollapsed": true + } + } + }, + { + "id": "8e17d49678bad978", + "type": "leaf", + "state": { + "type": "tag", + "state": { + "sortOrder": "frequency", + "useHierarchy": true + } + } + }, + { + "id": "e46494571e0d3029", + "type": "leaf", + "state": { + "type": "outline", + "state": {} + } + } + ] + } + ], + "direction": "horizontal", + "width": 300, + "collapsed": true + }, + "left-ribbon": { + "hiddenItems": { + "switcher:Open quick switcher": false, + "graph:Open graph view": false, + "canvas:Create new canvas": false, + "daily-notes:Open today's daily note": false, + "templates:Insert template": false, + "command-palette:Open command palette": false + } + }, + "active": "5d2a67ec854cad2d", + "lastOpenFiles": [ + "Generative AI/How to Run LLama3 Locally with Ollama and OpenwebUI.md", + "Docker/docker.md", + "Linux/linux-commands.md", + "Linux/ubuntu-server.md", + "Linux/Stand-up a Linux Server.md", + "Linux/Other Linux based servers.md", + "Linux/Install Docker on Ubuntu.md", + "Networking/NGINX Proxy Manager.md", + "Linux/ssh.md", + "Generative AI/Speech to Text with Whisper AI.md", + "Generative AI/Setup Local LLM.md", + "README.md", + "Generative AI/Setup Local LLM", + "Generative AI", + "Networking/Self Hosting Web Sites.md", + "Networking" + ] +} \ No newline at end of file diff --git a/Generative AI/How to Run LLama3 Locally with Ollama and OpenwebUI.md b/Generative AI/How to Run LLama3 Locally with Ollama and OpenwebUI.md new file mode 100644 index 0000000..231ccc5 --- /dev/null +++ b/Generative AI/How to Run LLama3 Locally with Ollama and OpenwebUI.md @@ -0,0 +1,145 @@ +I’m a big fan of Llama. Meta releasing their LLM open source is a net benefit for the tech community at large, and their permissive license allows most medium and small businesses to use their LLMs with little to no restrictions (within the bounds of the law, of course). Their latest release is Llama 3, which has been highly anticipated. + +Llama 3 comes in two sizes: 8 billion and 70 billion parameters. 
This kind of model is trained on a massive amount of text data and can be used for a variety of tasks, including generating text, translating languages, writing different kinds of creative content, and answering your questions in an informative way. Meta touts Llama 3 as one of the best open models available, but it is still under development. Here are the 8B model’s benchmarks compared to Mistral and Gemma (according to Meta).

[![Benchmarks](https://media.dev.to/cdn-cgi/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fax9r9z2w2zghv81grbh7.png)](https://media.dev.to/cdn-cgi/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fax9r9z2w2zghv81grbh7.png)

This raises the question: how can I, a regular individual, run these models locally on my own computer?

## Getting Started with Ollama

That’s where [Ollama](https://ollama.com/) comes in! Ollama is a free and open-source application that lets you run various large language models, including Llama 3, on your own computer, even with limited resources. Ollama takes advantage of the performance gains of llama.cpp, an open-source library designed to let you run LLMs locally with relatively low hardware requirements. It also includes a sort of package manager, letting you download and use LLMs quickly and effectively with a single command.

The first step is [installing Ollama](https://ollama.com/download). It supports all three of the major OSes, with [Windows being a “preview”](https://ollama.com/blog/windows-preview) (a nicer word for beta).

Once it is installed, open up your terminal. On all platforms, the command is the same.

```
ollama run llama3
```

Wait a few minutes while it downloads and loads the model, then start chatting! It should bring you to a chat prompt similar to this one.

```
ollama run llama3
>>> Who was the second president of the united states?
The second President of the United States was John Adams. He served from 1797 to 1801, succeeding
George Washington and being succeeded by Thomas Jefferson.

>>> Who was the 30th?
The 30th President of the United States was Calvin Coolidge! He served from August 2, 1923, to March 4,
1929.

>>> /bye
```

You can chat all day within this terminal chat, but what if you want something more ChatGPT-like?

## Open WebUI

Open WebUI is an extensible, self-hosted UI that runs entirely inside of [Docker](https://docs.docker.com/desktop/). It can be used with Ollama or any other OpenAI-compatible backend, like LiteLLM or my own [OpenAI API for Cloudflare Workers](https://github.com/chand1012/openai-cf-workers-ai).

Assuming you already have [Docker](https://docs.docker.com/desktop/) and Ollama running on your computer, [installation](https://docs.openwebui.com/getting-started/#quick-start-with-docker-) is super simple.

```
docker run -d -p 3000:8080 --add-host=host.docker.internal:host-gateway -v open-webui:/app/backend/data --name open-webui --restart always ghcr.io/open-webui/open-webui:main
```

Then simply go to [http://localhost:3000](http://localhost:3000/), make an account, and start chatting away!
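
Before you make that account, a quick sanity check can save some head-scratching. This is a minimal sketch, assuming the default ports used above (11434 for Ollama, 3000 for Open WebUI):

```
# Confirm Ollama's API is up and list the models pulled so far
curl http://localhost:11434/api/tags

# Confirm the Open WebUI container is running and port 3000 is mapped
docker ps --filter name=open-webui
```

If the first command can't connect, Ollama isn't running (or is bound to a non-default port), and Open WebUI won't see any models.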

[![OpenWebUI Example](https://media.dev.to/cdn-cgi/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Frdi1d35zh09s78o8vqvb.png)](https://media.dev.to/cdn-cgi/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Frdi1d35zh09s78o8vqvb.png)

If you didn’t run Llama 3 earlier, you’ll have to pull some models down before you can start chatting. The easiest way to do this is to click the settings icon after clicking your name in the bottom left.

[![Settings](https://media.dev.to/cdn-cgi/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Ftqyetksyn0y4a0p12ylu.png)](https://media.dev.to/cdn-cgi/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Ftqyetksyn0y4a0p12ylu.png)

Then click “Models” on the left side of the modal and paste in the name of a model from the [Ollama registry](https://ollama.com/models). Here are some models I’ve used and recommend for general purposes.

- `llama3`
- `mistral`
- `llama2`

[![Models Setting Page](https://media.dev.to/cdn-cgi/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Ftxc581jf4w3xszymjfbg.png)](https://media.dev.to/cdn-cgi/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Ftxc581jf4w3xszymjfbg.png)

## Ollama API

If you want to integrate Ollama into your own projects, it offers both its own API and an OpenAI-compatible API. The APIs automatically load a locally held LLM into memory, run the inference, and then unload the model after a certain timeout. You do have to pull any model you want to use before you can run it via the API, which is easily done from the command line.

```
ollama pull mistral
```

### Ollama API

Ollama has its own API available, which also has a [couple of SDKs](https://github.com/ollama/ollama?tab=readme-ov-file#libraries) for JavaScript and Python.

Here is how you can do a simple text generation inference with the API.

```
curl http://localhost:11434/api/generate -d '{
  "model": "mistral",
  "prompt":"Why is the sky blue?"
}'
```

And here’s how you can do a chat generation inference with the API.

```
curl http://localhost:11434/api/chat -d '{
  "model": "mistral",
  "messages": [
    { "role": "user", "content": "why is the sky blue?" }
  ]
}'
```

Replace the `model` parameter with whatever model you want to use. See the [official API docs](https://github.com/ollama/ollama/blob/main/docs/api.md) for more information.

### OpenAI Compatible API

You can also use Ollama as a drop-in replacement (depending on your use case) with the OpenAI libraries. Here’s an example from [their documentation](https://github.com/ollama/ollama/blob/main/docs/openai.md).

```
# Python
from openai import OpenAI

client = OpenAI(
    base_url='http://localhost:11434/v1/',

    # required but ignored
    api_key='ollama',
)

chat_completion = client.chat.completions.create(
    messages=[
        {
            'role': 'user',
            'content': 'Say this is a test',
        }
    ],
    model='mistral',
)
```

This also works for JavaScript.

```
// JavaScript
import OpenAI from 'openai'

const openai = new OpenAI({
  baseURL: 'http://localhost:11434/v1/',

  // required but ignored
  apiKey: 'ollama',
})

const chatCompletion = await openai.chat.completions.create({
  messages: [{ role: 'user', content: 'Say this is a test' }],
  model: 'llama2',
})
```

## Conclusion

The release of Meta's Llama 3 and the open-sourcing of its Large Language Model (LLM) technology mark a major milestone for the tech community. With these advanced models now accessible through local tools like Ollama and Open WebUI, ordinary individuals can tap into their immense potential to generate text, translate languages, craft creative writing, and more. Furthermore, the availability of APIs enables developers to seamlessly integrate LLMs into new projects or enhance existing ones. Ultimately, the democratization of LLM technology through open-source initiatives like Llama 3 unlocks a vast realm of innovative possibilities and fuels creativity in the tech industry. \ No newline at end of file diff --git a/Generative AI/Setup Local LLM.md b/Generative AI/Setup Local LLM.md new file mode 100644 index 0000000..5985cff --- /dev/null +++ b/Generative AI/Setup Local LLM.md @@ -0,0 +1,17 @@ +
## Prereqs

1.
2. Docker, [Install Docker Desktop on Windows | Docker Docs](https://docs.docker.com/desktop/install/windows-install/)
    1. [Install WSL | Microsoft Learn](https://learn.microsoft.com/en-us/windows/wsl/install)
3. Ollama [Download Ollama on Windows](https://ollama.com/download)
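
With the prereqs in place, the actual setup boils down to two commands, both covered in [[How to Run LLama3 Locally with Ollama and OpenwebUI]]. A minimal sketch, assuming the default ports and image names used there:

```
# Pull and run a model with Ollama
ollama run llama3

# Start Open WebUI on http://localhost:3000
docker run -d -p 3000:8080 --add-host=host.docker.internal:host-gateway -v open-webui:/app/backend/data --name open-webui --restart always ghcr.io/open-webui/open-webui:main
```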

## References

[[How to Run LLama3 Locally with Ollama and OpenwebUI]]
[🏡 Home | Open WebUI](https://docs.openwebui.com/)
[Ollama](https://ollama.com/)
[open-webui/open-webui: User-friendly WebUI for LLMs (Formerly Ollama WebUI) (github.com)](https://github.com/open-webui/open-webui)
[Install Docker Desktop on Windows | Docker Docs](https://docs.docker.com/desktop/install/windows-install/)
diff --git a/Generative AI/Speech to Text with Whisper AI.md b/Generative AI/Speech to Text with Whisper AI.md new file mode 100644 index 0000000..037f528 --- /dev/null +++ b/Generative AI/Speech to Text with Whisper AI.md @@ -0,0 +1,5 @@ +## References
[How to Turn Audio to Text using OpenAI Whisper (freecodecamp.org)](https://www.freecodecamp.org/news/how-to-turn-audio-to-text-using-openai-whisper/)
[Introducing Whisper (openai.com)](https://openai.com/research/whisper)
[openai/whisper: Robust Speech Recognition via Large-Scale Weak Supervision (github.com)](https://github.com/openai/whisper)
[WhisperDO/Whisper Install (Offline).md at main · nicholasgcotton/WhisperDO (github.com)](https://github.com/nicholasgcotton/WhisperDO/blob/main/Whisper%20Install%20(Offline).md) \ No newline at end of file diff --git a/Linux/Install Docker on Ubuntu.md b/Linux/Install Docker on Ubuntu.md new file mode 100644 index 0000000..5d360ae --- /dev/null +++ b/Linux/Install Docker on Ubuntu.md @@ -0,0 +1,4 @@ +[Install Docker Engine on Ubuntu | Docker Docs](https://docs.docker.com/engine/install/ubuntu/)

[Install the Compose plugin | Docker Docs](https://docs.docker.com/compose/install/linux/)

diff --git a/Linux/Other Linux based servers.md b/Linux/Other Linux based servers.md new file mode 100644 index 0000000..8ba1be6 --- /dev/null +++ b/Linux/Other Linux based servers.md @@ -0,0 +1,132 @@ +## Optional Maintenance

### Create new sudo account
Ubuntu Server does this by default
```
sudo adduser {username}
sudo usermod -aG sudo {username}
```

### Disable root
Ubuntu Server does this by default
```
sudo passwd -l root
```

### Change HOSTNAME
```
sudo hostnamectl set-hostname {hostname}
hostnamectl

sudo nano /etc/hosts
/* change hostname in hosts file */
```

## Optional Services

### Install xRDP
If you need to use RDP to access a desktop environment
```
sudo apt install xrdp
sudo systemctl enable xrdp
```

### Install GIT
```
sudo apt install git
```

### Install Guest Agent for Proxmox
Applies only if this box is a VM on a Proxmox server

1. Open Proxmox
2. Click Options
3. Turn on QEMU Agent

```
sudo apt install qemu-guest-agent
sudo shutdown
```

### Install Webmin
```
sudo apt install software-properties-common apt-transport-https
sudo wget -q http://www.webmin.com/jcameron-key.asc -O- | sudo apt-key add -
sudo add-apt-repository "deb [arch=amd64] http://download.webmin.com/download/repository sarge contrib"
sudo apt install webmin
sudo ufw allow 10000/tcp
sudo ufw reload
```

### bind9 DNS
```
sudo apt install -y bind9 bind9utils bind9-doc dnsutils
sudo systemctl start named
sudo systemctl enable named
sudo ufw allow 53
sudo ufw reload
```

### Gitea (GitHub alternative)
```
sudo apt install git
sudo apt install mariadb-server

# Log in to MySQL
sudo mysql -u root -p

# Add the git database
CREATE DATABASE gitea;
GRANT ALL PRIVILEGES ON gitea.* TO 'gitea'@'localhost' IDENTIFIED BY "Root";
FLUSH PRIVILEGES;
QUIT;

# Install gitea
sudo wget -O /usr/local/bin/gitea https://dl.gitea.io/gitea/1.16.7/gitea-1.16.7-linux-amd64
sudo chmod +x /usr/local/bin/gitea
gitea --version

# Create gitea user
sudo adduser --system --shell /bin/bash --gecos 'Git Version Control' --group --disabled-password --home /home/git git
sudo mkdir -pv /var/lib/gitea/{custom,data,log}
sudo chown -Rv git:git /var/lib/gitea

sudo mkdir -v /etc/gitea
sudo chown -Rv root:git /etc/gitea
sudo chmod -Rv 770 /etc/gitea

# Append code to the service file
sudo nano /etc/systemd/system/gitea.service

[Unit]
Description=Gitea
After=syslog.target
After=network.target
[Service]
RestartSec=3s
Type=simple
User=git
Group=git
WorkingDirectory=/var/lib/gitea/

ExecStart=/usr/local/bin/gitea web --config /etc/gitea/app.ini
Restart=always
Environment=USER=git HOME=/home/git GITEA_WORK_DIR=/var/lib/gitea
[Install]
WantedBy=multi-user.target

# Start gitea
sudo systemctl start gitea
sudo systemctl status gitea
sudo systemctl enable gitea

# Access gitea
http://localhost:3000

# If you ever need to change settings like DOMAIN
sudo nano /etc/gitea/app.ini

```

diff --git a/Linux/linux-installation.md b/Linux/Stand-up a Linux Server.md similarity index 57% rename from Linux/linux-installation.md rename to Linux/Stand-up a Linux Server.md index 770a521..2815fbc 100644 --- a/Linux/linux-installation.md +++ b/Linux/Stand-up a Linux Server.md @@ -1,23 +1,28 @@ -# Linux Installation -
## Table of Contents
-1. [Introduction](#introduction)
-2. [Pre Install](#pre-install)
- - [Select distro](#select-distro)
- - [Download ISO](#download-iso)
-3.
-
+1. [[#Introduction]]
+2. [[#Pre Install]]
+ 1. [[#Select distro]]
+ 2. [[#Download ISO]]
+3. [[#Installation]]
+4. [[#Post Install]]
+ 1. [[#Update && Upgrade]]
+ 2. [[#Automatic Updates]]
+ 3. [[#Static IP]]
+ 4. [[#Install openssh]]
+ 5. [[#Fix LVM]]
+ 6. [[#Change timezone]]
+ 7. [[#Configure Firewall]]
+ 8. [[#Install fail2ban]]
## Introduction
This guide focuses on installing Linux on a hypervisor of some sort. At least, that is how I most often use this guide: as a checklist for building out a server, dev environment, or something else Linux related. That said, these steps are valid for building out a box as well.

-As far as a hypervisor, I have an old HP Proliant server that is running Windows Server 2019 with Hyper-V installed and I have a newer Proliant that is running Proxmox. This guide doesn't have the specifics of installing on either hypervisor, but I may add that later.
-
+As for a hypervisor, I have an old HP ProLiant server that is running Windows Server 2019 with Hyper-V installed and a newer ProLiant that is running Proxmox. This guide doesn't have the specifics of installing on either hypervisor, but I may add that later.

## Pre Install

### Select distro
-Asking which distro of linux is the best one is like asking which super-hero is the best in a comic shop. The selection of linux distro hinges upon your requirements and what you intend to accomplish with it.
+Asking which distro of Linux is the best one is like asking which superhero is the best in a comic shop. The selection of a Linux distro hinges on your requirements and what you intend to accomplish with it.

The following four are a handful of distros that have remained popular for more than a couple of years. Finding tutorials, YouTube channels, and other assistance is easy for all of these.
@@ -36,8 +41,7 @@ Store the ISO in a place that is accessible to the hypervisor where you will ins
This section should have two parts. The first would outline the steps for creating a virtual machine on your hypervisor onto which you will install your Linux distro. The second part would outline how to install your distro onto the VM.

-Lacking those, I recomment hitting YouTube and the distro's documentation for the step by step to install Linux. It's actually quite easy.
-
+Lacking those, I recommend hitting YouTube and the distro's documentation for the step-by-step to install Linux. It's actually quite easy.
## Post Install

### Update && Upgrade
@@ -78,6 +82,8 @@ network:
sudo netplan apply
```

+Note: in the `routes` section, make sure the `to` and `via` entries line up with each other.
+
### Install openssh
You can select this during Ubuntu install
```
sudo apt install openssh-server
ssh-keygen
```
-
#### SSH on Client machine
LINUX client machine
```
@@ -97,7 +102,6 @@ Windows client
ssh-keygen
Get-Content $env:USERPROFILE\.ssh\id_rsa.pub | ssh {username}@{ip_address} "cat >> .ssh/authorized_keys"
```
-
#### Lockdown Logins to SSH only
```
sudo nano /etc/ssh/sshd_config
@@ -162,137 +166,4 @@ sudo systemctl restart fail2ban
sudo fail2ban-client status
/* use to review banned */
```
-## Optional Maintenance
-
-### Create new sudo account
-Ubuntu Server does this by default
-```
-sudo adduser {username}
-sudo usermod -aG sudo {username}
-```
-
-### Disable root
-Ubuntu Server does this by default
-```
-sudo passwd -l root
-```
-
-### Change HOSTNAME
-```
-sudo hostnamectl set-hostname {hostname}
-hostnamectl
-
-sudo nano /etc/hosts
-/* change hostname in hosts file */
-```
-
-## Optional Services
-
-### Install xRDP
-If you need to use RDP to access a desktop environment
-```
-sudo apt install xrdp
-sudo systemctl enable xrdp
-```
-
-### Install GIT
-```
-sudo apt install git
-```
-
-### Install Guest Agent for Proxmox
-Applies only if this box is in a Proxmox server
-
-1. Open Proxmox
-2. Click Options
-3. Turn on QEMU Agent
-
-```
-sudo apt install qemu-guest-agent
-sudo shutdown
-```
-
-### Install Webmin
-```
-sudo apt install software-properties-common apt-transport-https
-sudo wget -q http://www.webmin.com/jcameron-key.asc -O- | sudo apt-key add -
-sudo add-apt-repository "deb [arch=amd64] http://download.webmin.com/download/repository sarge contrib"
-sudo apt install webmin
-sudo ufw allow 10000/tcp
-sudo ufw reload
-```
-
-### bind9 DNS
-```
-sudo apt install -y bind9 bind9utils bind9-doc dnsutils
-sudo systemctl start named
-sudo systemctl enable named
-sudo ufw allow 53
-sudo ufw reload
-```
-
-### Gitea (GitHub alternative)
-```
-sudo apt install git
-sudo apt install mariadb-server
-
-# Login to MySQL
-sudo mysql -u root -p
-
-# Add the git database
-CREATE DATABASE gitea;
-GRANT ALL PRIVILEGES ON gitea.* TO 'gitea'@'localhost' IDENTIFIED BY "Root";
-FLUSH PRIVILEGES;
-QUIT;
-
-# Install gitea
-sudo wget -O /usr/local/bin/gitea https://dl.gitea.io/gitea/1.16.7/gitea-1.16.7-linux-amd64
-sudo chmod +x /usr/local/bin/gitea
-gitea --version
-
-# Create gitea user
-sudo adduser --system --shell /bin/bash --gecos 'Git Version Control' --group --disabled-password --home /home/git git
-sudo mkdir -pv /var/lib/gitea/{custom,data,log}
-sudo chown -Rv git:git /var/lib/gitea
-sudo chown -Rv git:git /var/lib/gitea
-
-sudo mkdir -v /etc/gitea
-sudo chown -Rv root:git /etc/gitea
-sudo chmod -Rv 770 /etc/gitea
-sudo chmod -Rv 770 /etc/gitea
-
-# Append code to the service file
-sudo nano /etc/systemd/system/gitea.service
-
-[Unit]
-Description=Gitea
-After=syslog.target
-After=network.target
-[Service]
-RestartSec=3s
-Type=simple
-User=git
-Group=git
-WorkingDirectory=/var/lib/gitea/
-
-ExecStart=/usr/local/bin/gitea web --config /etc/gitea/app.ini
-Restart=always
-Environment=USER=git HOME=/home/git GITEA_WORK_DIR=/var/lib/gitea
-[Install]
-WantedBy=multi-user.target
-
-# Start gitea
-sudo systemctl start gitea
-sudo systemctl status gitea
-sudo systemctl enable gitea
-
-# Access gitea
-http://localhost:3000
-
-# If you ever need to change settings like DOMAIN
-sudo nano /etc/gitea/app.ini
-
-```
-
-
-### Logging with Prometheus
+## [[Other Linux based servers]]
diff --git a/Linux/ssh.md b/Linux/ssh.md index 3e27521..772faa0 100644 --- a/Linux/ssh.md +++ b/Linux/ssh.md @@ -41,3 +41,4 @@ Note about git: I have noticed that you need to generate keys for both your user
### Login to remote server using your key
`ssh username@ip_address`
+
diff --git a/Networking/NGINX Proxy Manager.md b/Networking/NGINX Proxy Manager.md new file mode 100644 index 0000000..53de909 --- /dev/null +++ b/Networking/NGINX Proxy Manager.md @@ -0,0 +1,20 @@ +## Introduction

This YouTube video is a good step-by-step guide to implementing Nginx Proxy Manager:
https://youtu.be/P3imFC7GSr0?si=RTv_vYqMfi5VCIdR

Learn about the difference between forward and reverse proxies, and what you would use them for:
[What is a reverse proxy? | Proxy servers explained | Cloudflare](https://www.cloudflare.com/learning/cdn/glossary/reverse-proxy/)

## Prerequisites

[[Stand-up a Linux Server]] with Docker and Docker Compose installed

## Software
[Nginx Proxy Manager](https://nginxproxymanager.com/)

## References

[Configure port forwarding using FortiGate... - Fortinet Community](https://community.fortinet.com/t5/FortiGate/Technical-Tip-Configure-port-forwarding-using-FortiGate-VIPs/ta-p/196734)
[Adding SSL certs to NGINX docker container - Stack Overflow](https://stackoverflow.com/questions/51399883/adding-ssl-certs-to-nginx-docker-container)
[How to Install an SSL Certificate on Nginx - Mister PKI](https://www.misterpki.com/nginx-ssl-certificate/) \ No newline at end of file diff --git a/Networking/Self Hosting Web Sites.md b/Networking/Self Hosting Web Sites.md new file mode 100644 index 0000000..cfe4a01 --- /dev/null +++ b/Networking/Self Hosting Web Sites.md @@ -0,0 +1,9 @@ +## Self Hosting Web Sites and Applications
https://youtu.be/GarMdDTAZJo?si=7kXvIBmJHh-le76I

### Prerequisites

1. Domain Name
2. Website on a local server
3. Cloudflare account; transfer your domain's nameservers from GoDaddy to Cloudflare (a quick verification is sketched below)
4. Nginx Proxy Manager
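
Once the nameservers have moved, two quick checks confirm that DNS and the proxied site behave as expected. A minimal sketch, where `example.com` is a placeholder for your own domain:

```
# NS records should list Cloudflare nameservers after the transfer
dig +short NS example.com

# The site should answer over HTTPS through the Cloudflare proxy
curl -I https://example.com
``` \ No newline at end of file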