From 154c3609ee74112697eb7669f20a7b62bc48ae0d Mon Sep 17 00:00:00 2001
From: Petri Alapiessa
Date: Thu, 24 Apr 2025 11:15:36 +0300
Subject: [PATCH] Unit tests for pricer-module

---
 .../pricer_test/README.md            | 24 ++----
 .../pricer_test/pricer/ci.py         | 11 +--
 .../pricer_test/requirements.txt     |  1 +
 .../pricer_test/tests/test_lib.py    |  5 --
 .../pricer_test/tests/test_pricer.py | 84 +++++++++++++++++++
 5 files changed, 98 insertions(+), 27 deletions(-)
 delete mode 100644 week8/community_contributions/pricer_test/tests/test_lib.py
 create mode 100644 week8/community_contributions/pricer_test/tests/test_pricer.py

diff --git a/week8/community_contributions/pricer_test/README.md b/week8/community_contributions/pricer_test/README.md
index 51f8ddc..7ab4ca4 100644
--- a/week8/community_contributions/pricer_test/README.md
+++ b/week8/community_contributions/pricer_test/README.md
@@ -1,18 +1,5 @@
 # Run Continuous Integration (CI) Tests on Modal
-Note!
-The HF secret in Modal is named "huggingface-secret". Pls rename if your secret has another name.
-
-## Test modal deployment
-You can test pricer.ci in Modal:
-(`modal deploy -m pricer.ci`)
-In python CLI:
-(`import modal`)
-(`Pricer = modal.Cls.lookup("pricer-ci-testing", "Pricer")`)
-(`pricer = Pricer()`)
-(`reply = pricer.price.remote("Quadcast HyperX condenser mic, connects via usb-c to your computer for crystal clear audio")`)
-(`print(reply)`)
-
 ## Unit testing
 
 Unit test strategy created like in
 [This example repo](https://github.com/modal-labs/ci-on-modal)
@@ -20,11 +7,12 @@ Unit test strategy created like in
 ## Usage
 
 All commands below are run from the root of the repository (this directory).
+_Note_: I removed the Modal decorators from the `pricer.ci` module so that the unit tests can run.
 
 ### Run tests remotely on Modal
 
 ```bash
-modal run pricer.ci
+modal run pricer.ci::pytest
 ```
 
 On the first execution, the [container image](https://modal.com/docs/guide/custom-container)
@@ -39,9 +27,15 @@ To debug the tests, you can open a shell
 in the exact same environment that the tests are run in:
 
 ```bash
-modal shell pricer.ci
+modal shell pricer.ci::pytest
 ```
 
 _Note_: On the Modal worker, the `pytest` command is run from the home
 directory, `/root`, which contains the `tests` folder, but the `modal shell`
 command will drop you at the top of the filesystem, `/`.
+
+To run the tests:
+```bash
+cd /root
+pytest
+```
\ No newline at end of file
diff --git a/week8/community_contributions/pricer_test/pricer/ci.py b/week8/community_contributions/pricer_test/pricer/ci.py
index 5037646..2b337cb 100644
--- a/week8/community_contributions/pricer_test/pricer/ci.py
+++ b/week8/community_contributions/pricer_test/pricer/ci.py
@@ -39,17 +39,16 @@ FINETUNED_DIR = MODEL_DIR + FINETUNED_MODEL
 QUESTION = "How much does this cost to the nearest dollar?"
PREFIX = "Price is $" -@app.cls(image=image, secrets=secrets, gpu=GPU, timeout=1800) + class Pricer: - @modal.build() def download_model_to_folder(self): from huggingface_hub import snapshot_download import os os.makedirs(MODEL_DIR, exist_ok=True) - snapshot_download(BASE_MODEL, local_dir=BASE_DIR) - snapshot_download(FINETUNED_MODEL, revision=REVISION, local_dir=FINETUNED_DIR) + print(f"Using this HF Token: {hf_token}") + snapshot_download(BASE_MODEL, local_dir=BASE_DIR, use_auth_token=hf_token) + snapshot_download(FINETUNED_MODEL, revision=REVISION, local_dir=FINETUNED_DIR, use_auth_token=hf_token) - @modal.enter() def setup(self): import os import torch @@ -78,7 +77,6 @@ class Pricer: self.fine_tuned_model = PeftModel.from_pretrained(self.base_model, FINETUNED_DIR, revision=REVISION) - @modal.method() def price(self, description: str) -> float: import os import re @@ -98,6 +96,5 @@ class Pricer: match = re.search(r"[-+]?\d*\.\d+|\d+", contents) return float(match.group()) if match else 0 - @modal.method() def wake_up(self) -> str: return "ok" diff --git a/week8/community_contributions/pricer_test/requirements.txt b/week8/community_contributions/pricer_test/requirements.txt index 4612978..409ce64 100644 --- a/week8/community_contributions/pricer_test/requirements.txt +++ b/week8/community_contributions/pricer_test/requirements.txt @@ -4,3 +4,4 @@ transformers bitsandbytes accelerate peft +dotenv diff --git a/week8/community_contributions/pricer_test/tests/test_lib.py b/week8/community_contributions/pricer_test/tests/test_lib.py deleted file mode 100644 index e3046e7..0000000 --- a/week8/community_contributions/pricer_test/tests/test_lib.py +++ /dev/null @@ -1,5 +0,0 @@ -from my_pkg.lib import has_gpu - - -def test_torch_cuda(): - assert has_gpu() diff --git a/week8/community_contributions/pricer_test/tests/test_pricer.py b/week8/community_contributions/pricer_test/tests/test_pricer.py new file mode 100644 index 0000000..027643e --- /dev/null +++ b/week8/community_contributions/pricer_test/tests/test_pricer.py @@ -0,0 +1,84 @@ +import pdb +from pricer.ci import Pricer +from unittest.mock import patch, MagicMock +import torch +import pytest +from transformers import BitsAndBytesConfig + +BASE_MODEL = "meta-llama/Meta-Llama-3.1-8B" +PROJECT_NAME = "pricer" +HF_USER = "ed-donner" # your HF name here! Or use mine if you just want to reproduce my results. 
+RUN_NAME = "2024-09-13_13.04.39" +PROJECT_RUN_NAME = f"{PROJECT_NAME}-{RUN_NAME}" +REVISION = "e8d637df551603dc86cd7a1598a8f44af4d7ae36" +FINETUNED_MODEL = f"{HF_USER}/{PROJECT_RUN_NAME}" +MODEL_DIR = "hf-cache/" +BASE_DIR = MODEL_DIR + BASE_MODEL +FINETUNED_DIR = MODEL_DIR + FINETUNED_MODEL + +@pytest.fixture +def pricer(): + return Pricer() + +def test_wake_up(): + pricer = Pricer() + assert pricer.wake_up() == "ok" + + +@patch('transformers.AutoTokenizer') +@patch('peft.PeftModel') +@patch('transformers.AutoModelForCausalLM') +def test_setup(MockAutoModel, MockPeftModel, MockAutoTokenizer, pricer): + # Setup mocks + mock_tokenizer = MockAutoTokenizer.from_pretrained.return_value + mock_model = MockAutoModel.from_pretrained.return_value + mock_peft_model = MockPeftModel.from_pretrained.return_value + + # Call the setup method + pricer.setup() + + # Assertions to ensure the setup method works correctly + MockAutoTokenizer.from_pretrained.assert_called_once_with(BASE_DIR) + assert pricer.tokenizer == mock_tokenizer + assert pricer.tokenizer.pad_token == pricer.tokenizer.eos_token + assert pricer.tokenizer.padding_side == "right" + + quant_config = BitsAndBytesConfig( + load_in_4bit=True, + bnb_4bit_use_double_quant=True, + bnb_4bit_compute_dtype=torch.bfloat16, + bnb_4bit_quant_type="nf4" + ) + + MockAutoModel.from_pretrained.assert_called_once_with( + BASE_DIR, + quantization_config=quant_config, + device_map="auto" + ) + assert pricer.base_model == mock_model + + MockPeftModel.from_pretrained.assert_called_once_with(mock_model, FINETUNED_DIR, revision=REVISION) + assert pricer.fine_tuned_model == mock_peft_model + + +@patch('transformers.AutoTokenizer') +@patch('peft.PeftModel') +def test_price(MockPeftModel, MockAutoTokenizer, pricer): + # Setup mocks + mock_tokenizer = MockAutoTokenizer.return_value + mock_tokenizer.encode.return_value = torch.tensor([[1, 2, 3]]) + mock_tokenizer.decode.return_value = "Price is $123.45" + + mock_model = MockPeftModel.return_value + mock_model.generate.return_value = torch.tensor([[1, 2, 3, 4, 5]]) + + # Assign mocks to the pricer instance + pricer.tokenizer = mock_tokenizer + pricer.fine_tuned_model = mock_model + + # Call the method + description = "Test description" + result = pricer.price(description) + + # Assert the result + assert result == 123.45