
fix the output from executing tests

pull/108/head
Elena Shirokova committed 4 months ago
commit 85a3a1a5fc
1 changed file with 60 changed lines:
week4/community-contributions/unit-tests-generator.ipynb

@@ -11,16 +11,16 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
"!pipenv install pytest pytest-cov"
"#!pipenv install pytest pytest-cov"
]
},
{
"cell_type": "code",
"execution_count": 1,
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
@@ -63,7 +63,7 @@
},
{
"cell_type": "code",
"execution_count": 3,
"execution_count": 4,
"metadata": {},
"outputs": [],
"source": [
@@ -81,7 +81,7 @@
},
{
"cell_type": "code",
"execution_count": 4,
"execution_count": 5,
"metadata": {},
"outputs": [],
"source": [
@@ -102,10 +102,8 @@
"def execute_coverage_report(python_interpreter=sys.executable):\n",
" if not python_interpreter:\n",
" raise EnvironmentError(\"Python interpreter not found in the specified virtual environment.\")\n",
" # test_code_path = Path(\"tests\")\n",
" # command = [\"pytest\", \"-cov\",\"--capture=no\"]\n",
" \n",
" command = [\"coverage\", \"run\", \"-m\", \"pytest\"]\n",
" # command =[\"pytest\", \"--cov=your_package\", \"--cov-report=term-missing\"]\n",
"\n",
" try:\n",
" result = subprocess.run(command, check=True, capture_output=True, text=True)\n",
@@ -117,15 +115,7 @@
" print(\"Output:\\n\", e.stdout)\n",
" print(\"Errors:\\n\", e.stderr)\n",
" # Extracting failed test information\n",
" failed_tests = []\n",
" for line in e.stdout.splitlines():\n",
" if \"FAILED\" in line and \"::\" in line:\n",
" failed_tests.append(line.strip())\n",
" if failed_tests:\n",
" print(\"Failed Tests:\")\n",
" for test in failed_tests:\n",
" print(test)\n",
" return failed_tests\n",
" return e.stdout\n",
"\n",
"def save_unit_tests(code):\n",
"\n",
@@ -179,7 +169,8 @@
" print(\"Failed Tests:\")\n",
" for test in failed_tests:\n",
" print(test)\n",
" return e.stderr\n",
" \n",
" return e.stdout\n",
" "
]
},
@@ -192,7 +183,7 @@
},
{
"cell_type": "code",
"execution_count": 5,
"execution_count": 6,
"metadata": {},
"outputs": [],
"source": [
@@ -201,15 +192,18 @@
},
{
"cell_type": "code",
"execution_count": 6,
"execution_count": 7,
"metadata": {},
"outputs": [],
"source": [
"def get_user_prompt(code):\n",
"\n",
" user_prompt = \"Write for a python code the unit test cases.\"\n",
" user_prompt += \"Return unit tests cases using pytest library, do not create any custom imports; do not explain your work other than a few comments.\"\n",
" user_prompt += \"Do not insert the function to be tested in the output before the tests. Validate both the case where the function is executed successfully and where it is expected to fail.\"\n",
" user_prompt += \"Return readable unit tests cases using pytest library, do not create any custom imports, don't forget to import errors if needed; do not explain your work other than a few comments.\"\n",
" user_prompt += \"The tests should include normal inputs, the inputs where the code is expected to fail, edge case and error handling.\"\n",
" user_prompt += \"Do not insert the function to be tested in the output before the tests.\"\n",
" \n",
"\n",
" user_prompt += code\n",
"\n",
" return user_prompt"
@@ -217,7 +211,7 @@
},
{
"cell_type": "code",
"execution_count": 7,
"execution_count": 8,
"metadata": {},
"outputs": [],
"source": [
@@ -298,7 +292,7 @@
},
{
"cell_type": "code",
"execution_count": 8,
"execution_count": 9,
"metadata": {},
"outputs": [],
"source": [
@@ -326,7 +320,7 @@
},
{
"cell_type": "code",
"execution_count": 9,
"execution_count": 10,
"metadata": {},
"outputs": [],
"source": [
@@ -349,7 +343,7 @@
},
{
"cell_type": "code",
"execution_count": 10,
"execution_count": 12,
"metadata": {},
"outputs": [],
"source": [
@@ -406,6 +400,20 @@
"\n",
"ui.launch(inbrowser=True)"
]
+},
+{
+"cell_type": "code",
+"execution_count": null,
+"metadata": {},
+"outputs": [],
+"source": []
+},
+{
+"cell_type": "code",
+"execution_count": null,
+"metadata": {},
+"outputs": [],
+"source": []
}
],
"metadata": {
