diff --git a/.github/workflows/publish-to-production.yml b/.github/workflows/publish-to-production.yml
new file mode 100644
index 00000000..7e288674
--- /dev/null
+++ b/.github/workflows/publish-to-production.yml
@@ -0,0 +1,36 @@
+name: Publish to Production
+on:
+  release:
+    types: [published]
+jobs:
+  build-n-publish:
+    name: Publish to PyPI. Build and publish Python 🐍 distributions 📦
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v3
+      - name: Poetry Setup
+        uses: snok/install-poetry@v1
+        with:
+          version: 1.1.13
+      - name: Build and publish to pypi
+        run: |
+          poetry build
+          poetry config pypi-token.pypi ${{ secrets.PYPI_TOKEN }}
+          poetry publish
+  tweet-new-release:
+    name: Tweet about the new release
+    needs: build-n-publish
+    runs-on: ubuntu-latest
+    steps:
+      - uses: ethomson/send-tweet-action@v1
+        with:
+          status: |
+            ScanAPI version ${{github.ref_name}} released 🚀
+
+            📃 Find out more in: https://github.com/scanapi/scanapi/releases/tag/${{github.ref_name}}
+
+            #ScanAPI #OpenSource #Release #API
+          consumer-key: ${{ secrets.TWITTER_CONSUMER_API_KEY }}
+          consumer-secret: ${{ secrets.TWITTER_CONSUMER_API_SECRET }}
+          access-token: ${{ secrets.TWITTER_ACCESS_TOKEN }}
+          access-token-secret: ${{ secrets.TWITTER_ACCESS_TOKEN_SECRET }}
diff --git a/.github/workflows/publish-to-pypi.yml b/.github/workflows/publish-to-pypi.yml
deleted file mode 100644
index fcf535af..00000000
--- a/.github/workflows/publish-to-pypi.yml
+++ /dev/null
@@ -1,19 +0,0 @@
-name: Publish to PyPI
-on:
- release:
- types: [published]
-jobs:
- build-n-publish:
- name: PyPI - Build and publish Python 🐍 distributions 📦
- runs-on: ubuntu-latest
- steps:
- - uses: actions/checkout@master
- - name: Poetry Setup
- uses: snok/install-poetry@v1
- with:
- version: 1.1.13
- - name: Build and publish to pypi
- run: |
- poetry build
- poetry config pypi-token.pypi ${{ secrets.PYPI_TOKEN }}
- poetry publish
\ No newline at end of file
diff --git a/.github/workflows/run-examples.yml b/.github/workflows/run-examples.yml
index 9aca0c73..98555cb5 100644
--- a/.github/workflows/run-examples.yml
+++ b/.github/workflows/run-examples.yml
@@ -1,4 +1,8 @@
-on: pull_request
+on:
+ pull_request:
+ push:
+ branches:
+ - main
name: ScanAPI Examples
jobs:
poke-api:
diff --git a/CHANGELOG.md b/CHANGELOG.md
index fde335c2..345b542b 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -7,6 +7,13 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
## [Unreleased]
+## [2.6.2] - 2022-06-01
+### Added
+- Print test results summary in console [#497](https://github.com/scanapi/scanapi/issues/497)
+
+### Fixed
+- Error when using list (or any other) comprehension in Python Code [#515](https://github.com/scanapi/scanapi/pull/515)
+
## [2.6.1] - 2022-04-12
### Changed
- Implement new details to help users on visualize related request data. [#506](https://github.com/scanapi/scanapi/pull/506)
@@ -235,29 +242,30 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
### Fixed
- Fix vars interpolation.
-[Unreleased]: https://github.com/camilamaia/scanapi/compare/v2.6.1...HEAD
-[2.6.1]: https://github.com/camilamaia/scanapi/compare/v2.6.0...v2.6.1
-[2.6.0]: https://github.com/camilamaia/scanapi/compare/v2.5.0...v2.6.0
-[2.5.0]: https://github.com/camilamaia/scanapi/compare/v2.4.0...v2.5.0
-[2.4.0]: https://github.com/camilamaia/scanapi/compare/v2.3.0...v2.4.0
-[2.3.0]: https://github.com/camilamaia/scanapi/compare/v2.2.0...v2.3.0
-[2.2.0]: https://github.com/camilamaia/scanapi/compare/v2.1.0...v2.2.0
-[2.1.0]: https://github.com/camilamaia/scanapi/compare/v2.0.0...v2.1.0
-[2.0.0]: https://github.com/camilamaia/scanapi/compare/v1.0.5...v2.0.0
-[1.0.5]: https://github.com/camilamaia/scanapi/compare/v1.0.4...v1.0.5
-[1.0.4]: https://github.com/camilamaia/scanapi/compare/v1.0.3...v1.0.4
-[1.0.3]: https://github.com/camilamaia/scanapi/compare/v1.0.2...v1.0.3
-[1.0.2]: https://github.com/camilamaia/scanapi/compare/v1.0.1...v1.0.2
-[1.0.1]: https://github.com/camilamaia/scanapi/compare/v1.0.0...v1.0.1
-[1.0.0]: https://github.com/camilamaia/scanapi/compare/v0.1.0...v1.0.0
-[0.1.0]: https://github.com/camilamaia/scanapi/compare/v0.0.19...v0.1.0
-[0.0.19]: https://github.com/camilamaia/scanapi/compare/v0.0.18...v0.0.19
-[0.0.18]: https://github.com/camilamaia/scanapi/compare/v0.0.17...v0.0.18
-[0.0.17]: https://github.com/camilamaia/scanapi/compare/v0.0.16...v0.0.17
-[0.0.16]: https://github.com/camilamaia/scanapi/compare/v0.0.15...v0.0.16
-[0.0.15]: https://github.com/camilamaia/scanapi/compare/v0.0.14...v0.0.15
-[0.0.14]: https://github.com/camilamaia/scanapi/compare/v0.0.13...v0.0.14
-[0.0.13]: https://github.com/camilamaia/scanapi/compare/v0.0.12...v0.0.13
-[0.0.12]: https://github.com/camilamaia/scanapi/compare/v0.0.11...v0.0.12
-[0.0.11]: https://github.com/camilamaia/scanapi/compare/v0.0.10...v0.0.11
-[0.0.10]: https://github.com/camilamaia/scanapi/releases/tag/v0.0.10
+[Unreleased]: https://github.com/scanapi/scanapi/compare/v2.6.2...HEAD
+[2.6.2]: https://github.com/scanapi/scanapi/compare/v2.6.1...v2.6.2
+[2.6.1]: https://github.com/scanapi/scanapi/compare/v2.6.0...v2.6.1
+[2.6.0]: https://github.com/scanapi/scanapi/compare/v2.5.0...v2.6.0
+[2.5.0]: https://github.com/scanapi/scanapi/compare/v2.4.0...v2.5.0
+[2.4.0]: https://github.com/scanapi/scanapi/compare/v2.3.0...v2.4.0
+[2.3.0]: https://github.com/scanapi/scanapi/compare/v2.2.0...v2.3.0
+[2.2.0]: https://github.com/scanapi/scanapi/compare/v2.1.0...v2.2.0
+[2.1.0]: https://github.com/scanapi/scanapi/compare/v2.0.0...v2.1.0
+[2.0.0]: https://github.com/scanapi/scanapi/compare/v1.0.5...v2.0.0
+[1.0.5]: https://github.com/scanapi/scanapi/compare/v1.0.4...v1.0.5
+[1.0.4]: https://github.com/scanapi/scanapi/compare/v1.0.3...v1.0.4
+[1.0.3]: https://github.com/scanapi/scanapi/compare/v1.0.2...v1.0.3
+[1.0.2]: https://github.com/scanapi/scanapi/compare/v1.0.1...v1.0.2
+[1.0.1]: https://github.com/scanapi/scanapi/compare/v1.0.0...v1.0.1
+[1.0.0]: https://github.com/scanapi/scanapi/compare/v0.1.0...v1.0.0
+[0.1.0]: https://github.com/scanapi/scanapi/compare/v0.0.19...v0.1.0
+[0.0.19]: https://github.com/scanapi/scanapi/compare/v0.0.18...v0.0.19
+[0.0.18]: https://github.com/scanapi/scanapi/compare/v0.0.17...v0.0.18
+[0.0.17]: https://github.com/scanapi/scanapi/compare/v0.0.16...v0.0.17
+[0.0.16]: https://github.com/scanapi/scanapi/compare/v0.0.15...v0.0.16
+[0.0.15]: https://github.com/scanapi/scanapi/compare/v0.0.14...v0.0.15
+[0.0.14]: https://github.com/scanapi/scanapi/compare/v0.0.13...v0.0.14
+[0.0.13]: https://github.com/scanapi/scanapi/compare/v0.0.12...v0.0.13
+[0.0.12]: https://github.com/scanapi/scanapi/compare/v0.0.11...v0.0.12
+[0.0.11]: https://github.com/scanapi/scanapi/compare/v0.0.10...v0.0.11
+[0.0.10]: https://github.com/scanapi/scanapi/releases/tag/v0.0.10
diff --git a/Dockerfile b/Dockerfile
index 80daba5b..e3b72021 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,4 +1,4 @@
-FROM python:3.10.0a6-slim
+FROM python:3.10.4-bullseye
LABEL maintainer="github.com/camilamaia"
@@ -6,7 +6,7 @@ ENV PATH="~/.local/bin:${PATH}"
RUN pip install pip setuptools --upgrade
-RUN pip install scanapi==2.6.1
+RUN pip install scanapi==2.6.2
COPY . /app
diff --git a/README.md b/README.md
index 8ae9db3c..eedc03af 100644
--- a/README.md
+++ b/README.md
@@ -10,6 +10,9 @@
+
+
+
@@ -84,15 +87,10 @@ Then, the lib will hit the specified endpoints and generate a `scanapi-report.ht
alt="An overview screenshot of the report."
>
-
## Documentation
diff --git a/images/report-print-closed.png b/images/report-print-closed.png
index f51ed604..605fad66 100644
Binary files a/images/report-print-closed.png and b/images/report-print-closed.png differ
diff --git a/images/report-print-opened.png b/images/report-print-opened.png
new file mode 100644
index 00000000..145d5956
Binary files /dev/null and b/images/report-print-opened.png differ
diff --git a/images/report-print-request.png b/images/report-print-request.png
deleted file mode 100644
index 73ded489..00000000
Binary files a/images/report-print-request.png and /dev/null differ
diff --git a/images/report-print-response.png b/images/report-print-response.png
deleted file mode 100644
index 62a285fe..00000000
Binary files a/images/report-print-response.png and /dev/null differ
diff --git a/poetry.lock b/poetry.lock
index 92f1fc9b..b3bbc273 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -164,6 +164,17 @@ category = "dev"
optional = false
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*"
+[[package]]
+name = "commonmark"
+version = "0.9.1"
+description = "Python parser for the CommonMark Markdown spec"
+category = "main"
+optional = false
+python-versions = "*"
+
+[package.extras]
+test = ["flake8 (==3.7.8)", "hypothesis (==3.55.3)"]
+
[[package]]
name = "coverage"
version = "5.5"
@@ -187,6 +198,14 @@ python-versions = ">=3.6.2,<4.0"
requests = ">=2.24.0,<3.0.0"
responses = ">=0.12.0,<0.13.0"
+[[package]]
+name = "dataclasses"
+version = "0.8"
+description = "A backport of the dataclasses module for Python 3.6"
+category = "main"
+optional = false
+python-versions = ">=3.6, <3.7"
+
[[package]]
name = "distlib"
version = "0.3.2"
@@ -516,7 +535,7 @@ python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
name = "pygments"
version = "2.9.0"
description = "Pygments is a syntax highlighting package written in Python."
-category = "dev"
+category = "main"
optional = false
python-versions = ">=3.5"
@@ -685,6 +704,23 @@ urllib3 = ">=1.25.10"
[package.extras]
tests = ["coverage (>=3.7.1,<6.0.0)", "pytest-cov", "pytest-localserver", "flake8", "pytest (>=4.6,<5.0)", "pytest (>=4.6)"]
+[[package]]
+name = "rich"
+version = "12.1.0"
+description = "Render rich text, tables, progress bars, syntax highlighting, markdown and more to the terminal"
+category = "main"
+optional = false
+python-versions = ">=3.6.2,<4.0.0"
+
+[package.dependencies]
+commonmark = ">=0.9.0,<0.10.0"
+dataclasses = {version = ">=0.7,<0.9", markers = "python_version < \"3.7\""}
+pygments = ">=2.6.0,<3.0.0"
+typing-extensions = {version = ">=3.7.4,<5.0", markers = "python_version < \"3.9\""}
+
+[package.extras]
+jupyter = ["ipywidgets (>=7.5.1,<8.0.0)"]
+
[[package]]
name = "sh"
version = "1.14.1"
@@ -890,7 +926,7 @@ python-versions = "*"
name = "typing-extensions"
version = "3.10.0.0"
description = "Backported and Experimental Type Hints for Python 3.5+"
-category = "dev"
+category = "main"
optional = false
python-versions = "*"
@@ -943,7 +979,7 @@ testing = ["pytest (>=4.6)", "pytest-checkdocs (>=2.4)", "pytest-flake8", "pytes
[metadata]
lock-version = "1.1"
python-versions = ">=3.6.2,<4.0.0"
-content-hash = "a9da3cb8306ab6d934e99d96cedfe429e3852dd1c5b6867510a5965d71cd9837"
+content-hash = "018a2b62aa00f9284f820b3e2d13e1f39df9d0da3202a7304ecdde79942ab710"
[metadata.files]
alabaster = [
@@ -1007,6 +1043,10 @@ colorama = [
{file = "colorama-0.4.4-py2.py3-none-any.whl", hash = "sha256:9f47eda37229f68eee03b24b9748937c7dc3868f906e8ba69fbcbdd3bc5dc3e2"},
{file = "colorama-0.4.4.tar.gz", hash = "sha256:5941b2b48a20143d2267e95b1c2a7603ce057ee39fd88e7329b0c292aa16869b"},
]
+commonmark = [
+ {file = "commonmark-0.9.1-py2.py3-none-any.whl", hash = "sha256:da2f38c92590f83de410ba1a3cbceafbc74fee9def35f9251ba9a971d6d66fd9"},
+ {file = "commonmark-0.9.1.tar.gz", hash = "sha256:452f9dc859be7f06631ddcb328b6919c67984aca654e5fefb3914d54691aed60"},
+]
coverage = [
{file = "coverage-5.5-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:b6d534e4b2ab35c9f93f46229363e17f63c53ad01330df9f2d6bd1187e5eaacf"},
{file = "coverage-5.5-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:b7895207b4c843c76a25ab8c1e866261bcfe27bfaa20c192de5190121770672b"},
@@ -1065,6 +1105,10 @@ curlify2 = [
{file = "curlify2-1.0.1-py3-none-any.whl", hash = "sha256:d3ef85a84a6ddd0967e7105375a1b4e29fa1ddd8e77428dfaa884c7640ee380d"},
{file = "curlify2-1.0.1.tar.gz", hash = "sha256:2a9dc7b8902e1301f3600ccfa61905e9812583dba83798310db678fb9ed6e256"},
]
+dataclasses = [
+ {file = "dataclasses-0.8-py3-none-any.whl", hash = "sha256:0201d89fa866f68c8ebd9d08ee6ff50c0b255f8ec63a71c16fda7af82bb887bf"},
+ {file = "dataclasses-0.8.tar.gz", hash = "sha256:8479067f342acf957dc82ec415d355ab5edb7e7646b90dc6e2fd1d96ad084c97"},
+]
distlib = [
{file = "distlib-0.3.2-py2.py3-none-any.whl", hash = "sha256:23e223426b28491b1ced97dc3bbe183027419dfc7982b4fa2f05d5f3ff10711c"},
{file = "distlib-0.3.2.zip", hash = "sha256:106fef6dc37dd8c0e2c0a60d3fca3e77460a48907f335fa28420463a6f799736"},
@@ -1378,6 +1422,10 @@ responses = [
{file = "responses-0.12.1-py2.py3-none-any.whl", hash = "sha256:ef265bd3200bdef5ec17912fc64a23570ba23597fd54ca75c18650fa1699213d"},
{file = "responses-0.12.1.tar.gz", hash = "sha256:2e5764325c6b624e42b428688f2111fea166af46623cb0127c05f6afb14d3457"},
]
+rich = [
+ {file = "rich-12.1.0-py3-none-any.whl", hash = "sha256:b60ff99f4ff7e3d1d37444dee2b22fdd941c622dbc37841823ec1ce7f058b263"},
+ {file = "rich-12.1.0.tar.gz", hash = "sha256:198ae15807a7c1bf84ceabf662e902731bf8f874f9e775e2289cab02bb6a4e30"},
+]
sh = [
{file = "sh-1.14.1-py2.py3-none-any.whl", hash = "sha256:75e86a836f47de095d4531718fe8489e6f7446c75ddfa5596f632727b919ffae"},
{file = "sh-1.14.1.tar.gz", hash = "sha256:39aa9af22f6558a0c5d132881cf43e34828ca03e4ae11114852ca6a55c7c1d8e"},
diff --git a/pyproject.toml b/pyproject.toml
index 4a11e1a7..756754bb 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
[tool.poetry]
name = "scanapi"
-version = "2.6.1"
+version = "2.6.2"
description = "Automated Testing and Documentation for your REST API"
authors = ["Camila Maia "]
license = "MIT"
@@ -22,6 +22,7 @@ requests = "2.26.0"
appdirs = "^1.4.4"
curlify2 = "^1.0.1"
MarkupSafe = "2.0.1"
+rich = "12.1.0"
[tool.poetry.dev-dependencies]
codecov = "2.1.7"
diff --git a/scanapi/__main__.py b/scanapi/__main__.py
index 01eaa838..2145c86b 100644
--- a/scanapi/__main__.py
+++ b/scanapi/__main__.py
@@ -3,6 +3,7 @@
import click
import yaml
from pkg_resources import get_distribution
+from rich.logging import RichHandler
from scanapi.exit_code import ExitCode
from scanapi.scan import scan
@@ -77,7 +78,16 @@ def run(
Automated Testing and Documentation for your REST API.
SPEC_PATH argument is the API specification file path.
"""
- logging.basicConfig(level=log_level, format="%(message)s")
+ logging.basicConfig(
+ level=log_level,
+ format="%(message)s",
+ datefmt="[%X]",
+ handlers=[
+ RichHandler(
+ show_time=False, markup=True, show_path=(log_level == "DEBUG")
+ )
+ ],
+ )
logger = logging.getLogger(__name__)
click_preferences = {
diff --git a/scanapi/config_loader.py b/scanapi/config_loader.py
index cee81154..c372e6d8 100644
--- a/scanapi/config_loader.py
+++ b/scanapi/config_loader.py
@@ -44,7 +44,10 @@ def load_config_file(file_path):
returns it.
"""
with open(file_path, "r") as stream:
- logger.info(f"Loading file {file_path}")
+ logger.info(
+ f"Loading file [deep_sky_blue1 underline]{file_path}",
+ extra={"highlighter": None},
+ )
data = yaml.load(stream, Loader)
if not data:
diff --git a/scanapi/console.py b/scanapi/console.py
new file mode 100644
index 00000000..6f5562eb
--- /dev/null
+++ b/scanapi/console.py
@@ -0,0 +1,89 @@
+from rich.console import Console
+
+from scanapi.session import session
+from scanapi.test_status import TestStatus
+
+console = Console()
+
+
+def write_results(results):
+ """Print the test results to the console output
+
+ Returns:
+ None
+ """
+ for r in results:
+ write_result(r)
+
+
+def write_result(result):
+ """Print the test result to the console output
+
+ Returns:
+ None
+ """
+ for test in result["tests_results"]:
+ if test["status"] == TestStatus.PASSED:
+ console.print(f"[bright_green] [PASSED] [white]{test['name']}")
+ if test["status"] == TestStatus.FAILED:
+ console.print(
+ f"[bright_red] [FAILED] [white]{test['name']}\n"
+ f"\t [bright_red]{test['failure']} is false"
+ )
+
+
+def write_report_path(uri):
+ """Print path to generated documentation
+
+ Returns:
+ None
+ """
+ console.print(
+ f"The documentation was generated successfully.\n"
+ f"It is available at -> [deep_sky_blue1 underline]{uri}\n"
+ )
+
+
+def write_summary():
+ """Write tests summary in console
+
+ Returns:
+ None
+ """
+ elapsed_time = round(session.elapsed_time().total_seconds(), 2)
+
+ if session.failures > 0 or session.errors > 0:
+ _print_summary_with_failures_or_errors(elapsed_time)
+ return
+
+ _print_successful_summary(elapsed_time)
+
+
+def _print_summary_with_failures_or_errors(elapsed_time):
+ """Write tests summary when there are failures or errors
+
+ Returns:
+ None
+ """
+ summary = (
+ f"[bright_green]{session.successes} passed, "
+ f"[bright_red]{session.failures} failed, "
+ f"[bright_red]{session.errors} errors in {elapsed_time}s"
+ )
+ console.line()
+ console.rule(summary, characters="=", style="bright_red")
+ console.line()
+
+
+def _print_successful_summary(elapsed_time):
+ """Write tests summary when there are no failures or errors
+
+ Returns:
+ None
+ """
+ console.line()
+ console.rule(
+ f"[bright_green]{session.successes} passed in {elapsed_time}s",
+ characters="=",
+ )
+ console.line()
diff --git a/scanapi/evaluators/code_evaluator.py b/scanapi/evaluators/code_evaluator.py
index b0ac1c08..ee124e15 100644
--- a/scanapi/evaluators/code_evaluator.py
+++ b/scanapi/evaluators/code_evaluator.py
@@ -1,6 +1,5 @@
# Available imports to be used dinamically in the API spec
import datetime # noqa: F401
-import logging
import math # noqa: F401
import random # noqa: F401
import re
@@ -9,8 +8,6 @@
from scanapi.errors import InvalidPythonCodeError
-logger = logging.getLogger(__name__)
-
class CodeEvaluator:
python_code_pattern = re.compile(
@@ -62,6 +59,9 @@ def evaluate(cls, sequence, spec_vars, is_a_test_case=False):
def _assert_code(cls, code, response):
"""Assert a Python code statement.
+ The eval's global context is enriched with the response to support
+ comprehensions.
+
Args:
code[string]: python code that ScanAPI needs to assert
response[requests.Response]: the response for the current request
@@ -77,7 +77,8 @@ def _assert_code(cls, code, response):
AssertionError: If python statement evaluates False
"""
- ok = eval(code) # noqa
+ global_context = {**globals(), **{"response": response}}
+ ok = eval(code, global_context) # noqa
return ok, None if ok else code.strip()
@classmethod
diff --git a/scanapi/evaluators/spec_evaluator.py b/scanapi/evaluators/spec_evaluator.py
index 85141336..2345c326 100644
--- a/scanapi/evaluators/spec_evaluator.py
+++ b/scanapi/evaluators/spec_evaluator.py
@@ -1,11 +1,8 @@
-import logging
import re
from functools import singledispatch
from scanapi.evaluators.string_evaluator import StringEvaluator
-logger = logging.getLogger(__name__)
-
class SpecEvaluator:
def __init__(self, endpoint, spec_vars, extras=None, filter_responses=True):
diff --git a/scanapi/evaluators/string_evaluator.py b/scanapi/evaluators/string_evaluator.py
index 3128c4bd..7a63e297 100644
--- a/scanapi/evaluators/string_evaluator.py
+++ b/scanapi/evaluators/string_evaluator.py
@@ -1,12 +1,9 @@
-import logging
import os
import re
from scanapi.errors import BadConfigurationError
from scanapi.evaluators.code_evaluator import CodeEvaluator
-logger = logging.getLogger(__name__)
-
class StringEvaluator:
"""
diff --git a/scanapi/reporter.py b/scanapi/reporter.py
index b84dfa30..4cb810f9 100644
--- a/scanapi/reporter.py
+++ b/scanapi/reporter.py
@@ -1,17 +1,14 @@
#!/usr/bin/env python3
import datetime
-import logging
import pathlib
import webbrowser
from pkg_resources import get_distribution
+from scanapi.console import write_report_path
from scanapi.session import session
from scanapi.settings import settings
from scanapi.template_render import render
-from scanapi.test_status import TestStatus
-
-logger = logging.getLogger(__name__)
class Reporter:
@@ -28,7 +25,7 @@ def __init__(self, output_path=None, template=None):
self.output_path = pathlib.Path(output_path or "scanapi-report.html")
self.template = template
- def write(self, results):
+ def write(self, results, open_in_browser):
"""Part of the Reporter instance that is responsible for writing
scanapi-report.html.
@@ -39,8 +36,6 @@ def write(self, results):
None
"""
- logger.info("Writing documentation")
-
template_path = self.template if self.template else "report.html"
has_external_template = bool(self.template)
context = self._build_context(results)
@@ -50,32 +45,15 @@ def write(self, results):
with open(self.output_path, "w", newline="\n") as doc:
doc.write(content)
- logger.info("\nThe documentation was generated successfully.")
- logger.info(f"It is available at {self.output_path.resolve().as_uri()}")
+ write_report_path(self.output_path.resolve().as_uri())
+
+ if open_in_browser:
+ self._open_in_browser()
- def open_report_in_browser(self):
+ def _open_in_browser(self):
"""Open the results file on a browser"""
webbrowser.open(self.output_path.resolve().as_uri())
- @staticmethod
- def write_without_generating_report(results):
- """Part of the Reporter instance that is responsible for writing the
- results without generating the scanapi-report.html.
-
- Args:
- results [generator]: generator of dicts resulting of Request run().
-
- Returns:
- None
- """
- logger.info("Writing results without generating report")
- for r in results:
- if logger.root.level != logging.DEBUG:
- for test in r["tests_results"]:
- logger.info(f" [{test['status'].upper()}] {test['name']}")
- if test["status"] == TestStatus.FAILED:
- logger.info(f"\t {test['failure']} is false")
-
@staticmethod
def _build_context(results):
"""Build context dict of values required to render template.
diff --git a/scanapi/scan.py b/scanapi/scan.py
index 429af103..db7173b5 100644
--- a/scanapi/scan.py
+++ b/scanapi/scan.py
@@ -3,6 +3,7 @@
import yaml
from scanapi.config_loader import load_config_file
+from scanapi.console import write_results, write_summary
from scanapi.errors import (
BadConfigurationError,
EmptyConfigFileError,
@@ -21,8 +22,6 @@
def scan():
"""Caller function that tries to scans the file and write the report."""
spec_path = settings["spec_path"]
- no_report = settings["no_report"]
- open_browser = settings["open_browser"]
try:
api_spec = load_config_file(spec_path)
@@ -50,38 +49,40 @@ def scan():
logger.error(error_message)
raise SystemExit(ExitCode.USAGE_ERROR)
- if no_report:
- write_without_generating_report(results)
- else:
- try:
- write_report(results)
- if open_browser:
- open_report_in_browser()
- except (BadConfigurationError, InvalidPythonCodeError) as e:
- logger.error(e)
- raise SystemExit(ExitCode.USAGE_ERROR)
-
+ _write(results)
+ write_summary()
session.exit()
-def write_report(results):
- """Constructs a Reporter object and calls the write method of Reporter to
- push the results to a file.
+def _write(results):
+ """When the user passed the `--no-report` flag: prints the test results to
+ the console output.
+ When the user did not pass the `--no-report` flag: writes the results on a
+ report file and opens it using a browser, if the --browser flag is present.
+
+ Returns:
+ None
"""
- reporter = Reporter(settings["output_path"], settings["template"])
- reporter.write(results)
+ no_report = settings["no_report"]
+ open_browser = settings["open_browser"]
+ if no_report:
+ write_results(results)
+ return
-def open_report_in_browser():
- """Open the results file on a browser"""
- reporter = Reporter(settings["output_path"], settings["template"])
- reporter.open_report_in_browser()
+ try:
+ _write_report(results, open_browser)
+ except (BadConfigurationError, InvalidPythonCodeError) as e:
+ logger.error(e)
+ raise SystemExit(ExitCode.USAGE_ERROR)
-def write_without_generating_report(results):
- """Constructs a Reporter object and calls the
- write_without_generating_report method of Reporter to print the results to
- the console output without generating a report.
+def _write_report(results, open_browser):
+ """Constructs a Reporter object and calls the write method of Reporter to
+ push the results to a file.
+
+ Returns:
+ None
"""
- reporter = Reporter()
- reporter.write_without_generating_report(results)
+ reporter = Reporter(settings["output_path"], settings["template"])
+ reporter.write(results, open_browser)
diff --git a/scanapi/tree/request_node.py b/scanapi/tree/request_node.py
index 1af94821..e7f7d104 100644
--- a/scanapi/tree/request_node.py
+++ b/scanapi/tree/request_node.py
@@ -1,8 +1,9 @@
-import logging
import time
+from scanapi.console import console, write_result
from scanapi.errors import HTTPMethodNotAllowedError
from scanapi.hide_utils import hide_sensitive_info
+from scanapi.settings import settings
from scanapi.test_status import TestStatus
from scanapi.tree.testing_node import TestingNode
from scanapi.tree.tree_keys import (
@@ -19,10 +20,17 @@
)
from scanapi.utils import join_urls, session_with_retry, validate_keys
-logger = logging.getLogger(__name__)
-
class RequestNode:
+ """
+ Class that represents a request. It's used as a child of an EndpointNode
+ where each EndpointNode may contain multiple children RequestNode.
+
+ Attributes:
+ spec[dict]: dictionary containing the request's specifications
+ endpoint[EndpointNode]: the parent node
+ """
+
SCOPE = "request"
ALLOWED_KEYS = (
BODY_KEY,
@@ -125,7 +133,7 @@ def run(self):
method = self.http_method
url = self.full_url_path
- logger.info("Making request %s %s", method, url)
+ console.print(f"\n- Making request {method} {url}", highlight=False)
self.endpoint.spec_vars.update(
self.spec.get(VARS_KEY, {}),
@@ -155,7 +163,7 @@ def run(self):
del self.endpoint.spec_vars["response"]
- return {
+ result = {
"response": response,
"tests_results": tests_results,
"no_failure": all(
@@ -165,6 +173,11 @@ def run(self):
"request_node_name": self.name,
}
+ if not settings["no_report"]:
+ write_result(result)
+
+ return result
+
def _run_tests(self):
"""Run all tests cases of request node.
diff --git a/scanapi/tree/testing_node.py b/scanapi/tree/testing_node.py
index 9aebabaf..0e19458a 100644
--- a/scanapi/tree/testing_node.py
+++ b/scanapi/tree/testing_node.py
@@ -1,12 +1,8 @@
-import logging
-
from scanapi.session import session
from scanapi.test_status import TestStatus
from scanapi.tree.tree_keys import ASSERT_KEY, NAME_KEY
from scanapi.utils import validate_keys
-logger = logging.getLogger(__name__)
-
class TestingNode:
__test__ = False
@@ -51,7 +47,6 @@ def run(self):
error = str(e)
self._process_result(status)
- self._log_result(status, failure)
return {
"name": self.full_name,
@@ -79,11 +74,6 @@ def _process_result(status):
if status == TestStatus.PASSED:
session.increment_successes()
- def _log_result(self, status, failure):
- logger.debug("\a [%s] %s", status.upper(), self.full_name)
- if failure:
- logger.debug("\t %s is false", failure)
-
def _validate(self):
validate_keys(
self.spec.keys(), self.ALLOWED_KEYS, self.REQUIRED_KEYS, self.SCOPE
diff --git a/tests/unit/evaluators/test_code_evaluator.py b/tests/unit/evaluators/test_code_evaluator.py
index b40498ee..5092b48c 100644
--- a/tests/unit/evaluators/test_code_evaluator.py
+++ b/tests/unit/evaluators/test_code_evaluator.py
@@ -40,6 +40,7 @@ def test_should_return_assert_results(self, sequence, expected):
test_data = [
("${{response.text == 'abcde'}}", (True, None)),
("${{response.url == 'http://test.com/'}}", (True, None),),
+ ("${{all(x in response.text for x in 'abc')}}", (True, None)),
(
"${{response.status_code == 300}}",
(False, "response.status_code == 300"),
diff --git a/tests/unit/test_console.py b/tests/unit/test_console.py
new file mode 100644
index 00000000..67289b5b
--- /dev/null
+++ b/tests/unit/test_console.py
@@ -0,0 +1,175 @@
+from datetime import timedelta
+from unittest.mock import MagicMock
+
+from pytest import fixture, mark
+
+from scanapi.console import (
+ write_report_path,
+ write_result,
+ write_results,
+ write_summary,
+)
+
+
+@fixture
+def mocked__console(mocker):
+ return mocker.patch("scanapi.console.console")
+
+
+@mark.describe("console")
+@mark.describe("write_result")
+class TestWriteResults:
+ @fixture
+ def mocked__write_result(self, mocker):
+ return mocker.patch("scanapi.console.write_result")
+
+ @mark.context("when results is empty")
+ @mark.it("should not call write_result")
+ def test_should_not_call(self, mocked__write_result):
+ write_results([])
+
+ assert not mocked__write_result.called
+
+ @mark.context("when results has size 3")
+ @mark.it("should call write_result 3 times")
+ def test_should_call_3_times(self, mocked__write_result):
+ write_results([1, 2, 3])
+
+ assert mocked__write_result.call_count == 3
+
+
+@mark.describe("console")
+@mark.describe("write_result")
+class TestWriteResult:
+ @mark.context("when tests results contains one success")
+ @mark.it("should print the success result")
+ def test_write_success(self, mocked__console):
+
+ tests_results = [
+ {
+ "name": "should_be_success",
+ "status": "passed",
+ "failure": None,
+ "error": None,
+ }
+ ]
+
+ fake_result_passed = {
+ "tests_results": tests_results,
+ }
+
+ write_result(fake_result_passed)
+
+ mocked__console.print.assert_called_once_with(
+ "[bright_green] [PASSED] [white]should_be_success"
+ )
+
+ @mark.context("when tests results contains one success and one failure")
+ @mark.it(
+ "should print two lines, one for the success and one for the failure"
+ )
+ def test_write_failures(self, mocker, mocked__console):
+ tests = [
+ {
+ "name": "failed_test",
+ "status": "failed",
+ "failure": "response.status_code == 200",
+ "error": None,
+ },
+ {
+ "name": "should_be_success",
+ "status": "passed",
+ "failure": None,
+ "error": None,
+ },
+ ]
+
+ fake_result_failed = {
+ "tests_results": tests,
+ }
+
+ write_result(fake_result_failed)
+
+ calls = [
+ mocker.call(
+ "[bright_red] [FAILED] [white]failed_test\n\t [bright_red]response.status_code == 200 is false"
+ ),
+ mocker.call("[bright_green] [PASSED] [white]should_be_success"),
+ ]
+
+ mocked__console.print.assert_has_calls(calls)
+
+ @mark.context("when session has no tests")
+ @mark.it("should print nothing")
+ def test_write_without_tests(self, mocked__console):
+ fake_result_failed = {
+ "tests_results": [],
+ }
+
+ write_result(fake_result_failed)
+
+ assert not mocked__console.print.called
+
+
+@mark.describe("console")
+@mark.describe("write_report_path")
+class TestLogReport:
+ @mark.it("should write report path")
+ def test(self, mocked__console):
+
+ write_report_path("http://localhost:8080")
+
+ mocked__console.print.assert_called_once_with(
+ "The documentation was generated successfully.\n"
+ "It is available at -> [deep_sky_blue1 underline]http://localhost:8080\n"
+ )
+
+
+@mark.describe("console")
+@mark.describe("write_summary")
+class TestWriteSummary:
+ @fixture
+ def mocked__session(self, mocker):
+ session = MagicMock()
+ session.errors = 0
+ session.elapsed_time.return_value = timedelta(seconds=3)
+ return mocker.patch("scanapi.console.session", session)
+
+ @mark.context("when session has successes and no failures")
+ @mark.it("should print the success summary")
+ def test_write_success(self, mocked__console, mocked__session):
+ mocked__session.failures = 0
+ mocked__session.successes = 1
+
+ write_summary()
+
+ mocked__console.rule.assert_called_once_with(
+ "[bright_green]1 passed in 3.0s", characters="=",
+ )
+
+ @mark.context("when session has failures")
+ @mark.it("should print the failure summary")
+ def test_write_failures(self, mocked__console, mocked__session):
+ mocked__session.failures = 1
+ mocked__session.successes = 1
+
+ write_summary()
+
+ mocked__console.rule.assert_called_once_with(
+ "[bright_green]1 passed, [bright_red]1 failed, [bright_red]0 errors in 3.0s",
+ characters="=",
+ style="bright_red",
+ )
+
+ @mark.context("when session has no tests")
+ @mark.it("should print the success summary")
+ def test_write_without_tests(self, mocked__console, mocked__session):
+
+ mocked__session.failures = 0
+ mocked__session.successes = 0
+
+ write_summary()
+
+ mocked__console.rule.assert_called_once_with(
+ "[bright_green]0 passed in 3.0s", characters="=",
+ )
diff --git a/tests/unit/test_reporter.py b/tests/unit/test_reporter.py
index adf45662..7ea9f743 100644
--- a/tests/unit/test_reporter.py
+++ b/tests/unit/test_reporter.py
@@ -63,6 +63,10 @@ def mocked__session(self, mocker):
def mocked__logger(self, mocker):
return mocker.patch("scanapi.reporter.logger")
+ @fixture
+ def mocked__webbrowser(self, mocker):
+ return mocker.patch("scanapi.reporter.webbrowser")
+
@fixture
def mock_get_distribution(self, mocker):
class MockDistro:
@@ -101,7 +105,7 @@ def test_should_write_to_default_output(
):
mocked__render.return_value = "ScanAPI Report"
reporter = Reporter()
- reporter.write(fake_results)
+ reporter.write(fake_results, False)
mocked__render.assert_called_once_with("report.html", context, False)
mocked__open.assert_called_once_with(
@@ -121,7 +125,7 @@ def test_should_write_to_custom_output(
):
mocked__render.return_value = "ScanAPI Report"
reporter = Reporter("./custom/report-output.html", "html")
- reporter.write(fake_results)
+ reporter.write(fake_results, False)
mocked__render.assert_called_once_with("html", context, True)
mocked__open.assert_called_once_with(
@@ -141,7 +145,7 @@ def test_should_handle_custom_templates(
):
mocked__render.return_value = "ScanAPI Report"
reporter = Reporter(template="my-template.html")
- reporter.write(fake_results)
+ reporter.write(fake_results, False)
mocked__render.assert_called_once_with(
"my-template.html", context, True
@@ -151,13 +155,18 @@ def test_should_handle_custom_templates(
)
mocked__open().write.assert_called_once_with("ScanAPI Report")
- @mark.it("should write without generating report")
- def test_should_write_without_generating_report(
- self, mocker, mocked__render, mocked__open, mocked__logger,
+ @mark.it("should open report in browser")
+ def test_should_open_report_in_browser(
+ self,
+ mocker,
+ mocked__render,
+ mocked__open,
+ mocked__session,
+ mock_get_distribution,
+ context,
+ mocked__webbrowser,
):
- reporter = Reporter()
- reporter.write_without_generating_report(fake_results)
- mocked__render.assert_not_called()
- mocked__open.assert_not_called()
- mocked__logger.info.assert_called_once()
+ reporter = Reporter()
+ reporter.write(fake_results, True)
+ assert mocked__webbrowser.open.call_count == 1
diff --git a/tests/unit/test_scan.py b/tests/unit/test_scan.py
index c6bd165f..7ed93623 100644
--- a/tests/unit/test_scan.py
+++ b/tests/unit/test_scan.py
@@ -7,7 +7,7 @@
from pytest import fixture, mark, raises
from scanapi.errors import EmptyConfigFileError, InvalidKeyError
-from scanapi.scan import open_report_in_browser, scan, write_report
+from scanapi.scan import scan
log = logging.getLogger(__name__)
@@ -121,113 +121,111 @@ def test_should_log_error_4(self, mocker, caplog):
)
@mark.context("when the api spec is ok")
- @mark.it("should call reporter write_report")
- def test_should_call_reporter(self, mocker, response):
+ @mark.it(
+ "should call reporter.write, call console.write_summary and exit the session"
+ )
+ def test_should_call_reporter_write_call_console_write_summary_and_exit(
+ self, mocker, response
+ ):
+ mocker.patch(
+ "scanapi.scan.settings",
+ {
+ "spec_path": "",
+ "no_report": False,
+ "open_browser": False,
+ "output_path": "",
+ "template": None,
+ },
+ )
+
mock_load_config_file = mocker.patch("scanapi.scan.load_config_file")
mock_load_config_file.return_value = {"endpoints": []}
mock_endpoint_init = mocker.patch("scanapi.scan.EndpointNode.__init__")
mock_endpoint_init.return_value = None
mock_endpoint_run = mocker.patch("scanapi.scan.EndpointNode.run")
mock_endpoint_run.return_value = [response]
- mock_write_report = mocker.patch("scanapi.scan.write_report")
+ mock_reporter_write = mocker.patch("scanapi.scan.Reporter.write")
+ mock_console_write_summary = mocker.patch("scanapi.scan.write_summary")
with raises(SystemExit) as excinfo:
scan()
+ mock_reporter_write.assert_called_once_with([response], False)
+ mock_console_write_summary.assert_called_once_with()
+
assert excinfo.type == SystemExit
assert excinfo.value.code == 0
- mock_endpoint_init.assert_called_once_with({"endpoints": []})
- assert mock_endpoint_run.called
- mock_write_report.assert_called_once_with([response])
-
- @mark.context(
- "when the api spec is ok and the is configured to open the results"
+ @mark.context("when the api spec is ok")
+ @mark.context("when no_report is True")
+ @mark.it(
+ "should call console.write_results, call console.write_summary and exit the session"
)
- @mark.it("should call reporter write_report and open_report_in_browser")
- def test_should_call_reporter_and_open_results(self, mocker, response):
+ def test_should_call_console_write_results_call_console_write_summary_and_exit(
+ self, mocker, response
+ ):
+ mocker.patch(
+ "scanapi.scan.settings",
+ {
+ "spec_path": "",
+ "no_report": True,
+ "open_browser": False,
+ "output_path": "",
+ "template": None,
+ },
+ )
+
mock_load_config_file = mocker.patch("scanapi.scan.load_config_file")
mock_load_config_file.return_value = {"endpoints": []}
mock_endpoint_init = mocker.patch("scanapi.scan.EndpointNode.__init__")
mock_endpoint_init.return_value = None
mock_endpoint_run = mocker.patch("scanapi.scan.EndpointNode.run")
mock_endpoint_run.return_value = [response]
- mock_write_report = mocker.patch("scanapi.scan.write_report")
- mock_open_report_in_browser = mocker.patch(
- "scanapi.scan.open_report_in_browser"
- )
- mocker.patch(
- "scanapi.scan.settings",
- {
- "spec_path": None,
- "output_path": "out/my-report.md",
- "no_report": False,
- "open_browser": True,
- "reporter": "markdown",
- "template": "my-template.jinja",
- },
- )
+ mock_console_write_results = mocker.patch("scanapi.scan.write_results")
+ mock_console_write_summary = mocker.patch("scanapi.scan.write_summary")
with raises(SystemExit) as excinfo:
scan()
+ mock_console_write_results.assert_called_once_with([response])
+ mock_console_write_summary.assert_called_once_with()
+
assert excinfo.type == SystemExit
assert excinfo.value.code == 0
- mock_endpoint_init.assert_called_once_with({"endpoints": []})
- assert mock_endpoint_run.called
- mock_write_report.assert_called_once_with([response])
- mock_open_report_in_browser.assert_called_once()
-
-
-@mark.describe("scan")
-@mark.describe("write_report")
-class TestWriteReport:
- @mark.it("should call wr")
- def test_should_call_wr(self, mocker, response):
- mock_write = mocker.patch("scanapi.scan.Reporter.write")
- mock_reporter_init = mocker.patch("scanapi.scan.Reporter.__init__")
- mock_reporter_init.return_value = None
+ @mark.context("when the api spec is ok")
+ @mark.context("when open_browser is True")
+ @mark.it(
+ "should call reporter.write passing open_browser as True, call console.write_summary and exit the session"
+ )
+ def test_should_call_reporter_write_with_open_browser_true_call_console_write_summary_and_exit(
+ self, mocker, response
+ ):
mocker.patch(
"scanapi.scan.settings",
{
- "output_path": "out/my-report.md",
+ "spec_path": "",
"no_report": False,
- "reporter": "markdown",
- "template": "my-template.jinja",
+ "open_browser": True,
+ "output_path": "",
+ "template": None,
},
)
- write_report([response])
-
- mock_reporter_init.assert_called_once_with(
- "out/my-report.md", "my-template.jinja"
- )
- mock_write.assert_called_once_with([response])
-
-
-@mark.describe("scan")
-@mark.describe("open_report_in_browser")
-class TestOpenReport:
- @mark.it("should call open browser")
- def test_should_call_wr(self, mocker, response):
- mock_open = mocker.patch("scanapi.scan.Reporter.open_report_in_browser")
- mock_reporter_init = mocker.patch("scanapi.scan.Reporter.__init__")
- mock_reporter_init.return_value = None
- mocker.patch(
- "scanapi.scan.settings",
- {
- "output_path": "out/my-report.md",
- "no_report": False,
- "reporter": "markdown",
- "template": "my-template.jinja",
- },
- )
+ mock_load_config_file = mocker.patch("scanapi.scan.load_config_file")
+ mock_load_config_file.return_value = {"endpoints": []}
+ mock_endpoint_init = mocker.patch("scanapi.scan.EndpointNode.__init__")
+ mock_endpoint_init.return_value = None
+ mock_endpoint_run = mocker.patch("scanapi.scan.EndpointNode.run")
+ mock_endpoint_run.return_value = [response]
+ mock_reporter_write = mocker.patch("scanapi.scan.Reporter.write")
+ mock_console_write_summary = mocker.patch("scanapi.scan.write_summary")
- open_report_in_browser()
+ with raises(SystemExit) as excinfo:
+ scan()
- mock_reporter_init.assert_called_once_with(
- "out/my-report.md", "my-template.jinja"
- )
+ mock_reporter_write.assert_called_once_with([response], True)
+ mock_console_write_summary.assert_called_once_with()
- mock_open.assert_called_once()
+ assert excinfo.type == SystemExit
+ assert excinfo.value.code == 0
diff --git a/tests/unit/tree/request_node/test_run.py b/tests/unit/tree/request_node/test_run.py
index 2b7b6c73..fb3508aa 100644
--- a/tests/unit/tree/request_node/test_run.py
+++ b/tests/unit/tree/request_node/test_run.py
@@ -18,6 +18,10 @@ def mock_run_tests(self, mocker):
def mock_time_sleep(self, mocker):
return mocker.patch("scanapi.tree.request_node.time.sleep")
+ @fixture
+ def mock_console_write_result(self, mocker):
+ return mocker.patch("scanapi.tree.request_node.write_result")
+
@mark.it("should call the request method")
def test_calls_request(self, mock_session, mock_time_sleep):
request = RequestNode(
@@ -46,6 +50,40 @@ def test_calls_request(self, mock_session, mock_time_sleep):
"request_node_name": "request_name",
}
+ @mark.context("when no_report is False")
+ @mark.it("should call the write_result method")
+ def test_calls_write_result(self, mocker, mock_console_write_result):
+ mocker.patch(
+ "scanapi.tree.request_node.settings", {"no_report": False,},
+ )
+
+ request = RequestNode(
+ {"path": "http://foo.com", "name": "request_name"},
+ endpoint=EndpointNode(
+ {"name": "endpoint_name", "requests": [{}], "delay": 1}
+ ),
+ )
+ request.run()
+
+ assert mock_console_write_result.call_count == 1
+
+ @mark.context("when no_report is True")
+ @mark.it("should not call the write_result method")
+ def test_doesnt_write_result(self, mocker, mock_console_write_result):
+ mocker.patch(
+ "scanapi.tree.request_node.settings", {"no_report": True,},
+ )
+
+ request = RequestNode(
+ {"path": "http://foo.com", "name": "request_name"},
+ endpoint=EndpointNode(
+ {"name": "endpoint_name", "requests": [{}], "delay": 1}
+ ),
+ )
+ request.run()
+
+ assert not mock_console_write_result.called
+
test_data = [
([{"status": "passed"}, {"status": "failed"}], False,),
([{"status": "passed"}, {"status": "passed"}], True,),
@@ -54,7 +92,12 @@ def test_calls_request(self, mock_session, mock_time_sleep):
@mark.parametrize("test_results, expected_no_failure", test_data)
@mark.it("should build the result object")
def test_build_result(
- self, test_results, expected_no_failure, mock_session, mock_run_tests,
+ self,
+ test_results,
+ expected_no_failure,
+ mock_session,
+ mock_run_tests,
+ mock_console_write_result,
):
mock_run_tests.return_value = test_results
request = RequestNode(
diff --git a/tests/unit/tree/testing_node/test_run.py b/tests/unit/tree/testing_node/test_run.py
index 9d29cfde..1eecec57 100644
--- a/tests/unit/tree/testing_node/test_run.py
+++ b/tests/unit/tree/testing_node/test_run.py
@@ -1,11 +1,7 @@
-import logging
-
from pytest import fixture, mark
from scanapi.tree import EndpointNode, RequestNode, TestingNode
-log = logging.getLogger(__name__)
-
@mark.describe("testing node")
@mark.describe("run")
@@ -68,15 +64,6 @@ def test_increment_successes(
assert mock_increment_successes.call_count == 1
assert not mock_increment_failures.called
- @mark.context("when test passed")
- @mark.it("should logs test results")
- def test_logs_test_results(self, mock_evaluate, caplog, testing_node):
- mock_evaluate.return_value = (True, None)
-
- with caplog.at_level(logging.DEBUG):
- testing_node.run()
- assert "\x07 [PASSED] foo::bar::status_is_200" in caplog.text
-
@mark.context("when test failed")
@mark.it("should build result object")
def test_build_result_2(
@@ -112,17 +99,3 @@ def test_increment_failures(
testing_node.run()
assert mock_increment_failures.call_count == 1
assert not mock_increment_successes.called
-
- @mark.context("when test failed")
- @mark.it("should logs test results")
- def test_logs_test_results_2(self, mock_evaluate, caplog, testing_node):
- mock_evaluate.return_value = (
- False,
- "response.status_code == 200",
- )
-
- with caplog.at_level(logging.DEBUG):
- testing_node.run()
-
- assert "\x07 [FAILED] foo::bar::status_is_200" in caplog.text
- assert "\t response.status_code == 200 is false" in caplog.text