pytest
pytest
Complete Python testing toolkit with pytest.
Running Tests
# Run all tests
pytest
# Run specific file/directory
pytest tests/test_api.py
pytest tests/
# Run specific test
pytest tests/test_api.py::test_login
pytest tests/test_api.py::TestUserClass::test_create
# Verbose output
pytest -v
pytest -vv # Extra verbose
# Stop on first failure
pytest -x
# Run last failed
pytest --lf
# Run failed first, then rest
pytest --ff
# Show print output
pytest -s
# Parallel (requires pytest-xdist)
pytest -n auto
pytest -n 4
Fixtures
import pytest
# Basic fixture
@pytest.fixture
def user():
return {"name": "Alice", "email": "alice@test.com"}
# Fixture with teardown
@pytest.fixture
def db_connection():
conn = create_connection()
yield conn
conn.close()
# Autouse fixture (runs for every test)
@pytest.fixture(autouse=True)
def reset_state():
State.reset()
yield
State.cleanup()
# Scoped fixtures
@pytest.fixture(scope="module")
def expensive_resource():
return load_heavy_thing()
# scope options: "function" (default), "class", "module", "package", "session"
# Fixture with params
@pytest.fixture(params=["sqlite", "postgres", "mysql"])
def db_engine(request):
return create_engine(request.param)
# Using fixtures
def test_user_name(user):
assert user["name"] == "Alice"
Parametrize
# Basic parametrize
@pytest.mark.parametrize("input,expected", [
("hello", 5),
("", 0),
("world", 5),
])
def test_string_length(input, expected):
assert len(input) == expected
# Multiple parameters
@pytest.mark.parametrize("x", [0, 1])
@pytest.mark.parametrize("y", [2, 3])
def test_combinations(x, y):
pass # Runs 4 times: (0,2), (0,3), (1,2), (1,3)
# With IDs
@pytest.mark.parametrize("input,expected", [
pytest.param("admin", True, id="admin-user"),
pytest.param("guest", False, id="guest-user"),
])
def test_is_admin(input, expected):
assert is_admin(input) == expected
Mocking
from unittest.mock import Mock, patch, MagicMock
# patch decorator
@patch("myapp.services.send_email")
def test_registration(mock_send):
register_user("alice@test.com")
mock_send.assert_called_once_with("alice@test.com", subject="Welcome")
# patch as context manager
def test_api_call():
with patch("myapp.client.requests.get") as mock_get:
mock_get.return_value.json.return_value = {"status": "ok"}
result = fetch_status()
assert result == "ok"
# Mock fixture (pytest-mock)
def test_with_mocker(mocker):
mock_db = mocker.patch("myapp.db.query")
mock_db.return_value = [{"id": 1}]
result = get_users()
assert len(result) == 1
# Side effects
mock_func = Mock(side_effect=ValueError("boom"))
mock_func = Mock(side_effect=[1, 2, 3]) # Returns sequentially
mock_func = Mock(side_effect=lambda x: x * 2)
# Spec mocking (catches attribute errors)
mock_obj = Mock(spec=MyClass)
Markers
# Skip
@pytest.mark.skip(reason="Not implemented yet")
def test_future_feature():
pass
# Skip conditionally
@pytest.mark.skipif(sys.platform == "win32", reason="Unix only")
def test_unix_permissions():
pass
# Expected failure
@pytest.mark.xfail(reason="Known bug #123")
def test_known_bug():
pass
# Custom markers
@pytest.mark.slow
def test_heavy_computation():
pass
# Run by marker: pytest -m slow
# Run excluding: pytest -m "not slow"
# Register markers in pytest.ini or pyproject.toml
# [tool.pytest.ini_options]
# markers = ["slow: marks tests as slow"]
Assertions
# Basic
assert result == expected
assert item in collection
assert value is None
# Exception testing
with pytest.raises(ValueError):
int("not_a_number")
with pytest.raises(ValueError, match="invalid literal"):
int("not_a_number")
# Approximate
assert 0.1 + 0.2 == pytest.approx(0.3)
assert result == pytest.approx(expected, rel=1e-3)
# Warnings
with pytest.warns(DeprecationWarning):
deprecated_function()
Coverage
# Install
pip install pytest-cov
# Run with coverage
pytest --cov=myapp
pytest --cov=myapp --cov-report=html
pytest --cov=myapp --cov-report=term-missing
pytest --cov=myapp --cov-branch # Branch coverage
# Minimum threshold
pytest --cov=myapp --cov-fail-under=80
conftest.py
# tests/conftest.py - Shared fixtures available to all tests
import pytest
@pytest.fixture
def app():
"""Create test application."""
app = create_app(testing=True)
yield app
@pytest.fixture
def client(app):
"""Create test client."""
return app.test_client()
@pytest.fixture
def auth_headers():
"""Create auth headers."""
token = create_test_token()
return {"Authorization": f"Bearer {token}"}
pyproject.toml Config
[tool.pytest.ini_options]
testpaths = ["tests"]
python_files = ["test_*.py"]
python_functions = ["test_*"]
addopts = "-v --tb=short --strict-markers"
markers = [
"slow: marks tests as slow",
"integration: integration tests",
]
filterwarnings = [
"ignore::DeprecationWarning",
]
Reference
For advanced patterns, plugins, and async testing: references/patterns.md
More from 1mangesh1/dev-skills-collection
curl-http
HTTP request construction and API testing with curl and HTTPie. Use when user asks to "test API", "make HTTP request", "curl POST", "send request", "test endpoint", "debug API", "upload file", "check response time", "set auth header", "basic auth with curl", "send JSON", "test webhook", "check status code", "follow redirects", "rate limit testing", "measure API latency", "stress test endpoint", "mock API response", or any HTTP calls from the command line.
database-indexing
Database indexing internals, index type selection, query plan analysis, and write-overhead tradeoffs across PostgreSQL, MySQL, and MongoDB. Use when user asks to "optimize queries", "create indexes", "fix slow queries", "read EXPLAIN output", "reduce query time", "index strategy", "database performance", "composite index", "covering index", "partial index", "index bloat", "unused indexes", or needs help diagnosing and resolving database performance problems.
testing-strategies
Testing strategies, patterns, and methodologies across the full testing spectrum. Use when asked about unit tests, integration tests, e2e tests, test pyramid, mocking, test doubles, TDD, property-based testing, snapshot testing, test coverage, mutation testing, contract testing, performance testing, test data management, CI/CD testing, flaky tests, test anti-patterns, test organization, test isolation, test fixtures, test parameterization, or any testing strategy, approach, or methodology.
secret-scanner
This skill should be used when the user asks to "scan for secrets", "find API keys", "detect credentials", "check for hardcoded passwords", "find leaked tokens", "scan for sensitive keys", "check git history for secrets", "audit repository for credentials", or mentions secret detection, credential scanning, API key exposure, token leakage, password detection, or security key auditing.
terraform
Terraform infrastructure as code for provisioning, modules, state management, and workspaces. Use when user asks to "create infrastructure", "write Terraform", "manage state", "create module", "import resource", "plan changes", or any IaC tasks.
kubernetes
Kubernetes and kubectl mastery for deployments, services, pods, debugging, and cluster management. Use when user asks to "deploy to k8s", "create deployment", "debug pod", "kubectl commands", "scale service", "check pod logs", "create ingress", or any Kubernetes tasks.