	Merge branch 'auto_auth' into integration

tests/integration/fixtures/host_mode_api_password.yaml (new file, 14 lines)
@@ -0,0 +1,14 @@
esphome:
  name: host-mode-api-password
host:
api:
  password: "test_password_123"
logger:
  level: DEBUG
# Test sensor to verify connection works
sensor:
  - platform: template
    name: Test Sensor
    id: test_sensor
    lambda: return 42.0;
    update_interval: 0.1s
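
For context, any client that talks to this fixture has to present the password during the API handshake. A minimal stand-alone sketch using aioesphomeapi follows; it is illustrative only (not part of this commit), and the 127.0.0.1:6053 address is an assumption, since the integration harness normally chooses host and port itself.

# Illustrative sketch -- not part of this commit. Assumes the compiled
# fixture is already running and reachable on 127.0.0.1:6053.
import asyncio

from aioesphomeapi import APIClient


async def check_password_auth() -> None:
    client = APIClient("127.0.0.1", 6053, "test_password_123")
    await client.connect(login=True)  # login=True sends the configured password
    info = await client.device_info()
    assert info.uses_password is True
    await client.disconnect()


asyncio.run(check_password_auth())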
							
								
								
									
tests/integration/fixtures/scheduler_null_name.yaml (new file, 43 lines)
@@ -0,0 +1,43 @@
esphome:
  name: scheduler-null-name

host:

logger:
  level: DEBUG

api:
  services:
    - service: test_null_name
      then:
        - lambda: |-
            // First, create a scenario that would trigger the crash
            // The crash happens when defer() is called with a name that would be cancelled

            // Test 1: Create a defer with a valid name
            App.scheduler.set_timeout(nullptr, "test_defer", 0, []() {
              ESP_LOGI("TEST", "First defer should be cancelled");
            });

            // Test 2: Create another defer with the same name - this triggers cancel_item_locked_
            // In the unfixed code, this would crash if the name was NULL
            App.scheduler.set_timeout(nullptr, "test_defer", 0, []() {
              ESP_LOGI("TEST", "Second defer executed");
            });

            // Test 3: Now test with nullptr - this is the actual crash scenario
            // Create a defer item without a name (like voice assistant does)
            const char* null_name = nullptr;
            App.scheduler.set_timeout(nullptr, null_name, 0, []() {
              ESP_LOGI("TEST", "Defer with null name executed");
            });

            // Test 4: Create another defer with null name - this would trigger the crash
            App.scheduler.set_timeout(nullptr, null_name, 0, []() {
              ESP_LOGI("TEST", "Second null defer executed");
            });

            // Test 5: Verify scheduler still works
            App.scheduler.set_timeout(nullptr, "valid_timeout", 50, []() {
              ESP_LOGI("TEST", "Test completed successfully");
            });
							
								
								
									
tests/integration/test_host_mode_api_password.py (new file, 53 lines)
@@ -0,0 +1,53 @@
"""Integration test for API password authentication."""

from __future__ import annotations

import asyncio

from aioesphomeapi import APIConnectionError
import pytest

from .types import APIClientConnectedFactory, RunCompiledFunction


@pytest.mark.asyncio
async def test_host_mode_api_password(
    yaml_config: str,
    run_compiled: RunCompiledFunction,
    api_client_connected: APIClientConnectedFactory,
) -> None:
    """Test API authentication with password."""
    async with run_compiled(yaml_config):
        # Connect with correct password
        async with api_client_connected(password="test_password_123") as client:
            # Verify we can get device info
            device_info = await client.device_info()
            assert device_info is not None
            assert device_info.uses_password is True
            assert device_info.name == "host-mode-api-password"

            # Subscribe to states to ensure authenticated connection works
            loop = asyncio.get_running_loop()
            state_future: asyncio.Future[bool] = loop.create_future()
            states = {}

            def on_state(state):
                states[state.key] = state
                if not state_future.done():
                    state_future.set_result(True)

            client.subscribe_states(on_state)

            # Wait for at least one state with timeout
            try:
                await asyncio.wait_for(state_future, timeout=5.0)
            except asyncio.TimeoutError:
                pytest.fail("No states received within timeout")

            # Should have received at least one state (the test sensor)
            assert len(states) > 0

        # Test with wrong password - should fail
        with pytest.raises(APIConnectionError, match="Invalid password"):
            async with api_client_connected(password="wrong_password"):
                pass  # Should not reach here
							
								
								
									
tests/integration/test_scheduler_null_name.py (new file, 59 lines)
@@ -0,0 +1,59 @@
"""Test that scheduler handles NULL names safely without crashing."""

import asyncio
import re

import pytest

from .types import APIClientConnectedFactory, RunCompiledFunction


@pytest.mark.asyncio
async def test_scheduler_null_name(
    yaml_config: str,
    run_compiled: RunCompiledFunction,
    api_client_connected: APIClientConnectedFactory,
) -> None:
    """Test that scheduler handles NULL names safely without crashing."""

    loop = asyncio.get_running_loop()
    test_complete_future: asyncio.Future[bool] = loop.create_future()

    # Pattern to match test completion
    test_complete_pattern = re.compile(r"Test completed successfully")

    def check_output(line: str) -> None:
        """Check log output for test completion."""
        if not test_complete_future.done() and test_complete_pattern.search(line):
            test_complete_future.set_result(True)

    async with run_compiled(yaml_config, line_callback=check_output):
        async with api_client_connected() as client:
            # Verify we can connect
            device_info = await client.device_info()
            assert device_info is not None
            assert device_info.name == "scheduler-null-name"

            # List services
            _, services = await asyncio.wait_for(
                client.list_entities_services(), timeout=5.0
            )

            # Find our test service
            test_null_name_service = next(
                (s for s in services if s.name == "test_null_name"), None
            )
            assert test_null_name_service is not None, (
                "test_null_name service not found"
            )

            # Execute the test
            client.execute_service(test_null_name_service, {})

            # Wait for test completion
            try:
                await asyncio.wait_for(test_complete_future, timeout=10.0)
            except asyncio.TimeoutError:
                pytest.fail(
                    "Test did not complete within timeout - likely crashed due to NULL name"
                )
							
								
								
									
tests/script/test_determine_jobs.py (new file, 352 lines)
@@ -0,0 +1,352 @@
"""Unit tests for script/determine-jobs.py module."""

from collections.abc import Generator
import importlib.util
import json
import os
import subprocess
import sys
from unittest.mock import Mock, patch

import pytest

# Add the script directory to Python path so we can import the module
script_dir = os.path.abspath(
    os.path.join(os.path.dirname(__file__), "..", "..", "script")
)
sys.path.insert(0, script_dir)

spec = importlib.util.spec_from_file_location(
    "determine_jobs", os.path.join(script_dir, "determine-jobs.py")
)
determine_jobs = importlib.util.module_from_spec(spec)
spec.loader.exec_module(determine_jobs)


@pytest.fixture
def mock_should_run_integration_tests() -> Generator[Mock, None, None]:
    """Mock should_run_integration_tests from helpers."""
    with patch.object(determine_jobs, "should_run_integration_tests") as mock:
        yield mock


@pytest.fixture
def mock_should_run_clang_tidy() -> Generator[Mock, None, None]:
    """Mock should_run_clang_tidy from helpers."""
    with patch.object(determine_jobs, "should_run_clang_tidy") as mock:
        yield mock


@pytest.fixture
def mock_should_run_clang_format() -> Generator[Mock, None, None]:
    """Mock should_run_clang_format from helpers."""
    with patch.object(determine_jobs, "should_run_clang_format") as mock:
        yield mock


@pytest.fixture
def mock_should_run_python_linters() -> Generator[Mock, None, None]:
    """Mock should_run_python_linters from helpers."""
    with patch.object(determine_jobs, "should_run_python_linters") as mock:
        yield mock


@pytest.fixture
def mock_subprocess_run() -> Generator[Mock, None, None]:
    """Mock subprocess.run for list-components.py calls."""
    with patch.object(determine_jobs.subprocess, "run") as mock:
        yield mock


def test_main_all_tests_should_run(
    mock_should_run_integration_tests: Mock,
    mock_should_run_clang_tidy: Mock,
    mock_should_run_clang_format: Mock,
    mock_should_run_python_linters: Mock,
    mock_subprocess_run: Mock,
    capsys: pytest.CaptureFixture[str],
) -> None:
    """Test when all tests should run."""
    mock_should_run_integration_tests.return_value = True
    mock_should_run_clang_tidy.return_value = True
    mock_should_run_clang_format.return_value = True
    mock_should_run_python_linters.return_value = True

    # Mock list-components.py output
    mock_result = Mock()
    mock_result.stdout = "wifi\napi\nsensor\n"
    mock_subprocess_run.return_value = mock_result

    # Run main function with mocked argv
    with patch("sys.argv", ["determine-jobs.py"]):
        determine_jobs.main()

    # Check output
    captured = capsys.readouterr()
    output = json.loads(captured.out)

    assert output["integration_tests"] is True
    assert output["clang_tidy"] is True
    assert output["clang_format"] is True
    assert output["python_linters"] is True
    assert output["changed_components"] == ["wifi", "api", "sensor"]
    assert output["component_test_count"] == 3


def test_main_no_tests_should_run(
    mock_should_run_integration_tests: Mock,
    mock_should_run_clang_tidy: Mock,
    mock_should_run_clang_format: Mock,
    mock_should_run_python_linters: Mock,
    mock_subprocess_run: Mock,
    capsys: pytest.CaptureFixture[str],
) -> None:
    """Test when no tests should run."""
    mock_should_run_integration_tests.return_value = False
    mock_should_run_clang_tidy.return_value = False
    mock_should_run_clang_format.return_value = False
    mock_should_run_python_linters.return_value = False

    # Mock empty list-components.py output
    mock_result = Mock()
    mock_result.stdout = ""
    mock_subprocess_run.return_value = mock_result

    # Run main function with mocked argv
    with patch("sys.argv", ["determine-jobs.py"]):
        determine_jobs.main()

    # Check output
    captured = capsys.readouterr()
    output = json.loads(captured.out)

    assert output["integration_tests"] is False
    assert output["clang_tidy"] is False
    assert output["clang_format"] is False
    assert output["python_linters"] is False
    assert output["changed_components"] == []
    assert output["component_test_count"] == 0


def test_main_list_components_fails(
    mock_should_run_integration_tests: Mock,
    mock_should_run_clang_tidy: Mock,
    mock_should_run_clang_format: Mock,
    mock_should_run_python_linters: Mock,
    mock_subprocess_run: Mock,
    capsys: pytest.CaptureFixture[str],
) -> None:
    """Test when list-components.py fails."""
    mock_should_run_integration_tests.return_value = True
    mock_should_run_clang_tidy.return_value = True
    mock_should_run_clang_format.return_value = True
    mock_should_run_python_linters.return_value = True

    # Mock list-components.py failure
    mock_subprocess_run.side_effect = subprocess.CalledProcessError(1, "cmd")

    # Run main function with mocked argv - should raise
    with patch("sys.argv", ["determine-jobs.py"]):
        with pytest.raises(subprocess.CalledProcessError):
            determine_jobs.main()


def test_main_with_branch_argument(
    mock_should_run_integration_tests: Mock,
    mock_should_run_clang_tidy: Mock,
    mock_should_run_clang_format: Mock,
    mock_should_run_python_linters: Mock,
    mock_subprocess_run: Mock,
    capsys: pytest.CaptureFixture[str],
) -> None:
    """Test with branch argument."""
    mock_should_run_integration_tests.return_value = False
    mock_should_run_clang_tidy.return_value = True
    mock_should_run_clang_format.return_value = False
    mock_should_run_python_linters.return_value = True

    # Mock list-components.py output
    mock_result = Mock()
    mock_result.stdout = "mqtt\n"
    mock_subprocess_run.return_value = mock_result

    with patch("sys.argv", ["script.py", "-b", "main"]):
        determine_jobs.main()

    # Check that functions were called with branch
    mock_should_run_integration_tests.assert_called_once_with("main")
    mock_should_run_clang_tidy.assert_called_once_with("main")
    mock_should_run_clang_format.assert_called_once_with("main")
    mock_should_run_python_linters.assert_called_once_with("main")

    # Check that list-components.py was called with branch
    mock_subprocess_run.assert_called_once()
    call_args = mock_subprocess_run.call_args[0][0]
    assert "--changed" in call_args
    assert "-b" in call_args
    assert "main" in call_args

    # Check output
    captured = capsys.readouterr()
    output = json.loads(captured.out)

    assert output["integration_tests"] is False
    assert output["clang_tidy"] is True
    assert output["clang_format"] is False
    assert output["python_linters"] is True
    assert output["changed_components"] == ["mqtt"]
    assert output["component_test_count"] == 1


def test_should_run_integration_tests(
    monkeypatch: pytest.MonkeyPatch,
) -> None:
    """Test should_run_integration_tests function."""
    # Core C++ files trigger tests
    with patch.object(
        determine_jobs, "changed_files", return_value=["esphome/core/component.cpp"]
    ):
        result = determine_jobs.should_run_integration_tests()
        assert result is True

    # Core Python files trigger tests
    with patch.object(
        determine_jobs, "changed_files", return_value=["esphome/core/config.py"]
    ):
        result = determine_jobs.should_run_integration_tests()
        assert result is True

    # Python files directly in esphome/ do NOT trigger tests
    with patch.object(
        determine_jobs, "changed_files", return_value=["esphome/config.py"]
    ):
        result = determine_jobs.should_run_integration_tests()
        assert result is False

    # Python files in subdirectories (not core) do NOT trigger tests
    with patch.object(
        determine_jobs,
        "changed_files",
        return_value=["esphome/dashboard/web_server.py"],
    ):
        result = determine_jobs.should_run_integration_tests()
        assert result is False


def test_should_run_integration_tests_with_branch() -> None:
    """Test should_run_integration_tests with branch argument."""
    with patch.object(determine_jobs, "changed_files") as mock_changed:
        mock_changed.return_value = []
        determine_jobs.should_run_integration_tests("release")
        mock_changed.assert_called_once_with("release")


def test_should_run_integration_tests_component_dependency() -> None:
    """Test that integration tests run when components used in fixtures change."""
    with patch.object(
        determine_jobs, "changed_files", return_value=["esphome/components/api/api.cpp"]
    ):
        with patch.object(
            determine_jobs, "get_components_from_integration_fixtures"
        ) as mock_fixtures:
            mock_fixtures.return_value = {"api", "sensor"}
            with patch.object(determine_jobs, "get_all_dependencies") as mock_deps:
                mock_deps.return_value = {"api", "sensor", "network"}
                result = determine_jobs.should_run_integration_tests()
                assert result is True


@pytest.mark.parametrize(
    ("check_returncode", "changed_files", "expected_result"),
    [
        (0, [], True),  # Hash changed - need full scan
        (1, ["esphome/core.cpp"], True),  # C++ file changed
        (1, ["README.md"], False),  # No C++ files changed
    ],
)
def test_should_run_clang_tidy(
    check_returncode: int,
    changed_files: list[str],
    expected_result: bool,
) -> None:
    """Test should_run_clang_tidy function."""
    with patch.object(determine_jobs, "changed_files", return_value=changed_files):
        # Test with hash check returning specific code
        with patch("subprocess.run") as mock_run:
            mock_run.return_value = Mock(returncode=check_returncode)
            result = determine_jobs.should_run_clang_tidy()
            assert result == expected_result

        # Test with hash check failing (exception)
        if check_returncode != 0:
            with patch("subprocess.run", side_effect=Exception("Failed")):
                result = determine_jobs.should_run_clang_tidy()
            assert result is True  # Fail safe - run clang-tidy


def test_should_run_clang_tidy_with_branch() -> None:
    """Test should_run_clang_tidy with branch argument."""
    with patch.object(determine_jobs, "changed_files") as mock_changed:
        mock_changed.return_value = []
        with patch("subprocess.run") as mock_run:
            mock_run.return_value = Mock(returncode=1)  # Hash unchanged
            determine_jobs.should_run_clang_tidy("release")
            mock_changed.assert_called_once_with("release")


@pytest.mark.parametrize(
    ("changed_files", "expected_result"),
    [
        (["esphome/core.py"], True),
        (["script/test.py"], True),
        (["esphome/test.pyi"], True),  # .pyi files should trigger
        (["README.md"], False),
        ([], False),
    ],
)
def test_should_run_python_linters(
    changed_files: list[str], expected_result: bool
) -> None:
    """Test should_run_python_linters function."""
    with patch.object(determine_jobs, "changed_files", return_value=changed_files):
        result = determine_jobs.should_run_python_linters()
        assert result == expected_result


def test_should_run_python_linters_with_branch() -> None:
    """Test should_run_python_linters with branch argument."""
    with patch.object(determine_jobs, "changed_files") as mock_changed:
        mock_changed.return_value = []
        determine_jobs.should_run_python_linters("release")
        mock_changed.assert_called_once_with("release")


@pytest.mark.parametrize(
    ("changed_files", "expected_result"),
    [
        (["esphome/core.cpp"], True),
        (["esphome/core.h"], True),
        (["test.hpp"], True),
        (["test.cc"], True),
        (["test.cxx"], True),
        (["test.c"], True),
        (["test.tcc"], True),
        (["README.md"], False),
        ([], False),
    ],
)
def test_should_run_clang_format(
    changed_files: list[str], expected_result: bool
) -> None:
    """Test should_run_clang_format function."""
    with patch.object(determine_jobs, "changed_files", return_value=changed_files):
        result = determine_jobs.should_run_clang_format()
        assert result == expected_result


def test_should_run_clang_format_with_branch() -> None:
    """Test should_run_clang_format with branch argument."""
    with patch.object(determine_jobs, "changed_files") as mock_changed:
        mock_changed.return_value = []
        determine_jobs.should_run_clang_format("release")
        mock_changed.assert_called_once_with("release")
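
Taken together, the assertions in these tests pin down the JSON document that script/determine-jobs.py writes to stdout. As an illustrative summary (values copied from test_main_all_tests_should_run, not an additional guarantee), the output has this shape:

# Shape of the determine-jobs.py output asserted above; values are the ones
# used in test_main_all_tests_should_run.
expected_output = {
    "integration_tests": True,
    "clang_tidy": True,
    "clang_format": True,
    "python_linters": True,
    "changed_components": ["wifi", "api", "sensor"],
    "component_test_count": 3,
}
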
@@ -27,6 +27,7 @@ _filter_changed_ci = helpers._filter_changed_ci
 _filter_changed_local = helpers._filter_changed_local
 build_all_include = helpers.build_all_include
 print_file_list = helpers.print_file_list
+get_all_dependencies = helpers.get_all_dependencies
 
 
 @pytest.mark.parametrize(
@@ -154,6 +155,14 @@ def test_github_actions_push_event(monkeypatch: MonkeyPatch) -> None:
         assert result == expected_files
 
 
+@pytest.fixture(autouse=True)
+def clear_caches():
+    """Clear function caches before each test."""
+    # Clear the cache for _get_changed_files_github_actions
+    _get_changed_files_github_actions.cache_clear()
+    yield
+
+
 def test_get_changed_files_github_actions_pull_request(
     monkeypatch: MonkeyPatch,
 ) -> None:
@@ -847,3 +856,159 @@ def test_print_file_list_default_title(capsys: pytest.CaptureFixture[str]) -> No
 
     assert "Files:" in captured.out
     assert "    test.cpp" in captured.out
+
+
+@pytest.mark.parametrize(
+    ("component_configs", "initial_components", "expected_components"),
+    [
+        # No dependencies
+        (
+            {"sensor": ([], [])},  # (dependencies, auto_load)
+            {"sensor"},
+            {"sensor"},
+        ),
+        # Simple dependencies
+        (
+            {
+                "sensor": (["esp32"], []),
+                "esp32": ([], []),
+            },
+            {"sensor"},
+            {"sensor", "esp32"},
+        ),
+        # Auto-load components
+        (
+            {
+                "light": ([], ["output", "power_supply"]),
+                "output": ([], []),
+                "power_supply": ([], []),
+            },
+            {"light"},
+            {"light", "output", "power_supply"},
+        ),
+        # Transitive dependencies
+        (
+            {
+                "comp_a": (["comp_b"], []),
+                "comp_b": (["comp_c"], []),
+                "comp_c": ([], []),
+            },
+            {"comp_a"},
+            {"comp_a", "comp_b", "comp_c"},
+        ),
+        # Dependencies with dots (sensor.base)
+        (
+            {
+                "my_comp": (["sensor.base", "binary_sensor.base"], []),
+                "sensor": ([], []),
+                "binary_sensor": ([], []),
+            },
+            {"my_comp"},
+            {"my_comp", "sensor", "binary_sensor"},
+        ),
+        # Circular dependencies (should not cause infinite loop)
+        (
+            {
+                "comp_a": (["comp_b"], []),
+                "comp_b": (["comp_a"], []),
+            },
+            {"comp_a"},
+            {"comp_a", "comp_b"},
+        ),
+    ],
+)
+def test_get_all_dependencies(
+    component_configs: dict[str, tuple[list[str], list[str]]],
+    initial_components: set[str],
+    expected_components: set[str],
+) -> None:
+    """Test dependency resolution for components."""
+    with patch("esphome.loader.get_component") as mock_get_component:
+
+        def get_component_side_effect(name: str):
+            if name in component_configs:
+                deps, auto_load = component_configs[name]
+                comp = Mock()
+                comp.dependencies = deps
+                comp.auto_load = auto_load
+                return comp
+            return None
+
+        mock_get_component.side_effect = get_component_side_effect
+
+        result = helpers.get_all_dependencies(initial_components)
+
+        assert result == expected_components
+
+
+def test_get_all_dependencies_handles_missing_components() -> None:
+    """Test handling of components that can't be loaded."""
+    with patch("esphome.loader.get_component") as mock_get_component:
+        # First component exists, its dependency doesn't
+        comp = Mock()
+        comp.dependencies = ["missing_comp"]
+        comp.auto_load = []
+
+        mock_get_component.side_effect = (
+            lambda name: comp if name == "existing" else None
+        )
+
+        result = helpers.get_all_dependencies({"existing", "nonexistent"})
+
+        # Should still include all components, even if some can't be loaded
+        assert result == {"existing", "nonexistent", "missing_comp"}
+
+
+def test_get_all_dependencies_empty_set() -> None:
+    """Test with empty initial component set."""
+    result = helpers.get_all_dependencies(set())
+    assert result == set()
+
+
+def test_get_components_from_integration_fixtures() -> None:
+    """Test extraction of components from fixture YAML files."""
+    yaml_content = {
+        "sensor": [{"platform": "template", "name": "test"}],
+        "binary_sensor": [{"platform": "gpio", "pin": 5}],
+        "esphome": {"name": "test"},
+        "api": {},
+    }
+    expected_components = {
+        "sensor",
+        "binary_sensor",
+        "esphome",
+        "api",
+        "template",
+        "gpio",
+    }
+
+    mock_yaml_file = Mock()
+
+    with (
+        patch("pathlib.Path.glob") as mock_glob,
+        patch("builtins.open", create=True),
+        patch("yaml.safe_load", return_value=yaml_content),
+    ):
+        mock_glob.return_value = [mock_yaml_file]
+
+        components = helpers.get_components_from_integration_fixtures()
+
+        assert components == expected_components
+
+
+@pytest.mark.parametrize(
+    "output,expected",
+    [
+        ("wifi\napi\nsensor\n", ["wifi", "api", "sensor"]),
+        ("wifi\n", ["wifi"]),
+        ("", []),
+        ("  \n  \n", []),
+        ("\n\n", []),
+        ("  wifi  \n  api  \n", ["wifi", "api"]),
+        ("wifi\n\napi\n\nsensor", ["wifi", "api", "sensor"]),
+    ],
+)
+def test_parse_list_components_output(output: str, expected: list[str]) -> None:
+    """Test parse_list_components_output function."""
+    result = helpers.parse_list_components_output(output)
+    assert result == expected
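
The parametrized cases above fully describe the expected behaviour of parse_list_components_output: split the output on newlines, trim whitespace, and drop empty entries. A minimal reimplementation consistent with those cases (the real helper in script/helpers.py may differ in detail) looks like this:

def parse_list_components_output(output: str) -> list[str]:
    # One component name per line; strip surrounding whitespace, drop blanks.
    return [line.strip() for line in output.splitlines() if line.strip()]
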
@@ -8,9 +8,19 @@ from typing import Any
 import pytest
 
 from esphome.config_validation import Invalid
-from esphome.const import CONF_DEVICE_ID, CONF_DISABLED_BY_DEFAULT, CONF_ICON, CONF_NAME
+from esphome.const import (
+    CONF_DEVICE_ID,
+    CONF_DISABLED_BY_DEFAULT,
+    CONF_ICON,
+    CONF_INTERNAL,
+    CONF_NAME,
+)
 from esphome.core import CORE, ID, entity_helpers
-from esphome.core.entity_helpers import get_base_entity_object_id, setup_entity
+from esphome.core.entity_helpers import (
+    entity_duplicate_validator,
+    get_base_entity_object_id,
+    setup_entity,
+)
 from esphome.cpp_generator import MockObj
 from esphome.helpers import sanitize, snake_case
 
@@ -493,11 +503,6 @@
 
 def test_entity_duplicate_validator() -> None:
     """Test the entity_duplicate_validator function."""
-    from esphome.core.entity_helpers import entity_duplicate_validator
-
-    # Reset CORE unique_ids for clean test
-    CORE.unique_ids.clear()
-
     # Create validator for sensor platform
     validator = entity_duplicate_validator("sensor")
 
@@ -523,11 +528,6 @@
 
 def test_entity_duplicate_validator_with_devices() -> None:
     """Test entity_duplicate_validator with devices."""
-    from esphome.core.entity_helpers import entity_duplicate_validator
-
-    # Reset CORE unique_ids for clean test
-    CORE.unique_ids.clear()
-
     # Create validator for sensor platform
     validator = entity_duplicate_validator("sensor")
 
@@ -605,3 +605,36 @@
     )
     # This should succeed
     assert result is not None
+
+
+def test_entity_duplicate_validator_internal_entities() -> None:
+    """Test that internal entities are excluded from duplicate name validation."""
+    # Create validator for sensor platform
+    validator = entity_duplicate_validator("sensor")
+
+    # First entity should pass
+    config1 = {CONF_NAME: "Temperature"}
+    validated1 = validator(config1)
+    assert validated1 == config1
+    assert ("sensor", "temperature") in CORE.unique_ids
+
+    # Internal entity with same name should pass (not added to unique_ids)
+    config2 = {CONF_NAME: "Temperature", CONF_INTERNAL: True}
+    validated2 = validator(config2)
+    assert validated2 == config2
+    # Internal entity should not be added to unique_ids
+    assert len([k for k in CORE.unique_ids if k == ("sensor", "temperature")]) == 1
+
+    # Another internal entity with same name should also pass
+    config3 = {CONF_NAME: "Temperature", CONF_INTERNAL: True}
+    validated3 = validator(config3)
+    assert validated3 == config3
+    # Still only one entry in unique_ids (from the non-internal entity)
+    assert len([k for k in CORE.unique_ids if k == ("sensor", "temperature")]) == 1
+
+    # Non-internal entity with same name should fail
+    config4 = {CONF_NAME: "Temperature"}
+    with pytest.raises(
+        Invalid, match=r"Duplicate sensor entity with name 'Temperature' found"
+    ):
+        validator(config4)
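
This test depends on the validator skipping entities marked internal: true, so an internal entity never claims a (platform, object_id) slot in CORE.unique_ids and therefore cannot collide with a visible entity of the same name. A rough sketch of that behaviour is shown below; it reuses the imports from the test module above, and the helper names and exact key derivation are assumptions, the real implementation being esphome/core/entity_helpers.py.

def entity_duplicate_validator(platform: str):
    def validator(config: dict) -> dict:
        # Internal entities are not exported, so they are exempt from
        # duplicate-name checking and never register a unique id.
        if config.get(CONF_INTERNAL):
            return config
        unique_id = (platform, snake_case(sanitize(config[CONF_NAME])))
        if unique_id in CORE.unique_ids:
            raise Invalid(
                f"Duplicate {platform} entity with name '{config[CONF_NAME]}' found"
            )
        CORE.unique_ids.add(unique_id)
        return config

    return validator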