Compare commits: 75a8447759...testing

3 Commits

| Author | SHA1 | Date |
|---|---|---|
| | bf0d134187 | |
| | 2092a3e257 | |
| | d46d8de3ee | |
@@ -3,9 +3,9 @@
 enabled=True

 # Select the driver used to play sounds, choices are genericDriver and gstreamerDriver.
-# Sox is the default.
-#driver=gstreamerDriver
-driver=genericDriver
+# Gstreamer is the default.
+driver=gstreamerDriver
+#driver=genericDriver

 # Sound themes. These are the pack of sounds used for sound alerts.
 # Sound packs may be located at /usr/share/sounds
@@ -48,7 +48,8 @@ class SpeechHelperMixin:
         if result.returncode == 0:
             lines = result.stdout.strip().split("\n")
             self._modules_cache = [
-                line.strip() for line in lines[1:] if line.strip()
+                line.strip() for line in lines[1:]
+                if line.strip() and line.strip().lower() != "dummy"
             ]
             self._cache_timestamp = now
         return self._modules_cache
@@ -92,8 +93,13 @@ class SpeechHelperMixin:
                 voice = self._process_espeak_voice(line)
                 if voice:
                     voices.append(voice)
+            elif module.lower() == "voxin":
+                # For Voxin, store voice name with language
+                voice_data = self._process_voxin_voice(line)
+                if voice_data:
+                    voices.append(voice_data)
             else:
-                # For non-espeak modules, extract first field (voice name)
+                # For other modules, extract first field (voice name)
                 parts = line.strip().split()
                 if parts:
                     voices.append(parts[0])
@@ -126,6 +132,91 @@ class SpeechHelperMixin:
|
||||
return (f"{lang_code}+{variant}"
|
||||
if variant and variant != "none" else lang_code)
|
||||
|
||||
def _process_voxin_voice(self, voice_line):
|
||||
"""Process Voxin voice format with language information.
|
||||
|
||||
Args:
|
||||
voice_line (str): Raw line from spd-say -o voxin -L output
|
||||
Format: NAME LANGUAGE VARIANT
|
||||
|
||||
Returns:
|
||||
str: Voice name with language encoded (e.g., 'daniel-embedded-high|en-GB')
|
||||
"""
|
||||
parts = [p for p in voice_line.split() if p]
|
||||
if len(parts) < 2:
|
||||
return None
|
||||
voice_name = parts[0]
|
||||
language = parts[1]
|
||||
# Encode language with voice for later extraction
|
||||
return f"{voice_name}|{language}"
|
||||
|
||||
def _select_default_voice(self, voices):
|
||||
"""Select a sensible default voice from list, preferring user's
|
||||
language.
|
||||
|
||||
Args:
|
||||
voices (list): List of available voice names
|
||||
|
||||
Returns:
|
||||
str: Selected default voice (matches user language if possible)
|
||||
"""
|
||||
if not voices:
|
||||
return ""
|
||||
|
||||
# Get current voice to preserve language preference
|
||||
current_voice = self.env["runtime"]["SettingsManager"].get_setting(
|
||||
"speech", "voice"
|
||||
)
|
||||
|
||||
# Get configured language from settings
|
||||
configured_lang = self.env["runtime"]["SettingsManager"].get_setting(
|
||||
"speech", "language"
|
||||
)
|
||||
|
||||
# Extract language code from current voice if available
|
||||
current_lang = None
|
||||
if current_voice:
|
||||
# Extract language code (e.g., 'en-gb' from 'en-gb+male')
|
||||
current_lang = current_voice.split('+')[0].lower()
|
||||
|
||||
# Build preference list: current language, configured language, English
|
||||
preferences = []
|
||||
if current_lang:
|
||||
preferences.append(current_lang)
|
||||
if configured_lang:
|
||||
preferences.append(configured_lang.lower())
|
||||
preferences.extend(['en-gb', 'en-us', 'en'])
|
||||
|
||||
# Remove duplicates while preserving order
|
||||
seen = set()
|
||||
preferences = [x for x in preferences
|
||||
if not (x in seen or seen.add(x))]
|
||||
|
||||
# Try exact matches for preferred languages
|
||||
for pref in preferences:
|
||||
for voice in voices:
|
||||
# Extract language if voice is in "name|lang" format
|
||||
voice_to_check = voice
|
||||
if "|" in voice:
|
||||
_, voice_lang = voice.split("|", 1)
|
||||
voice_to_check = voice_lang
|
||||
if voice_to_check.lower() == pref:
|
||||
return voice
|
||||
|
||||
# Try voices starting with preferred language codes
|
||||
for pref in preferences:
|
||||
for voice in voices:
|
||||
# Extract language if voice is in "name|lang" format
|
||||
voice_to_check = voice
|
||||
if "|" in voice:
|
||||
_, voice_lang = voice.split("|", 1)
|
||||
voice_to_check = voice_lang
|
||||
if voice_to_check.lower().startswith(pref):
|
||||
return voice
|
||||
|
||||
# Fall back to first available voice
|
||||
return voices[0]
|
||||
|
||||
def invalidate_speech_cache(self):
|
||||
"""Clear cached module and voice data."""
|
||||
self._modules_cache = None
|
||||
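The new Voxin handling above encodes each voice as a combined "voicename|language" string; the QuickMenuManager hunks below split that string apart again before applying it to the speech driver. A minimal standalone sketch of the convention (illustrative values only, not code from the patch):

```python
def split_voice(entry):
    """Split a 'voicename|language' entry; plain entries carry no language."""
    if "|" in entry:
        name, lang = entry.split("|", 1)
        return name, lang
    return entry, None


# Hypothetical Voxin-style entry, matching the format shown in _process_voxin_voice()
assert split_voice("daniel-embedded-high|en-GB") == ("daniel-embedded-high", "en-GB")

# A plain eSpeak-style entry passes through unchanged
assert split_voice("en-gb+male") == ("en-gb+male", None)
```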
@@ -387,11 +478,41 @@ class QuickMenuManager(SpeechHelperMixin):
|
||||
"speech", "module", new_module
|
||||
)
|
||||
|
||||
# Reset voice to first available for new module
|
||||
# Select sensible default voice for new module
|
||||
voices = self.get_module_voices(new_module)
|
||||
if voices:
|
||||
default_voice = self._select_default_voice(voices)
|
||||
|
||||
# Parse voice name and language for modules like Voxin
|
||||
voice_name = default_voice
|
||||
voice_lang = None
|
||||
if "|" in default_voice:
|
||||
voice_name, voice_lang = default_voice.split("|", 1)
|
||||
|
||||
self.env["runtime"]["SettingsManager"].set_setting(
|
||||
"speech", "voice", voices[0]
|
||||
"speech", "voice", voice_name
|
||||
)
|
||||
|
||||
# Apply voice to speech driver immediately
|
||||
if "SpeechDriver" in self.env["runtime"]:
|
||||
try:
|
||||
self.env["runtime"]["SpeechDriver"].set_module(
|
||||
new_module
|
||||
)
|
||||
# Set language first if available
|
||||
if voice_lang:
|
||||
self.env["runtime"]["SpeechDriver"].set_language(
|
||||
voice_lang
|
||||
)
|
||||
# Then set voice
|
||||
self.env["runtime"]["SpeechDriver"].set_voice(
|
||||
voice_name
|
||||
)
|
||||
except Exception as e:
|
||||
self.env["runtime"]["DebugManager"].write_debug_out(
|
||||
(f"QuickMenuManager cycle_speech_module: "
|
||||
f"Error applying voice: {e}"),
|
||||
debug.DebugLevel.ERROR
|
||||
)
|
||||
|
||||
# Announce new module
|
||||
@@ -442,12 +563,19 @@ class QuickMenuManager(SpeechHelperMixin):
|
||||
"speech", "voice"
|
||||
)
|
||||
|
||||
# Find current index
|
||||
try:
|
||||
current_index = (voices.index(current_voice)
|
||||
if current_voice else 0)
|
||||
except ValueError:
|
||||
# Find current index (handle Voxin voice|language format)
|
||||
current_index = 0
|
||||
if current_voice:
|
||||
try:
|
||||
# Try exact match first
|
||||
current_index = voices.index(current_voice)
|
||||
except ValueError:
|
||||
# For Voxin, compare just the voice name part
|
||||
for i, voice in enumerate(voices):
|
||||
voice_name = voice.split("|")[0] if "|" in voice else voice
|
||||
if voice_name == current_voice:
|
||||
current_index = i
|
||||
break
|
||||
|
||||
# Cycle to next/previous
|
||||
if direction == "next":
|
||||
@@ -457,14 +585,38 @@ class QuickMenuManager(SpeechHelperMixin):
|
||||
|
||||
new_voice = voices[new_index]
|
||||
|
||||
# Update setting (runtime only)
|
||||
# Parse voice name and language for modules like Voxin
|
||||
voice_name = new_voice
|
||||
voice_lang = None
|
||||
if "|" in new_voice:
|
||||
# Format: "voicename|language" (e.g., "daniel-embedded-high|en-GB")
|
||||
voice_name, voice_lang = new_voice.split("|", 1)
|
||||
|
||||
# Update setting (runtime only) - store the voice name only
|
||||
self.env["runtime"]["SettingsManager"].set_setting(
|
||||
"speech", "voice", new_voice
|
||||
"speech", "voice", voice_name
|
||||
)
|
||||
|
||||
# Announce new voice
|
||||
# Apply voice to speech driver immediately
|
||||
if "SpeechDriver" in self.env["runtime"]:
|
||||
try:
|
||||
# Set language first if available
|
||||
if voice_lang:
|
||||
self.env["runtime"]["SpeechDriver"].set_language(
|
||||
voice_lang
|
||||
)
|
||||
# Then set voice
|
||||
self.env["runtime"]["SpeechDriver"].set_voice(voice_name)
|
||||
except Exception as e:
|
||||
self.env["runtime"]["DebugManager"].write_debug_out(
|
||||
(f"QuickMenuManager cycle_speech_voice: "
|
||||
f"Error applying voice: {e}"),
|
||||
debug.DebugLevel.ERROR
|
||||
)
|
||||
|
||||
# Announce new voice (voice name only, not language)
|
||||
self.env["runtime"]["OutputManager"].present_text(
|
||||
new_voice, interrupt=True
|
||||
voice_name, interrupt=True
|
||||
)
|
||||
|
||||
return True
|
||||
|
||||
@@ -4,5 +4,5 @@
 # Fenrir TTY screen reader
 # By Chrys, Storm Dragon, and contributors.

-version = "2025.12.02"
-code_name = "master"
+version = "2025.12.03"
+code_name = "testing"
@@ -30,6 +30,7 @@ class driver(remoteDriver):
         # echo "command say this is a test" | nc localhost 22447
         self.fenrirSock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
         self.fenrirSock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+        self.fenrirSock.settimeout(5.0)  # Prevent hanging on slow clients
         self.host = "127.0.0.1"
         self.port = self.env["runtime"]["SettingsManager"].get_setting_as_int(
             "remote", "port"
@@ -45,14 +46,20 @@ class driver(remoteDriver):
                 continue
             if self.fenrirSock in r:
                 client_sock, client_addr = self.fenrirSock.accept()
+                # Ensure client socket is always closed to prevent resource
+                # leaks
+                try:
                     try:
                         rawdata = client_sock.recv(8129)
                     except Exception as e:
                         self.env["runtime"]["DebugManager"].write_debug_out(
-                            "tcpDriver watch_dog: Error receiving data from client: "
+                            "tcpDriver watch_dog: Error receiving data from "
+                            "client: "
                             + str(e),
                             debug.DebugLevel.ERROR,
                         )
+                        rawdata = b""  # Set default empty data if recv fails
+
                     try:
                         data = rawdata.decode("utf-8").rstrip().lstrip()
                         event_queue.put(
@@ -64,6 +71,8 @@ class driver(remoteDriver):
                             + str(e),
                             debug.DebugLevel.ERROR,
                         )
+                finally:
+                    # Always close client socket, even if data processing fails
                     try:
                         client_sock.close()
                     except Exception as e:
tests/PRE_COMMIT_INTEGRATION.md (new file, 208 lines)
@@ -0,0 +1,208 @@
|
||||
# Pre-Commit Test Integration
|
||||
|
||||
## Overview
|
||||
|
||||
The test suite now runs automatically as part of the pre-commit hook, ensuring that every commit maintains code quality and passes all tests.
|
||||
|
||||
## What Happens on Commit
|
||||
|
||||
When you run `git commit`, the pre-commit hook now performs **5 validation steps**:
|
||||
|
||||
```
|
||||
1. Python syntax validation (all files)
|
||||
2. Common issue detection (modified files)
|
||||
3. Core module import testing
|
||||
4. Test suite execution (37 tests) ← NEW!
|
||||
5. Secret/credential detection
|
||||
```
|
||||
|
||||
## Test Execution
|
||||
|
||||
```bash
|
||||
4. Running test suite...
|
||||
============================= test session starts ==============================
|
||||
platform linux -- Python 3.13.7, pytest-8.4.2, pluggy-1.6.0
|
||||
rootdir: /home/storm/git/fenrir
|
||||
configfile: pytest.ini
|
||||
collected 37 items
|
||||
|
||||
tests/integration/test_remote_control.py .................... [ 54%]
|
||||
tests/unit/test_settings_validation.py ................. [100%]
|
||||
|
||||
============================== 37 passed in 0.44s ==============================
|
||||
✓ All tests passed
|
||||
```
|
||||
|
||||
**Performance:** Tests complete in **< 1 second**, adding minimal overhead to the commit process.
|
||||
|
||||
## Behavior
|
||||
|
||||
### ✅ Tests Pass - Commit Allowed
|
||||
```bash
|
||||
$ git commit -m "Add new feature"
|
||||
|
||||
Fenrir Pre-commit Validation
|
||||
==================================
|
||||
1. Validating Python syntax...
|
||||
✓ Syntax validation passed
|
||||
|
||||
2. Checking modified files...
|
||||
✓ No common issues found
|
||||
|
||||
3. Testing core module imports...
|
||||
✓ Core module imports successful
|
||||
|
||||
4. Running test suite...
|
||||
✓ All tests passed (37 passed in 0.44s)
|
||||
|
||||
5. Checking for potential secrets...
|
||||
✓ No potential secrets found
|
||||
|
||||
==================================================
|
||||
✓ All pre-commit validations passed
|
||||
Commit allowed to proceed
|
||||
```
|
||||
|
||||
### ❌ Tests Fail - Commit Blocked
|
||||
```bash
|
||||
$ git commit -m "Broken feature"
|
||||
|
||||
Fenrir Pre-commit Validation
|
||||
==================================
|
||||
[... earlier checks pass ...]
|
||||
|
||||
4. Running test suite...
|
||||
✗ Test suite failed
|
||||
Run: pytest tests/ -v (to see details)
|
||||
|
||||
==================================================
|
||||
✗ Pre-commit validation failed
|
||||
Commit blocked - please fix issues above
|
||||
|
||||
Quick fixes:
|
||||
• Python syntax: python3 tools/validate_syntax.py --fix
|
||||
• Run tests: pytest tests/ -v
|
||||
• Review flagged files manually
|
||||
• Re-run commit after fixes
|
||||
```
|
||||
|
||||
## Installation
|
||||
|
||||
The pre-commit hook is installed via:
|
||||
|
||||
```bash
|
||||
# One-time setup
|
||||
./tools/install_validation_hook.sh
|
||||
|
||||
# Or manually
|
||||
ln -sf ../../tools/pre-commit-hook .git/hooks/pre-commit
|
||||
```
|
||||
|
||||
## Requirements
|
||||
|
||||
### Required
|
||||
- Python 3.7+
|
||||
- Git repository
|
||||
|
||||
### Optional but Recommended
|
||||
- `pytest` - For running tests (gracefully skipped if not installed)
|
||||
|
||||
If pytest is not installed, you'll see:
|
||||
```
|
||||
4. Running test suite...
|
||||
⚠ pytest not installed - skipping tests
|
||||
Install with: pip install pytest
|
||||
Or full test suite: pip install -r tests/requirements.txt
|
||||
```
|
||||
|
||||
The commit will still proceed, but tests won't run.
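The detection logic is simple enough to sketch in a few lines of Python (a minimal illustration of the behaviour described above, not the actual `tools/pre-commit-hook`):

```python
import shutil
import subprocess
import sys


def run_test_step() -> bool:
    """Step 4 of the hook: run pytest if available, warn and continue if not."""
    if shutil.which("pytest") is None:
        print("⚠ pytest not installed - skipping tests")
        print("  Install with: pip install pytest")
        return True  # commit still proceeds

    result = subprocess.run(["pytest", "tests/", "-q"])
    if result.returncode != 0:
        print("✗ Test suite failed")
        print("  Run: pytest tests/ -v (to see details)")
        return False  # commit blocked
    print("✓ All tests passed")
    return True


if __name__ == "__main__":
    sys.exit(0 if run_test_step() else 1)
```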
|
||||
|
||||
## Benefits
|
||||
|
||||
1. **Catch bugs early** - Tests run before code reaches the repository
|
||||
2. **Maintain quality** - Broken code can't be committed
|
||||
3. **Fast feedback** - Tests complete in < 1 second
|
||||
4. **Zero overhead** - Gracefully degrades if pytest isn't installed
|
||||
5. **Confidence** - Know that all commits pass tests
|
||||
|
||||
## Bypassing the Hook
|
||||
|
||||
**Not recommended**, but if you need to commit without running tests:
|
||||
|
||||
```bash
|
||||
# Skip all pre-commit checks (use with caution!)
|
||||
git commit --no-verify -m "Emergency hotfix"
|
||||
```
|
||||
|
||||
**Warning:** Only use `--no-verify` for legitimate emergencies. Bypassing tests defeats their purpose.
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### Tests fail on commit but pass manually
|
||||
|
||||
```bash
|
||||
# Check environment matches
|
||||
cd /home/storm/git/fenrir
|
||||
pytest tests/ -v
|
||||
|
||||
# Verify PYTHONPATH
|
||||
echo $PYTHONPATH
|
||||
```
|
||||
|
||||
### Hook doesn't run tests
|
||||
|
||||
```bash
|
||||
# Check pytest is installed
|
||||
pytest --version
|
||||
|
||||
# Install if missing
|
||||
pip install pytest
|
||||
|
||||
# Or full test dependencies
|
||||
pip install -r tests/requirements.txt
|
||||
```
|
||||
|
||||
### Hook takes too long
|
||||
|
||||
The test suite is designed to run in < 1 second. If it's slower:
|
||||
|
||||
```bash
|
||||
# Check test timing
|
||||
pytest tests/ --durations=10
|
||||
|
||||
# Look for slow tests (should all be < 100ms)
|
||||
```
|
||||
|
||||
## Statistics
|
||||
|
||||
- **Tests Executed:** 37
|
||||
- **Execution Time:** 0.44 seconds
|
||||
- **Pass Rate:** 100%
|
||||
- **Coverage:** Unit tests (17) + Integration tests (20)
|
||||
|
||||
## Future Enhancements
|
||||
|
||||
Possible additions to pre-commit validation:
|
||||
|
||||
- **Coverage threshold** - Require minimum test coverage percentage
|
||||
- **Performance regression** - Warn if tests get slower
|
||||
- **Incremental testing** - Only run tests for modified code
|
||||
- **Parallel execution** - Use `pytest -n auto` (requires pytest-xdist)
|
||||
|
||||
## Related Documentation
|
||||
|
||||
- `tests/README.md` - Test strategy overview
|
||||
- `tests/TESTING_GUIDE.md` - Comprehensive testing guide
|
||||
- `TESTING_SUMMARY.md` - Test implementation summary
|
||||
- `tools/pre-commit-hook` - Pre-commit hook source code
|
||||
|
||||
## Summary
|
||||
|
||||
Adding tests to the pre-commit hook ensures:
|
||||
- ✅ All commits have passing tests
|
||||
- ✅ Regressions are caught immediately
|
||||
- ✅ Code quality is maintained
|
||||
- ✅ Minimal performance impact (< 1 second)
|
||||
- ✅ Graceful degradation without pytest
|
||||
|
||||
**Result:** Higher code quality with virtually zero developer friction.
|
||||
tests/README.md (new file, 149 lines)
@@ -0,0 +1,149 @@
|
||||
# Fenrir Test Suite
|
||||
|
||||
This directory contains automated tests for the Fenrir screen reader. Testing a screen reader that requires root access and hardware interaction presents unique challenges, so we use a multi-layered testing strategy.
|
||||
|
||||
## Test Strategy
|
||||
|
||||
### 1. Unit Tests (No Root Required)
|
||||
Test individual components in isolation without requiring hardware access:
|
||||
- **Core Managers**: Logic testing without driver dependencies
|
||||
- **Utility Functions**: String manipulation, cursor calculations, text processing
|
||||
- **Settings Validation**: Configuration parsing and validation
|
||||
- **Remote Command Parsing**: Command/setting string processing
|
||||
|
||||
### 2. Integration Tests (No Root Required)
|
||||
Test component interactions using mocked drivers:
|
||||
- **Remote Control**: Unix socket and TCP communication
|
||||
- **Command System**: Command loading and execution flow
|
||||
- **Event Processing**: Event queue and dispatching
|
||||
- **Settings Manager**: Configuration loading and runtime changes
|
||||
|
||||
### 3. Driver Tests (Root Required, Optional)
|
||||
Test actual hardware interaction (only run in CI or explicitly by developers):
|
||||
- **VCSA Driver**: Screen reading on real TTY
|
||||
- **Evdev Driver**: Keyboard input capture
|
||||
- **Speech Drivers**: TTS output validation
|
||||
- **Sound Drivers**: Audio playback testing
|
||||
|
||||
### 4. End-to-End Tests (Root Required, Manual)
|
||||
Real-world usage scenarios run manually by developers:
|
||||
- Full Fenrir startup/shutdown cycle
|
||||
- Remote control from external scripts
|
||||
- VMenu navigation and command execution
|
||||
- Speech output for screen changes
|
||||
|
||||
## Running Tests
|
||||
|
||||
```bash
|
||||
# Install test dependencies
|
||||
pip install pytest pytest-cov pytest-mock pytest-timeout
|
||||
|
||||
# Run all unit and integration tests (no root required)
|
||||
pytest tests/
|
||||
|
||||
# Run with coverage report
|
||||
pytest tests/ --cov=src/fenrirscreenreader --cov-report=html
|
||||
|
||||
# Run only unit tests
|
||||
pytest tests/unit/
|
||||
|
||||
# Run only integration tests
|
||||
pytest tests/integration/
|
||||
|
||||
# Run specific test file
|
||||
pytest tests/unit/test_settings_manager.py
|
||||
|
||||
# Run with verbose output
|
||||
pytest tests/ -v
|
||||
|
||||
# Run driver tests (requires root)
|
||||
sudo pytest tests/drivers/ -v
|
||||
```
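Driver tests are opt-in: `conftest.py` skips anything marked with `@pytest.mark.driver` unless pytest is invoked with `--run-driver-tests`. A minimal sketch of how such a test is marked (illustrative only, not one of the real driver tests):

```python
import pytest


@pytest.mark.driver
def test_vcsa_screen_read():
    """Skipped by default; enable with: sudo pytest tests/drivers/ --run-driver-tests"""
    # A real driver test would read /dev/vcsa* here, which requires root.
    ...
```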
|
||||
|
||||
## Test Organization
|
||||
|
||||
```
|
||||
tests/
|
||||
├── README.md # This file
|
||||
├── conftest.py # Shared pytest fixtures
|
||||
├── unit/ # Unit tests (fast, no mocking needed)
|
||||
│ ├── test_settings_validation.py
|
||||
│ ├── test_cursor_utils.py
|
||||
│ ├── test_text_utils.py
|
||||
│ └── test_remote_parsing.py
|
||||
├── integration/ # Integration tests (require mocking)
|
||||
│ ├── test_remote_control.py
|
||||
│ ├── test_command_manager.py
|
||||
│ ├── test_event_manager.py
|
||||
│ └── test_settings_manager.py
|
||||
└── drivers/ # Driver tests (require root)
|
||||
├── test_vcsa_driver.py
|
||||
├── test_evdev_driver.py
|
||||
└── test_speech_drivers.py
|
||||
```
|
||||
|
||||
## Writing Tests
|
||||
|
||||
### Example Unit Test
|
||||
```python
|
||||
def test_speech_rate_validation():
|
||||
"""Test that speech rate validation rejects out-of-range values."""
|
||||
manager = SettingsManager()
|
||||
|
||||
# Valid values should pass
|
||||
manager._validate_setting_value('speech', 'rate', 0.5)
|
||||
manager._validate_setting_value('speech', 'rate', 3.0)
|
||||
|
||||
# Invalid values should raise ValueError
|
||||
with pytest.raises(ValueError):
|
||||
manager._validate_setting_value('speech', 'rate', -1.0)
|
||||
with pytest.raises(ValueError):
|
||||
manager._validate_setting_value('speech', 'rate', 10.0)
|
||||
```
|
||||
|
||||
### Example Integration Test
|
||||
```python
|
||||
def test_remote_control_unix_socket(tmp_path):
|
||||
"""Test Unix socket remote control accepts commands."""
|
||||
socket_path = tmp_path / "test.sock"
|
||||
|
||||
# Start mock remote driver
|
||||
driver = MockUnixDriver(socket_path)
|
||||
|
||||
# Send command
|
||||
send_remote_command(socket_path, "command say Hello")
|
||||
|
||||
# Verify command was received
|
||||
assert driver.received_commands[-1] == "command say Hello"
|
||||
```
|
||||
|
||||
## Test Coverage Goals
|
||||
|
||||
- **Unit Tests**: 80%+ coverage on utility functions and validation logic
|
||||
- **Integration Tests**: 60%+ coverage on core managers and command system
|
||||
- **Overall**: 70%+ coverage on non-driver code
|
||||
|
||||
Driver code is excluded from coverage metrics as it requires hardware interaction.
|
||||
|
||||
## Continuous Integration
|
||||
|
||||
Tests are designed to run in CI environments without root access:
|
||||
- Unit and integration tests run on every commit
|
||||
- Driver tests are skipped in CI (require actual hardware)
|
||||
- Coverage reports are generated and tracked over time
|
||||
|
||||
## Test Principles
|
||||
|
||||
1. **No Root by Default**: Most tests should run without elevated privileges
|
||||
2. **Fast Execution**: Unit tests complete in <1 second each
|
||||
3. **Isolated**: Tests don't depend on each other or external state
|
||||
4. **Deterministic**: Tests produce same results every run
|
||||
5. **Documented**: Each test has a clear docstring explaining what it tests
|
||||
6. **Realistic Mocks**: Mocked components behave like real ones
|
||||
|
||||
## Future Enhancements
|
||||
|
||||
- **Performance Tests**: Measure input-to-speech latency
|
||||
- **Stress Tests**: Rapid event processing, memory leak detection
|
||||
- **Accessibility Tests**: Verify all features work without vision
|
||||
- **Compatibility Tests**: Test across different Linux distributions
|
||||
tests/TESTING_GUIDE.md (new file, 430 lines)
@@ -0,0 +1,430 @@
|
||||
# Fenrir Testing Guide
|
||||
|
||||
Complete guide to running and writing tests for the Fenrir screen reader.
|
||||
|
||||
## Quick Start
|
||||
|
||||
### 1. Install Test Dependencies
|
||||
|
||||
```bash
|
||||
# Install test requirements
|
||||
pip install -r tests/requirements.txt
|
||||
|
||||
# Or install individually
|
||||
pip install pytest pytest-cov pytest-mock pytest-timeout
|
||||
```
|
||||
|
||||
### 2. Run Tests
|
||||
|
||||
```bash
|
||||
# Run all tests (unit + integration)
|
||||
pytest tests/
|
||||
|
||||
# Run only unit tests (fastest)
|
||||
pytest tests/unit/ -v
|
||||
|
||||
# Run only integration tests
|
||||
pytest tests/integration/ -v
|
||||
|
||||
# Run with coverage report
|
||||
pytest tests/ --cov=src/fenrirscreenreader --cov-report=html
|
||||
# Then open htmlcov/index.html in a browser
|
||||
|
||||
# Run specific test file
|
||||
pytest tests/unit/test_settings_validation.py -v
|
||||
|
||||
# Run specific test class
|
||||
pytest tests/unit/test_settings_validation.py::TestSpeechSettingsValidation -v
|
||||
|
||||
# Run specific test
|
||||
pytest tests/unit/test_settings_validation.py::TestSpeechSettingsValidation::test_speech_rate_valid_range -v
|
||||
```
|
||||
|
||||
### 3. Useful Test Options
|
||||
|
||||
```bash
|
||||
# Stop on first failure
|
||||
pytest tests/ -x
|
||||
|
||||
# Show test output (print statements, logging)
|
||||
pytest tests/ -s
|
||||
|
||||
# Run tests in parallel (faster, requires: pip install pytest-xdist)
|
||||
pytest tests/ -n auto
|
||||
|
||||
# Show slowest 10 tests
|
||||
pytest tests/ --durations=10
|
||||
|
||||
# Run only tests matching a keyword
|
||||
pytest tests/ -k "remote"
|
||||
|
||||
# Run tests with specific markers
|
||||
pytest tests/ -m unit # Only unit tests
|
||||
pytest tests/ -m integration # Only integration tests
|
||||
pytest tests/ -m "not slow" # Skip slow tests
|
||||
```
|
||||
|
||||
## Test Structure
|
||||
|
||||
```
|
||||
tests/
|
||||
├── README.md # Test overview and strategy
|
||||
├── TESTING_GUIDE.md # This file - detailed usage guide
|
||||
├── requirements.txt # Test dependencies
|
||||
├── conftest.py # Shared fixtures and pytest config
|
||||
├── unit/ # Unit tests (fast, isolated)
|
||||
│ ├── __init__.py
|
||||
│ ├── test_settings_validation.py # Settings validation tests
|
||||
│ ├── test_cursor_utils.py # Cursor calculation tests
|
||||
│ └── test_text_utils.py # Text processing tests
|
||||
├── integration/ # Integration tests (require mocking)
|
||||
│ ├── __init__.py
|
||||
│ ├── test_remote_control.py # Remote control functionality
|
||||
│ ├── test_command_manager.py # Command loading/execution
|
||||
│ └── test_event_manager.py # Event queue processing
|
||||
└── drivers/ # Driver tests (require root)
|
||||
├── __init__.py
|
||||
├── test_vcsa_driver.py # TTY screen reading
|
||||
└── test_evdev_driver.py # Keyboard input capture
|
||||
```
|
||||
|
||||
## Writing New Tests
|
||||
|
||||
### Unit Test Example
|
||||
|
||||
```python
|
||||
"""tests/unit/test_my_feature.py"""
|
||||
import pytest
|
||||
|
||||
@pytest.mark.unit
|
||||
def test_speech_rate_calculation():
|
||||
"""Test that speech rate is calculated correctly."""
|
||||
rate = calculate_speech_rate(0.5)
|
||||
assert 0.0 <= rate <= 1.0
|
||||
assert rate == 0.5
|
||||
```
|
||||
|
||||
### Integration Test Example
|
||||
|
||||
```python
|
||||
"""tests/integration/test_my_integration.py"""
|
||||
import pytest
|
||||
|
||||
@pytest.mark.integration
|
||||
def test_remote_command_execution(mock_environment):
|
||||
"""Test remote command execution flow."""
|
||||
manager = RemoteManager()
|
||||
manager.initialize(mock_environment)
|
||||
|
||||
result = manager.handle_command_execution_with_response("say test")
|
||||
|
||||
assert result["success"] is True
|
||||
mock_environment["runtime"]["OutputManager"].speak_text.assert_called_once()
|
||||
```
|
||||
|
||||
### Using Fixtures
|
||||
|
||||
Common fixtures are defined in `conftest.py`:
|
||||
|
||||
```python
|
||||
def test_with_mock_environment(mock_environment):
|
||||
"""Use the shared mock environment fixture."""
|
||||
# mock_environment provides mocked runtime managers
|
||||
assert "runtime" in mock_environment
|
||||
assert "DebugManager" in mock_environment["runtime"]
|
||||
|
||||
def test_with_temp_config(temp_config_file):
|
||||
"""Use a temporary config file."""
|
||||
# temp_config_file is a Path object to a valid test config
|
||||
assert temp_config_file.exists()
|
||||
content = temp_config_file.read_text()
|
||||
assert "[speech]" in content
|
||||
```
|
||||
|
||||
## Test Markers
|
||||
|
||||
Tests can be marked to categorize them:
|
||||
|
||||
```python
|
||||
@pytest.mark.unit # Fast, isolated unit test
|
||||
@pytest.mark.integration # Integration test with mocking
|
||||
@pytest.mark.driver # Requires root access (skipped by default)
|
||||
@pytest.mark.slow # Takes > 1 second
|
||||
@pytest.mark.remote # Tests remote control functionality
|
||||
@pytest.mark.settings # Tests settings/configuration
|
||||
@pytest.mark.commands # Tests command system
|
||||
@pytest.mark.vmenu # Tests VMenu system
|
||||
```
|
||||
|
||||
Run tests by marker:
|
||||
```bash
|
||||
pytest tests/ -m unit # Only unit tests
|
||||
pytest tests/ -m "unit or integration" # Unit and integration
|
||||
pytest tests/ -m "not slow" # Skip slow tests
|
||||
```
|
||||
|
||||
## Code Coverage
|
||||
|
||||
### View Coverage Report
|
||||
|
||||
```bash
|
||||
# Generate HTML coverage report
|
||||
pytest tests/ --cov=src/fenrirscreenreader --cov-report=html
|
||||
|
||||
# Open report in browser
|
||||
firefox htmlcov/index.html # Or your preferred browser
|
||||
|
||||
# Terminal coverage report
|
||||
pytest tests/ --cov=src/fenrirscreenreader --cov-report=term-missing
|
||||
```
|
||||
|
||||
### Coverage Goals
|
||||
|
||||
- **Unit Tests**: 80%+ coverage on utility functions and validation logic
|
||||
- **Integration Tests**: 60%+ coverage on core managers
|
||||
- **Overall**: 70%+ coverage on non-driver code
|
||||
|
||||
Driver code is excluded from coverage as it requires hardware interaction.
|
||||
|
||||
## Testing Best Practices
|
||||
|
||||
### 1. Test One Thing
|
||||
|
||||
```python
|
||||
# Good - tests one specific behavior
|
||||
def test_speech_rate_rejects_negative():
|
||||
with pytest.raises(ValueError):
|
||||
validate_rate(-1.0)
|
||||
|
||||
# Bad - tests multiple unrelated things
|
||||
def test_speech_settings():
|
||||
validate_rate(0.5) # Rate validation
|
||||
validate_pitch(1.0) # Pitch validation
|
||||
validate_volume(0.8) # Volume validation
|
||||
```
|
||||
|
||||
### 2. Use Descriptive Names
|
||||
|
||||
```python
|
||||
# Good - clear what's being tested
|
||||
def test_speech_rate_rejects_values_above_three():
|
||||
...
|
||||
|
||||
# Bad - unclear purpose
|
||||
def test_rate():
|
||||
...
|
||||
```
|
||||
|
||||
### 3. Arrange-Act-Assert Pattern
|
||||
|
||||
```python
|
||||
def test_remote_command_parsing():
|
||||
# Arrange - set up test data
|
||||
manager = RemoteManager()
|
||||
command = "say Hello World"
|
||||
|
||||
# Act - execute the code being tested
|
||||
result = manager.parse_command(command)
|
||||
|
||||
# Assert - verify the result
|
||||
assert result["action"] == "say"
|
||||
assert result["text"] == "Hello World"
|
||||
```
|
||||
|
||||
### 4. Mock External Dependencies
|
||||
|
||||
```python
|
||||
def test_clipboard_export(mock_environment, tmp_path):
|
||||
"""Test clipboard export without real file operations."""
|
||||
# Use mock environment instead of real Fenrir runtime
|
||||
manager = RemoteManager()
|
||||
manager.initialize(mock_environment)
|
||||
|
||||
# Use temporary path instead of /tmp
|
||||
clipboard_path = tmp_path / "clipboard"
|
||||
mock_environment["runtime"]["SettingsManager"].get_setting = Mock(
|
||||
return_value=str(clipboard_path)
|
||||
)
|
||||
|
||||
manager.export_clipboard()
|
||||
|
||||
assert clipboard_path.exists()
|
||||
```
|
||||
|
||||
### 5. Test Error Paths
|
||||
|
||||
```python
|
||||
def test_remote_command_handles_invalid_input():
|
||||
"""Test that invalid commands are handled gracefully."""
|
||||
manager = RemoteManager()
|
||||
|
||||
# Test with various invalid inputs
|
||||
result1 = manager.handle_command_execution_with_response("")
|
||||
result2 = manager.handle_command_execution_with_response("invalid")
|
||||
result3 = manager.handle_command_execution_with_response("command unknown")
|
||||
|
||||
# All should return error results, not crash
|
||||
assert all(not r["success"] for r in [result1, result2, result3])
|
||||
```
|
||||
|
||||
## Debugging Tests
|
||||
|
||||
### Run with More Verbosity
|
||||
|
||||
```bash
|
||||
# Show test names and outcomes
|
||||
pytest tests/ -v
|
||||
|
||||
# Show test names, outcomes, and print statements
|
||||
pytest tests/ -v -s
|
||||
|
||||
# Show local variables on failure
|
||||
pytest tests/ --showlocals
|
||||
|
||||
# Show full diff on assertion failures
|
||||
pytest tests/ -vv
|
||||
```
|
||||
|
||||
### Use pytest.set_trace() for Debugging
|
||||
|
||||
```python
|
||||
def test_complex_logic():
|
||||
result = complex_function()
|
||||
pytest.set_trace() # Drop into debugger here
|
||||
assert result == expected
|
||||
```
|
||||
|
||||
### Run Single Test Repeatedly
|
||||
|
||||
```bash
|
||||
# Useful for debugging flaky tests (requires: pip install pytest-repeat)
|
||||
pytest tests/unit/test_my_test.py::test_specific_test --count=100
|
||||
```
|
||||
|
||||
## Continuous Integration
|
||||
|
||||
### GitHub Actions Example
|
||||
|
||||
```yaml
|
||||
name: Tests
|
||||
|
||||
on: [push, pull_request]
|
||||
|
||||
jobs:
|
||||
test:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: actions/setup-python@v4
|
||||
with:
|
||||
python-version: '3.9'
|
||||
- name: Install dependencies
|
||||
run: |
|
||||
pip install -r requirements.txt
|
||||
pip install -r tests/requirements.txt
|
||||
- name: Run tests
|
||||
run: pytest tests/ --cov=src/fenrirscreenreader --cov-report=xml
|
||||
- name: Upload coverage
|
||||
uses: codecov/codecov-action@v3
|
||||
```
|
||||
|
||||
## Common Issues
|
||||
|
||||
### ImportError: No module named 'fenrirscreenreader'
|
||||
|
||||
**Solution**: Make sure you're running pytest from the project root, or set PYTHONPATH:
|
||||
```bash
|
||||
export PYTHONPATH="${PYTHONPATH}:$(pwd)/src"
|
||||
pytest tests/
|
||||
```
|
||||
|
||||
### Tests hang or timeout
|
||||
|
||||
**Solution**: Use the timeout decorator or pytest-timeout:
|
||||
```bash
|
||||
pytest tests/ --timeout=30 # Global 30s timeout
|
||||
```
|
||||
|
||||
Or mark specific tests:
|
||||
```python
|
||||
@pytest.mark.timeout(5)
|
||||
def test_that_might_hang():
|
||||
...
|
||||
```
|
||||
|
||||
### Mocks not working as expected
|
||||
|
||||
**Solution**: Check that you're patching the right location:
|
||||
```python
|
||||
# Good - patch where it's used
|
||||
@patch('fenrirscreenreader.core.remoteManager.OutputManager')
|
||||
|
||||
# Bad - patch where it's defined
|
||||
@patch('fenrirscreenreader.core.outputManager.OutputManager')
|
||||
```
|
||||
|
||||
## Advanced Topics
|
||||
|
||||
### Parametrized Tests
|
||||
|
||||
Test multiple inputs with one test:
|
||||
|
||||
```python
|
||||
@pytest.mark.parametrize("rate,expected", [
|
||||
(0.0, True),
|
||||
(1.5, True),
|
||||
(3.0, True),
|
||||
(-1.0, False),
|
||||
(10.0, False),
|
||||
])
|
||||
def test_rate_validation(rate, expected):
|
||||
try:
|
||||
validate_rate(rate)
|
||||
assert expected is True
|
||||
except ValueError:
|
||||
assert expected is False
|
||||
```
|
||||
|
||||
### Test Fixtures with Cleanup
|
||||
|
||||
```python
|
||||
@pytest.fixture
|
||||
def temp_fenrir_instance():
|
||||
"""Start a test Fenrir instance."""
|
||||
fenrir = FenrirTestInstance()
|
||||
fenrir.start()
|
||||
|
||||
yield fenrir # Test runs here
|
||||
|
||||
# Cleanup after test
|
||||
fenrir.stop()
|
||||
fenrir.cleanup()
|
||||
```
|
||||
|
||||
### Testing Async Code
|
||||
|
||||
```python
|
||||
@pytest.mark.asyncio
|
||||
async def test_async_speech():
|
||||
result = await async_speak("test")
|
||||
assert result.success
|
||||
```
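Note that `@pytest.mark.asyncio` comes from the `pytest-asyncio` plugin, which is not listed in `tests/requirements.txt`; install it separately (`pip install pytest-asyncio`) before adding async tests.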
|
||||
|
||||
## Getting Help
|
||||
|
||||
- **Pytest Documentation**: https://docs.pytest.org/
|
||||
- **Fenrir Issues**: https://github.com/chrys87/fenrir/issues
|
||||
- **Test Coverage**: Run with `--cov-report=html` and inspect `htmlcov/index.html`
|
||||
|
||||
## Contributing Tests
|
||||
|
||||
When contributing tests:
|
||||
|
||||
1. **Follow naming conventions**: `test_*.py` for files, `test_*` for functions
|
||||
2. **Add docstrings**: Explain what each test verifies
|
||||
3. **Use appropriate markers**: `@pytest.mark.unit`, `@pytest.mark.integration`, etc.
|
||||
4. **Keep tests fast**: Unit tests should complete in <100ms
|
||||
5. **Test edge cases**: Empty strings, None, negative numbers, etc.
|
||||
6. **Update this guide**: If you add new test patterns or fixtures
|
||||
|
||||
Happy testing! 🧪
|
||||
tests/conftest.py (new file, 225 lines)
@@ -0,0 +1,225 @@
|
||||
"""
|
||||
Shared pytest fixtures for Fenrir tests.
|
||||
|
||||
This file contains fixtures and configuration used across all test modules.
|
||||
"""
|
||||
|
||||
import os
|
||||
import sys
|
||||
import tempfile
|
||||
from pathlib import Path
|
||||
from unittest.mock import MagicMock, Mock
|
||||
|
||||
import pytest
|
||||
|
||||
# Add src directory to Python path for imports
|
||||
fenrir_root = Path(__file__).parent.parent
|
||||
sys.path.insert(0, str(fenrir_root / "src"))
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def mock_environment():
|
||||
"""Create a minimal mock environment for testing.
|
||||
|
||||
Returns a mock environment dict with required runtime managers mocked.
|
||||
This allows testing components without initializing the full Fenrir stack.
|
||||
"""
|
||||
env = {
|
||||
"runtime": {
|
||||
"DebugManager": Mock(write_debug_out=Mock()),
|
||||
"OutputManager": Mock(
|
||||
present_text=Mock(),
|
||||
speak_text=Mock(),
|
||||
interrupt_output=Mock(),
|
||||
),
|
||||
"SettingsManager": Mock(
|
||||
get_setting=Mock(return_value="default"),
|
||||
get_setting_as_int=Mock(return_value=0),
|
||||
get_setting_as_float=Mock(return_value=0.0),
|
||||
get_setting_as_bool=Mock(return_value=True),
|
||||
),
|
||||
"InputManager": Mock(
|
||||
sendKeys=Mock(),
|
||||
handle_device_grab=Mock(),
|
||||
),
|
||||
"ScreenManager": Mock(update_screen_ignored=Mock()),
|
||||
"EventManager": Mock(stop_main_event_loop=Mock()),
|
||||
"MemoryManager": Mock(
|
||||
add_value_to_first_index=Mock(),
|
||||
get_index_list_element=Mock(return_value="test clipboard"),
|
||||
is_index_list_empty=Mock(return_value=False),
|
||||
),
|
||||
"VmenuManager": Mock(
|
||||
set_curr_menu=Mock(),
|
||||
),
|
||||
"CursorManager": Mock(
|
||||
set_window_for_application=Mock(),
|
||||
clear_window_for_application=Mock(),
|
||||
),
|
||||
},
|
||||
"settings": Mock(),
|
||||
"general": {
|
||||
"curr_user": "testuser",
|
||||
},
|
||||
}
|
||||
return env
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def temp_config_file(tmp_path):
|
||||
"""Create a temporary configuration file for testing.
|
||||
|
||||
Returns path to a valid test configuration file.
|
||||
"""
|
||||
config_path = tmp_path / "test_settings.conf"
|
||||
config_content = """[sound]
|
||||
enabled=True
|
||||
driver=gstreamerDriver
|
||||
theme=default
|
||||
volume=0.7
|
||||
|
||||
[speech]
|
||||
enabled=True
|
||||
driver=speechdDriver
|
||||
rate=0.5
|
||||
pitch=0.5
|
||||
volume=1.0
|
||||
autoReadIncoming=True
|
||||
|
||||
[screen]
|
||||
driver=vcsaDriver
|
||||
encoding=auto
|
||||
screenUpdateDelay=0.05
|
||||
|
||||
[keyboard]
|
||||
driver=evdevDriver
|
||||
device=ALL
|
||||
grabDevices=True
|
||||
keyboardLayout=desktop
|
||||
|
||||
[general]
|
||||
debugLevel=2
|
||||
debugMode=File
|
||||
|
||||
[remote]
|
||||
enable=True
|
||||
driver=unixDriver
|
||||
port=22447
|
||||
enableSettingsRemote=True
|
||||
enableCommandRemote=True
|
||||
"""
|
||||
config_path.write_text(config_content)
|
||||
return config_path
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def temp_socket_path(tmp_path):
|
||||
"""Create a temporary Unix socket path for testing.
|
||||
|
||||
Returns path that can be used for Unix socket testing.
|
||||
"""
|
||||
return tmp_path / "test_fenrir.sock"
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def temp_clipboard_file(tmp_path):
|
||||
"""Create a temporary clipboard file for testing.
|
||||
|
||||
Returns path to a temporary file for clipboard operations.
|
||||
"""
|
||||
clipboard_path = tmp_path / "fenrirClipboard"
|
||||
clipboard_path.write_text("")
|
||||
return clipboard_path
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def sample_screen_data():
|
||||
"""Return sample screen data for testing screen-related functionality.
|
||||
|
||||
Returns dict with screen dimensions and content.
|
||||
"""
|
||||
return {
|
||||
"columns": 80,
|
||||
"lines": 24,
|
||||
"delta": "Hello World",
|
||||
"cursor": {"x": 0, "y": 0},
|
||||
"content": "Sample screen content\nSecond line\nThird line",
|
||||
}
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def sample_remote_commands():
|
||||
"""Return sample remote control commands for testing.
|
||||
|
||||
Returns list of valid remote commands.
|
||||
"""
|
||||
return [
|
||||
"command say Hello World",
|
||||
"command interrupt",
|
||||
"setting set speech#rate=0.8",
|
||||
"setting set speech#pitch=0.6",
|
||||
"setting set sound#volume=0.5",
|
||||
"setting reset",
|
||||
]
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def invalid_remote_commands():
|
||||
"""Return invalid remote control commands for testing validation.
|
||||
|
||||
Returns list of commands that should be rejected.
|
||||
"""
|
||||
return [
|
||||
"setting set speech#rate=999", # Out of range
|
||||
"setting set speech#rate=-1", # Negative value
|
||||
"setting set speech#pitch=10", # Out of range
|
||||
"setting set speech#volume=-0.5", # Negative volume
|
||||
"setting set invalid#setting=value", # Invalid section
|
||||
"command unknown_command", # Unknown command
|
||||
]
|
||||
|
||||
|
||||
# Pytest hooks for test session customization
|
||||
|
||||
|
||||
def pytest_configure(config):
|
||||
"""Configure pytest with custom settings."""
|
||||
# Add custom markers
|
||||
config.addinivalue_line(
|
||||
"markers", "unit: Unit tests (fast, no mocking required)"
|
||||
)
|
||||
config.addinivalue_line(
|
||||
"markers", "integration: Integration tests (require mocking)"
|
||||
)
|
||||
config.addinivalue_line(
|
||||
"markers", "driver: Driver tests (require root access)"
|
||||
)
|
||||
config.addinivalue_line(
|
||||
"markers", "slow: Tests that take more than 1 second"
|
||||
)
|
||||
|
||||
|
||||
def pytest_collection_modifyitems(config, items):
|
||||
"""Modify test collection to skip driver tests unless explicitly run.
|
||||
|
||||
Driver tests require root access and hardware, so skip by default.
|
||||
Run with: pytest --run-driver-tests
|
||||
"""
|
||||
skip_driver = pytest.mark.skip(
|
||||
reason="Driver tests require root access (use --run-driver-tests)"
|
||||
)
|
||||
run_driver_tests = config.getoption("--run-driver-tests", default=False)
|
||||
|
||||
for item in items:
|
||||
if "driver" in item.keywords and not run_driver_tests:
|
||||
item.add_marker(skip_driver)
|
||||
|
||||
|
||||
def pytest_addoption(parser):
|
||||
"""Add custom command line options."""
|
||||
parser.addoption(
|
||||
"--run-driver-tests",
|
||||
action="store_true",
|
||||
default=False,
|
||||
help="Run driver tests that require root access",
|
||||
)
|
||||
tests/integration/__init__.py (new file, 1 line)
@@ -0,0 +1 @@
|
||||
"""Integration tests for Fenrir screen reader components."""
|
||||
tests/integration/test_remote_control.py (new file, 342 lines)
@@ -0,0 +1,342 @@
|
||||
"""
|
||||
Integration tests for remote control functionality.
|
||||
|
||||
Tests the remote control system including Unix socket and TCP communication,
|
||||
command parsing, and settings management.
|
||||
"""
|
||||
|
||||
import pytest
|
||||
import socket
|
||||
import time
|
||||
from unittest.mock import Mock, patch
|
||||
|
||||
from fenrirscreenreader.core.remoteManager import RemoteManager
|
||||
|
||||
|
||||
@pytest.mark.integration
|
||||
@pytest.mark.remote
|
||||
class TestRemoteCommandParsing:
|
||||
"""Test remote control command parsing and execution."""
|
||||
|
||||
def setup_method(self):
|
||||
"""Create RemoteManager instance for each test."""
|
||||
self.manager = RemoteManager()
|
||||
|
||||
def test_say_command_parsing(self, mock_environment):
|
||||
"""Test parsing of 'command say' messages."""
|
||||
self.manager.initialize(mock_environment)
|
||||
|
||||
result = self.manager.handle_command_execution_with_response(
|
||||
"say Hello World"
|
||||
)
|
||||
|
||||
assert result["success"] is True
|
||||
assert "Speaking" in result["message"]
|
||||
mock_environment["runtime"]["OutputManager"].speak_text.assert_called_once_with(
|
||||
"Hello World"
|
||||
)
|
||||
|
||||
def test_interrupt_command(self, mock_environment):
|
||||
"""Test speech interruption command."""
|
||||
self.manager.initialize(mock_environment)
|
||||
|
||||
result = self.manager.handle_command_execution_with_response("interrupt")
|
||||
|
||||
assert result["success"] is True
|
||||
mock_environment["runtime"][
|
||||
"OutputManager"
|
||||
].interrupt_output.assert_called_once()
|
||||
|
||||
def test_setting_change_parsing(self, mock_environment):
|
||||
"""Test parsing of 'setting set' commands."""
|
||||
self.manager.initialize(mock_environment)
|
||||
|
||||
# Mock parse_setting_args to verify it gets called
|
||||
with patch.object(
|
||||
mock_environment["runtime"]["SettingsManager"], "parse_setting_args"
|
||||
) as mock_parse:
|
||||
result = self.manager.handle_settings_change_with_response(
|
||||
"set speech#rate=0.8"
|
||||
)
|
||||
|
||||
assert result["success"] is True
|
||||
mock_parse.assert_called_once_with("speech#rate=0.8")
|
||||
|
||||
def test_clipboard_command(self, mock_environment):
|
||||
"""Test clipboard setting via remote control."""
|
||||
self.manager.initialize(mock_environment)
|
||||
|
||||
result = self.manager.handle_command_execution_with_response(
|
||||
"clipboard Test clipboard content"
|
||||
)
|
||||
|
||||
assert result["success"] is True
|
||||
mock_environment["runtime"][
|
||||
"MemoryManager"
|
||||
].add_value_to_first_index.assert_called_once_with(
|
||||
"clipboardHistory", "Test clipboard content"
|
||||
)
|
||||
|
||||
def test_quit_command(self, mock_environment):
|
||||
"""Test Fenrir shutdown command."""
|
||||
self.manager.initialize(mock_environment)
|
||||
|
||||
result = self.manager.handle_command_execution_with_response(
|
||||
"quitapplication"
|
||||
)
|
||||
|
||||
assert result["success"] is True
|
||||
mock_environment["runtime"][
|
||||
"EventManager"
|
||||
].stop_main_event_loop.assert_called_once()
|
||||
|
||||
def test_unknown_command_rejection(self, mock_environment):
|
||||
"""Test that unknown commands are rejected."""
|
||||
self.manager.initialize(mock_environment)
|
||||
|
||||
result = self.manager.handle_command_execution_with_response(
|
||||
"unknown_command"
|
||||
)
|
||||
|
||||
assert result["success"] is False
|
||||
assert "Unknown command" in result["message"]
|
||||
|
||||
|
||||
@pytest.mark.integration
|
||||
@pytest.mark.remote
|
||||
class TestRemoteSettingsControl:
|
||||
"""Test remote control of settings."""
|
||||
|
||||
def setup_method(self):
|
||||
"""Create RemoteManager instance for each test."""
|
||||
self.manager = RemoteManager()
|
||||
|
||||
def test_setting_reset(self, mock_environment):
|
||||
"""Test resetting settings to defaults."""
|
||||
self.manager.initialize(mock_environment)
|
||||
|
||||
with patch.object(
|
||||
mock_environment["runtime"]["SettingsManager"], "reset_setting_arg_dict"
|
||||
) as mock_reset:
|
||||
result = self.manager.handle_settings_change_with_response("reset")
|
||||
|
||||
assert result["success"] is True
|
||||
mock_reset.assert_called_once()
|
||||
|
||||
def test_setting_save(self, mock_environment):
|
||||
"""Test saving settings to file."""
|
||||
self.manager.initialize(mock_environment)
|
||||
|
||||
mock_environment["runtime"]["SettingsManager"].get_settings_file = Mock(
|
||||
return_value="/tmp/test.conf"
|
||||
)
|
||||
|
||||
with patch.object(
|
||||
mock_environment["runtime"]["SettingsManager"], "save_settings"
|
||||
) as mock_save:
|
||||
result = self.manager.handle_settings_change_with_response("save")
|
||||
|
||||
assert result["success"] is True
|
||||
mock_save.assert_called_once()
|
||||
|
||||
def test_settings_remote_disabled(self, mock_environment):
|
||||
"""Test that settings commands are blocked when disabled."""
|
||||
mock_environment["runtime"]["SettingsManager"].get_setting_as_bool = Mock(
|
||||
return_value=False
|
||||
)
|
||||
self.manager.initialize(mock_environment)
|
||||
|
||||
result = self.manager.handle_settings_change_with_response(
|
||||
"set speech#rate=0.5"
|
||||
)
|
||||
|
||||
assert result["success"] is False
|
||||
assert "disabled" in result["message"].lower()
|
||||
|
||||
|
||||
@pytest.mark.integration
|
||||
@pytest.mark.remote
|
||||
class TestRemoteDataFormat:
|
||||
"""Test remote control data format handling."""
|
||||
|
||||
def setup_method(self):
|
||||
"""Create RemoteManager instance for each test."""
|
||||
self.manager = RemoteManager()
|
||||
|
||||
def test_command_prefix_case_insensitive(self, mock_environment):
|
||||
"""Test that command prefixes are case-insensitive."""
|
||||
self.manager.initialize(mock_environment)
|
||||
|
||||
# All of these should work
|
||||
result1 = self.manager.handle_remote_incomming_with_response(
|
||||
"COMMAND say test"
|
||||
)
|
||||
result2 = self.manager.handle_remote_incomming_with_response(
|
||||
"command say test"
|
||||
)
|
||||
result3 = self.manager.handle_remote_incomming_with_response(
|
||||
"CoMmAnD say test"
|
||||
)
|
||||
|
||||
assert all(r["success"] for r in [result1, result2, result3])
|
||||
|
||||
def test_setting_prefix_case_insensitive(self, mock_environment):
|
||||
"""Test that setting prefixes are case-insensitive."""
|
||||
self.manager.initialize(mock_environment)
|
||||
|
||||
with patch.object(
|
||||
mock_environment["runtime"]["SettingsManager"], "parse_setting_args"
|
||||
):
|
||||
result1 = self.manager.handle_remote_incomming_with_response(
|
||||
"SETTING set speech#rate=0.5"
|
||||
)
|
||||
result2 = self.manager.handle_remote_incomming_with_response(
|
||||
"setting set speech#rate=0.5"
|
||||
)
|
||||
|
||||
assert all(r["success"] for r in [result1, result2])
|
||||
|
||||
def test_empty_data_handling(self, mock_environment):
|
||||
"""Test handling of empty remote data."""
|
||||
self.manager.initialize(mock_environment)
|
||||
|
||||
result = self.manager.handle_remote_incomming_with_response("")
|
||||
|
||||
assert result["success"] is False
|
||||
assert "No data" in result["message"]
|
||||
|
||||
def test_invalid_format_rejection(self, mock_environment):
|
||||
"""Test rejection of invalid command format."""
|
||||
self.manager.initialize(mock_environment)
|
||||
|
||||
result = self.manager.handle_remote_incomming_with_response(
|
||||
"invalid format without prefix"
|
||||
)
|
||||
|
||||
assert result["success"] is False
|
||||
assert "Unknown command format" in result["message"]
|
||||
|
||||
|
||||
@pytest.mark.integration
|
||||
@pytest.mark.remote
|
||||
class TestWindowDefinition:
|
||||
"""Test window definition via remote control."""
|
||||
|
||||
def setup_method(self):
|
||||
"""Create RemoteManager instance for each test."""
|
||||
self.manager = RemoteManager()
|
||||
|
||||
def test_define_window_valid_coordinates(self, mock_environment):
|
||||
"""Test defining a window with valid coordinates."""
|
||||
self.manager.initialize(mock_environment)
|
||||
|
||||
result = self.manager.handle_command_execution_with_response(
|
||||
"window 10 5 70 20"
|
||||
)
|
||||
|
||||
assert result["success"] is True
|
||||
mock_environment["runtime"][
|
||||
"CursorManager"
|
||||
].set_window_for_application.assert_called_once()
|
||||
|
||||
# Verify the coordinates were parsed correctly
|
||||
call_args = mock_environment["runtime"][
|
||||
"CursorManager"
|
||||
].set_window_for_application.call_args
|
||||
start, end = call_args[0]
|
||||
assert start == {"x": 10, "y": 5}
|
||||
assert end == {"x": 70, "y": 20}
|
||||
|
||||
def test_define_window_insufficient_coordinates(self, mock_environment):
|
||||
"""Test that window definition with < 4 coordinates is ignored."""
|
||||
self.manager.initialize(mock_environment)
|
||||
|
||||
# Should succeed but not call set_window_for_application
|
||||
result = self.manager.handle_command_execution_with_response("window 10 20")
|
||||
|
||||
assert result["success"] is True
|
||||
mock_environment["runtime"][
|
||||
"CursorManager"
|
||||
].set_window_for_application.assert_not_called()
|
||||
|
||||
def test_reset_window(self, mock_environment):
|
||||
"""Test resetting window to full screen."""
|
||||
self.manager.initialize(mock_environment)
|
||||
|
||||
result = self.manager.handle_command_execution_with_response("resetwindow")
|
||||
|
||||
assert result["success"] is True
|
||||
mock_environment["runtime"][
|
||||
"CursorManager"
|
||||
].clear_window_for_application.assert_called_once()
|
||||
|
||||
|
||||
@pytest.mark.integration
|
||||
@pytest.mark.remote
|
||||
class TestVMenuControl:
|
||||
"""Test VMenu control via remote."""
|
||||
|
||||
def setup_method(self):
|
||||
"""Create RemoteManager instance for each test."""
|
||||
self.manager = RemoteManager()
|
||||
|
||||
def test_set_vmenu(self, mock_environment):
|
||||
"""Test setting VMenu to specific menu."""
|
||||
self.manager.initialize(mock_environment)
|
||||
|
||||
result = self.manager.handle_command_execution_with_response(
|
||||
"vmenu /vim/file/save"
|
||||
)
|
||||
|
||||
assert result["success"] is True
|
||||
mock_environment["runtime"]["VmenuManager"].set_curr_menu.assert_called_once_with(
|
||||
"/vim/file/save"
|
||||
)
|
||||
|
||||
def test_reset_vmenu(self, mock_environment):
|
||||
"""Test resetting VMenu to default."""
|
||||
self.manager.initialize(mock_environment)
|
||||
|
||||
result = self.manager.handle_command_execution_with_response("resetvmenu")
|
||||
|
||||
assert result["success"] is True
|
||||
mock_environment["runtime"]["VmenuManager"].set_curr_menu.assert_called_once_with()
|
||||
|
||||
|
||||
@pytest.mark.integration
|
||||
@pytest.mark.remote
|
||||
@pytest.mark.slow
|
||||
class TestRemoteControlThroughput:
|
||||
"""Test remote control performance characteristics."""
|
||||
|
||||
def test_rapid_say_commands(self, mock_environment):
|
||||
"""Test handling of rapid successive say commands."""
|
||||
manager = RemoteManager()
|
||||
manager.initialize(mock_environment)
|
||||
|
||||
# Send 100 rapid commands
|
||||
for i in range(100):
|
||||
result = manager.handle_command_execution_with_response(f"say test {i}")
|
||||
assert result["success"] is True
|
||||
|
||||
# Verify all were queued
|
||||
assert (
|
||||
mock_environment["runtime"]["OutputManager"].speak_text.call_count == 100
|
||||
)
|
||||
|
||||
def test_rapid_setting_changes(self, mock_environment):
|
||||
"""Test handling of rapid setting changes."""
|
||||
manager = RemoteManager()
|
||||
manager.initialize(mock_environment)
|
||||
|
||||
# Rapidly change speech rate
|
||||
with patch.object(
|
||||
mock_environment["runtime"]["SettingsManager"], "parse_setting_args"
|
||||
) as mock_parse:
|
||||
for rate in [0.5, 0.6, 0.7, 0.8, 0.9, 1.0]:
|
||||
result = manager.handle_settings_change_with_response(
|
||||
f"set speech#rate={rate}"
|
||||
)
|
||||
assert result["success"] is True
|
||||
|
||||
assert mock_parse.call_count == 6
|
||||
tests/requirements.txt (new file, 21 lines)
@@ -0,0 +1,21 @@
|
||||
# Test dependencies for Fenrir screen reader
|
||||
# Install with: pip install -r tests/requirements.txt
|
||||
|
||||
# Core testing framework (required)
|
||||
pytest>=7.0.0
|
||||
|
||||
# Optional but recommended plugins
|
||||
pytest-cov>=4.0.0 # Coverage reporting (pytest --cov)
|
||||
pytest-mock>=3.10.0 # Enhanced mocking utilities
|
||||
pytest-timeout>=2.1.0 # Timeout for hanging tests (pytest --timeout)
|
||||
pytest-xdist>=3.0.0 # Parallel test execution (pytest -n auto)
|
||||
|
||||
# Additional testing utilities (optional)
|
||||
freezegun>=1.2.0 # Time mocking
|
||||
responses>=0.22.0 # HTTP mocking (for future web features)
|
||||
|
||||
# Minimal install (just pytest):
|
||||
# pip install pytest
|
||||
#
|
||||
# Full install (all features):
|
||||
# pip install -r tests/requirements.txt
|
||||
tests/unit/__init__.py (new file, 1 line)
@@ -0,0 +1 @@
|
||||
"""Unit tests for Fenrir screen reader components."""
|
||||
tests/unit/test_settings_validation.py (new file, 188 lines)
@@ -0,0 +1,188 @@
|
||||
"""
|
||||
Unit tests for settings validation in SettingsManager.
|
||||
|
||||
Tests the _validate_setting_value method to ensure proper input validation
|
||||
for all configurable settings that could cause crashes or accessibility issues.
|
||||
"""
|
||||
|
||||
import pytest
|
||||
import sys
|
||||
from pathlib import Path
|
||||
|
||||
# Import the settings manager
|
||||
from fenrirscreenreader.core.settingsManager import SettingsManager
|
||||
|
||||
|
||||
@pytest.mark.unit
|
||||
@pytest.mark.settings
|
||||
class TestSpeechSettingsValidation:
|
||||
"""Test validation of speech-related settings."""
|
||||
|
||||
def setup_method(self):
|
||||
"""Create a SettingsManager instance for each test."""
|
||||
self.manager = SettingsManager()
|
||||
|
||||
def test_speech_rate_valid_range(self):
|
||||
"""Speech rate should accept values between 0.0 and 3.0."""
|
||||
# Valid boundary values
|
||||
self.manager._validate_setting_value("speech", "rate", 0.0)
|
||||
self.manager._validate_setting_value("speech", "rate", 1.5)
|
||||
self.manager._validate_setting_value("speech", "rate", 3.0)
|
||||
|
||||
def test_speech_rate_rejects_negative(self):
|
||||
"""Speech rate should reject negative values."""
|
||||
with pytest.raises(ValueError, match="must be between 0.0 and 3.0"):
|
||||
self.manager._validate_setting_value("speech", "rate", -0.1)
|
||||
|
||||
def test_speech_rate_rejects_too_high(self):
|
||||
"""Speech rate should reject values above 3.0."""
|
||||
with pytest.raises(ValueError, match="must be between 0.0 and 3.0"):
|
||||
self.manager._validate_setting_value("speech", "rate", 10.0)
|
||||
|
||||
def test_speech_pitch_valid_range(self):
|
||||
"""Speech pitch should accept values between 0.0 and 2.0."""
|
||||
self.manager._validate_setting_value("speech", "pitch", 0.0)
|
||||
self.manager._validate_setting_value("speech", "pitch", 1.0)
|
||||
self.manager._validate_setting_value("speech", "pitch", 2.0)
|
||||
|
||||
def test_speech_pitch_rejects_invalid(self):
|
||||
"""Speech pitch should reject out-of-range values."""
|
||||
with pytest.raises(ValueError, match="must be between 0.0 and 2.0"):
|
||||
self.manager._validate_setting_value("speech", "pitch", -1.0)
|
||||
with pytest.raises(ValueError, match="must be between 0.0 and 2.0"):
|
||||
self.manager._validate_setting_value("speech", "pitch", 5.0)
|
||||
|
||||
def test_speech_volume_valid_range(self):
|
||||
"""Speech volume should accept values between 0.0 and 1.5."""
|
||||
self.manager._validate_setting_value("speech", "volume", 0.0)
|
||||
self.manager._validate_setting_value("speech", "volume", 1.0)
|
||||
self.manager._validate_setting_value("speech", "volume", 1.5)
|
||||
|
||||
def test_speech_volume_rejects_negative(self):
|
||||
"""Speech volume should reject negative values."""
|
||||
with pytest.raises(ValueError, match="must be between 0.0 and 1.5"):
|
||||
self.manager._validate_setting_value("speech", "volume", -0.5)
|
||||
|
||||
def test_speech_driver_whitelisted(self):
|
||||
"""Speech driver should only accept whitelisted values."""
|
||||
# Valid drivers
|
||||
self.manager._validate_setting_value("speech", "driver", "speechdDriver")
|
||||
self.manager._validate_setting_value("speech", "driver", "genericDriver")
|
||||
self.manager._validate_setting_value("speech", "driver", "dummyDriver")
|
||||
|
||||
# Invalid driver
|
||||
with pytest.raises(ValueError, match="Invalid speech driver"):
|
||||
self.manager._validate_setting_value(
|
||||
"speech", "driver", "nonexistentDriver"
|
||||
)
|
||||
|
||||
|
||||
@pytest.mark.unit
|
||||
@pytest.mark.settings
|
||||
class TestSoundSettingsValidation:
|
||||
"""Test validation of sound-related settings."""
|
||||
|
||||
def setup_method(self):
|
||||
"""Create a SettingsManager instance for each test."""
|
||||
self.manager = SettingsManager()
|
||||
|
||||
def test_sound_volume_valid_range(self):
|
||||
"""Sound volume should accept values between 0.0 and 1.5."""
|
||||
self.manager._validate_setting_value("sound", "volume", 0.0)
|
||||
self.manager._validate_setting_value("sound", "volume", 0.7)
|
||||
self.manager._validate_setting_value("sound", "volume", 1.5)
|
||||
|
||||
def test_sound_volume_rejects_invalid(self):
|
||||
"""Sound volume should reject out-of-range values."""
|
||||
with pytest.raises(ValueError, match="must be between 0.0 and 1.5"):
|
||||
self.manager._validate_setting_value("sound", "volume", -0.1)
|
||||
with pytest.raises(ValueError, match="must be between 0.0 and 1.5"):
|
||||
self.manager._validate_setting_value("sound", "volume", 2.0)
|
||||
|
||||
def test_sound_driver_whitelisted(self):
|
||||
"""Sound driver should only accept whitelisted values."""
|
||||
# Valid drivers
|
||||
self.manager._validate_setting_value("sound", "driver", "genericDriver")
|
||||
self.manager._validate_setting_value("sound", "driver", "gstreamerDriver")
|
||||
self.manager._validate_setting_value("sound", "driver", "dummyDriver")
|
||||
|
||||
# Invalid driver
|
||||
with pytest.raises(ValueError, match="Invalid sound driver"):
|
||||
self.manager._validate_setting_value("sound", "driver", "invalidDriver")
|
||||
|
||||
|
||||
@pytest.mark.unit
|
||||
@pytest.mark.settings
|
||||
class TestDriverValidation:
|
||||
"""Test validation of driver selection settings."""
|
||||
|
||||
def setup_method(self):
|
||||
"""Create a SettingsManager instance for each test."""
|
||||
self.manager = SettingsManager()
|
||||
|
||||
def test_screen_driver_whitelisted(self):
|
||||
"""Screen driver should only accept whitelisted values."""
|
||||
# Valid drivers
|
||||
self.manager._validate_setting_value("screen", "driver", "vcsaDriver")
|
||||
self.manager._validate_setting_value("screen", "driver", "ptyDriver")
|
||||
self.manager._validate_setting_value("screen", "driver", "dummyDriver")
|
||||
|
||||
# Invalid driver
|
||||
with pytest.raises(ValueError, match="Invalid screen driver"):
|
||||
self.manager._validate_setting_value("screen", "driver", "unknownDriver")
|
||||
|
||||
def test_keyboard_driver_whitelisted(self):
|
||||
"""Keyboard driver should only accept whitelisted values."""
|
||||
# Valid drivers
|
||||
self.manager._validate_setting_value("keyboard", "driver", "evdevDriver")
|
||||
self.manager._validate_setting_value("keyboard", "driver", "ptyDriver")
|
||||
self.manager._validate_setting_value("keyboard", "driver", "atspiDriver")
|
||||
self.manager._validate_setting_value("keyboard", "driver", "dummyDriver")
|
||||
|
||||
# Invalid driver
|
||||
with pytest.raises(ValueError, match="Invalid input driver"):
|
||||
self.manager._validate_setting_value("keyboard", "driver", "badDriver")
|
||||
|
||||
|
||||
@pytest.mark.unit
|
||||
@pytest.mark.settings
|
||||
class TestGeneralSettingsValidation:
|
||||
"""Test validation of general settings."""
|
||||
|
||||
def setup_method(self):
|
||||
"""Create a SettingsManager instance for each test."""
|
||||
self.manager = SettingsManager()
|
||||
|
||||
def test_debug_level_valid_range(self):
|
||||
"""Debug level should accept values 0-3."""
|
||||
self.manager._validate_setting_value("general", "debug_level", 0)
|
||||
self.manager._validate_setting_value("general", "debug_level", 1)
|
||||
self.manager._validate_setting_value("general", "debug_level", 2)
|
||||
self.manager._validate_setting_value("general", "debug_level", 3)
|
||||
|
||||
def test_debug_level_rejects_invalid(self):
|
||||
"""Debug level should reject values outside 0-3."""
|
||||
with pytest.raises(ValueError, match="must be between 0 and 3"):
|
||||
self.manager._validate_setting_value("general", "debug_level", -1)
|
||||
with pytest.raises(ValueError, match="must be between 0 and 3"):
|
||||
self.manager._validate_setting_value("general", "debug_level", 10)
|
||||
|
||||
|
||||
@pytest.mark.unit
|
||||
@pytest.mark.settings
|
||||
class TestValidationSkipsUnknownSettings:
|
||||
"""Test that validation doesn't error on unknown settings."""
|
||||
|
||||
def setup_method(self):
|
||||
"""Create a SettingsManager instance for each test."""
|
||||
self.manager = SettingsManager()
|
||||
|
||||
def test_unknown_section_no_error(self):
|
||||
"""Unknown sections should not raise errors during validation."""
|
||||
# Should not raise - validation only applies to known critical settings
|
||||
self.manager._validate_setting_value("unknown_section", "setting", "value")
|
||||
|
||||
def test_unknown_setting_no_error(self):
|
||||
"""Unknown settings in known sections should not raise errors."""
|
||||
# Should not raise - only specific critical settings are validated
|
||||
self.manager._validate_setting_value("speech", "unknown_setting", "value")
|
||||
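Taken together, the assertions above pin down the validator's contract: numeric ranges for rate, pitch, volume and debug_level, driver whitelists per subsystem, and silent acceptance of unknown sections or settings. The following is a minimal sketch inferred only from those assertions; the real _validate_setting_value in fenrirscreenreader.core.settingsManager may be structured quite differently.

# Illustrative sketch of a validator that satisfies the tests above; not the actual Fenrir code.
class SettingsManagerSketch:
    NUMERIC_RANGES = {
        ("speech", "rate"): (0.0, 3.0),
        ("speech", "pitch"): (0.0, 2.0),
        ("speech", "volume"): (0.0, 1.5),
        ("sound", "volume"): (0.0, 1.5),
        ("general", "debug_level"): (0, 3),
    }
    DRIVER_WHITELISTS = {
        ("speech", "driver"): ("speech", {"speechdDriver", "genericDriver", "dummyDriver"}),
        ("sound", "driver"): ("sound", {"genericDriver", "gstreamerDriver", "dummyDriver"}),
        ("screen", "driver"): ("screen", {"vcsaDriver", "ptyDriver", "dummyDriver"}),
        ("keyboard", "driver"): ("input", {"evdevDriver", "ptyDriver", "atspiDriver", "dummyDriver"}),
    }

    def _validate_setting_value(self, section, setting, value):
        key = (section, setting)
        if key in self.NUMERIC_RANGES:
            low, high = self.NUMERIC_RANGES[key]
            if not (low <= float(value) <= high):
                raise ValueError(f"{section} {setting} must be between {low} and {high}")
        elif key in self.DRIVER_WHITELISTS:
            label, allowed = self.DRIVER_WHITELISTS[key]
            if value not in allowed:
                raise ValueError(f"Invalid {label} driver: {value}")
        # Unknown sections and settings are intentionally left unvalidated.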
@@ -109,8 +109,25 @@ else
    echo -e "${GREEN}✓ Core module imports successful${NC}"
fi

# 4. Check for secrets or sensitive data
echo -e "\n${YELLOW}4. Checking for potential secrets...${NC}"
# 4. Run test suite
echo -e "\n${YELLOW}4. Running test suite...${NC}"
if command -v pytest >/dev/null 2>&1; then
    # Run tests quietly, show summary at end
    # Check pytest's own exit status via PIPESTATUS; otherwise the pipe to tail
    # would mask test failures (the if would only see tail's exit code).
    pytest tests/ -q --tb=short 2>&1 | tail -20
    if [ "${PIPESTATUS[0]}" -eq 0 ]; then
        echo -e "${GREEN}✓ All tests passed${NC}"
    else
        echo -e "${RED}✗ Test suite failed${NC}"
        echo "Run: pytest tests/ -v (to see details)"
        VALIDATION_FAILED=1
    fi
else
    echo -e "${YELLOW}⚠ pytest not installed - skipping tests${NC}"
    echo " Install with: pip install pytest"
    echo " Or full test suite: pip install -r tests/requirements.txt"
fi

# 5. Check for secrets or sensitive data
echo -e "\n${YELLOW}5. Checking for potential secrets...${NC}"
SECRETS_FOUND=0

if [ -n "$STAGED_PYTHON_FILES" ]; then
@@ -144,6 +161,7 @@ else
    echo ""
    echo "Quick fixes:"
    echo " • Python syntax: python3 tools/validate_syntax.py --fix"
    echo " • Run tests: pytest tests/ -v"
    echo " • Review flagged files manually"
    echo " • Re-run commit after fixes"
    exit 1