Redesigned the flood protection for incoming text; it should hopefully be much better.
@@ -75,6 +75,22 @@ auto_read_incoming=True
# Speak individual numbers instead of whole string.
read_numbers_as_digits = False

# Flood control: batch rapid updates instead of speaking each one
# Number of updates within rapid_update_window to trigger batching
rapid_update_threshold=5

# Time window (seconds) for detecting rapid updates
rapid_update_window=0.3

# How often to speak batched content (seconds)
batch_flush_interval=0.5

# Maximum lines to keep when batching (keeps newest, drops oldest)
max_batch_lines=100

# Only enable flood control if this many new lines appear in the window
flood_line_threshold=500

# genericSpeechCommand is the command that is executed for talking
# the following variables are replaced with values
# fenrirText = is the text that should be spoken

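The five new settings act as a two-part gate: flood mode only engages when at least rapid_update_threshold updates AND at least flood_line_threshold new lines land inside a sliding rapid_update_window, so a single large paste is still spoken normally while a fast-scrolling stream gets batched. The sketch below is purely illustrative (FloodDetector is a hypothetical stand-in, not Fenrir code) and mirrors the sliding-window checks added in this commit:

import time


class FloodDetector:
    """Hypothetical helper mirroring the commit's sliding-window flood checks."""

    def __init__(self, update_threshold=5, line_threshold=500, window=0.3):
        self.update_threshold = update_threshold  # rapid_update_threshold
        self.line_threshold = line_threshold      # flood_line_threshold
        self.window = window                      # rapid_update_window (seconds)
        self.update_times = []                    # timestamps of recent updates
        self.line_counts = []                     # (timestamp, line_count) pairs

    def observe(self, delta_text):
        """Record one screen update; return True when it should be batched."""
        now = time.time()
        # Keep only samples that are still inside the sliding window.
        self.update_times = [t for t in self.update_times if now - t < self.window]
        self.line_counts = [(t, n) for t, n in self.line_counts if now - t < self.window]
        self.update_times.append(now)
        self.line_counts.append((now, max(1, delta_text.count("\n") + 1)))
        rapid = len(self.update_times) >= self.update_threshold
        high_volume = sum(n for _, n in self.line_counts) >= self.line_threshold
        return rapid and high_volume  # both conditions must hold

With the defaults above, that means roughly five updates totalling 500 or more lines within 0.3 seconds before anything is held back.
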
@@ -10,7 +10,11 @@ from fenrirscreenreader.core.i18n import _

class command:
    def __init__(self):
        pass
        self._update_times = []
        self._line_count_times = []
        self._batched_text = []
        self._last_flush_time = 0
        self._in_flood_mode = False

    def initialize(self, environment):
        self.env = environment
@@ -21,6 +25,73 @@ class command:
    def get_description(self):
        return _("Announces incoming text changes")

    def _reset_flood_state(self):
        self._update_times = []
        self._line_count_times = []
        self._batched_text = []
        self._last_flush_time = 0
        self._in_flood_mode = False

    def _is_rapid_updates(self):
        current_time = time.time()
        window = self.env["runtime"]["SettingsManager"].get_setting_as_float(
            "speech", "rapid_update_window"
        )
        threshold = self.env["runtime"]["SettingsManager"].get_setting_as_int(
            "speech", "rapid_update_threshold"
        )

        self._update_times = [
            ts for ts in self._update_times if current_time - ts < window
        ]
        self._update_times.append(current_time)

        return len(self._update_times) >= threshold

    def _is_high_volume(self, delta_text):
        current_time = time.time()
        window = self.env["runtime"]["SettingsManager"].get_setting_as_float(
            "speech", "rapid_update_window"
        )
        threshold = self.env["runtime"]["SettingsManager"].get_setting_as_int(
            "speech", "flood_line_threshold"
        )

        line_count = max(1, delta_text.count("\n") + 1)
        self._line_count_times = [
            (ts, count)
            for ts, count in self._line_count_times
            if current_time - ts < window
        ]
        self._line_count_times.append((current_time, line_count))

        total_lines = sum(count for _, count in self._line_count_times)
        return total_lines >= threshold

    def _add_to_batch(self, text):
        new_lines = text.splitlines()
        if text.endswith("\n"):
            new_lines.append("")
        self._batched_text.extend(new_lines)

        max_lines = self.env["runtime"]["SettingsManager"].get_setting_as_int(
            "speech", "max_batch_lines"
        )
        if len(self._batched_text) > max_lines:
            self._batched_text = self._batched_text[-max_lines:]

    def _flush_batch(self):
        if not self._batched_text:
            return

        text = "\n".join(self._batched_text)
        self._batched_text = []
        self._last_flush_time = time.time()

        self.env["runtime"]["OutputManager"].present_text(
            text, interrupt=False, flush=False
        )

    def _was_handled_by_tab_completion(self, delta_text):
        """Check if this delta was already handled by tab completion to avoid duplicates"""
        if "tabCompletion" not in self.env["commandBuffer"]:
@@ -50,6 +121,9 @@ class command:
            return

        delta_text = self.env["screen"]["new_delta"]

        if self.env["runtime"]["ScreenManager"].is_screen_change():
            self._reset_flood_state()

        # Skip if tab completion already handled this delta
        if self._was_handled_by_tab_completion(delta_text):
@@ -71,6 +145,29 @@ class command:
        # <= 2:
        if "\n" not in delta_text:
            return

        rapid = self._is_rapid_updates()
        high_volume = self._is_high_volume(delta_text)

        if (rapid and high_volume) or self._in_flood_mode:
            if not self._in_flood_mode:
                self._last_flush_time = time.time()
                self._in_flood_mode = True

            self._add_to_batch(delta_text)

            interval = self.env["runtime"][
                "SettingsManager"
            ].get_setting_as_float("speech", "batch_flush_interval")
            if time.time() - self._last_flush_time >= interval:
                self._flush_batch()

            if not rapid or not high_volume:
                if self._batched_text:
                    self._flush_batch()
                self._in_flood_mode = False
            return

        # print(x_move, y_move, len(self.env['screen']['new_delta']), len(self.env['screen']['newNegativeDelta']))
        self.env["runtime"]["OutputManager"].present_text(
            delta_text, interrupt=False, flush=False

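The batching side can be pictured with a similarly small, hypothetical sketch (SpeechBatcher is not part of Fenrir; it just restates the _add_to_batch/_flush_batch behaviour above): lines are buffered, the buffer is trimmed to the newest max_batch_lines entries, and the buffered text is spoken at most once per batch_flush_interval seconds.

import time


class SpeechBatcher:
    """Hypothetical stand-in for the _add_to_batch/_flush_batch pair above."""

    def __init__(self, max_lines=100, flush_interval=0.5):
        self.max_lines = max_lines            # max_batch_lines
        self.flush_interval = flush_interval  # batch_flush_interval (seconds)
        self.lines = []
        self.last_flush = 0.0

    def add(self, text):
        new_lines = text.splitlines()
        if text.endswith("\n"):
            new_lines.append("")              # so the trailing newline survives the join
        self.lines.extend(new_lines)
        if len(self.lines) > self.max_lines:
            self.lines = self.lines[-self.max_lines:]  # keep newest, drop oldest

    def maybe_flush(self, speak):
        """Pass the buffered text to speak() once the flush interval has elapsed."""
        if self.lines and time.time() - self.last_flush >= self.flush_interval:
            speak("\n".join(self.lines))
            self.lines = []
            self.last_flush = time.time()

Put together, the run() logic above amounts to: while the detector keeps firing (or flood mode is already active), add each delta to the batch and flush it periodically; once the stream calms down, flush whatever is left, leave flood mode, and go back to presenting every delta directly.
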
@@ -30,6 +30,11 @@ settings_data = {
        "language": "",
        "auto_read_incoming": True,
        "read_numbers_as_digits": False,
        "rapid_update_threshold": 5,
        "rapid_update_window": 0.3,
        "batch_flush_interval": 0.5,
        "max_batch_lines": 100,
        "flood_line_threshold": 500,
        "generic_speech_command": 'espeak -a fenrirVolume -s fenrirRate -p fenrirPitch -v fenrirVoice "fenrirText"',
        "fenrir_min_volume": 0,
        "fenrir_max_volume": 200,