Updates to the AI app. Responses should now be announced automatically by the screen reader when they arrive.

This commit is contained in:
Storm Dragon
2025-12-09 12:23:08 -05:00
parent 748fcde0b8
commit b4578e0035
+29 -16
View File
@@ -6,7 +6,8 @@ Provides accessibility-focused AI interaction with multiple providers
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk, GLib, Gdk
gi.require_version('Atk', '1.0')
from gi.repository import Gtk, GLib, Gdk, Atk
import json
import os
import subprocess
@@ -17,7 +18,6 @@ import i3ipc
import threading
import requests
import time
import pyaudio
import wave
class SystemCommands:
@@ -63,29 +63,33 @@ class SystemCommands:
class VoiceRecognition:
"""Voice recognition system for AI assistant"""
def __init__(self, config):
self.config = config
self.is_recording = False
self.audio = None
self.stream = None
# Audio settings
self.sample_rate = 16000
self.chunk_size = 1024
self.audio_format = pyaudio.paInt16
self.channels = 1
try:
import pyaudio
import speech_recognition as sr
self.pyaudio = pyaudio
self.audio_format = pyaudio.paInt16
self.recognizer = sr.Recognizer()
self.microphone = sr.Microphone()
self.sr_available = True
# Adjust for ambient noise
with self.microphone as source:
self.recognizer.adjust_for_ambient_noise(source)
except ImportError:
self.pyaudio = None
self.audio_format = None
self.sr_available = False
self.recognizer = None
self.microphone = None
@@ -96,11 +100,11 @@ class VoiceRecognition:
def start_recording(self):
"""Start recording audio"""
if not self.sr_available:
if not self.sr_available or not self.pyaudio:
return False
try:
self.audio = pyaudio.PyAudio()
self.audio = self.pyaudio.PyAudio()
self.is_recording = True
return True
except Exception as e:
@@ -287,8 +291,9 @@ class OllamaInterface:
'prompt': message,
'stream': False
}
if context and not context.startswith("You are a helpful AI assistant"):
# Add system context if provided
if context:
data['system'] = context
# Handle image if provided
@@ -332,9 +337,9 @@ class ClaudeCodeInterface:
"""Send message to Claude Code"""
try:
cmd = ['claude']
# Add context if provided
if context and not context.startswith("You are a helpful AI assistant"):
if context:
message = f"Context: {context}\n\n{message}"
# Add image if provided
@@ -372,7 +377,7 @@ class CodexCliInterface:
"""Send message to Codex CLI using non-interactive exec mode"""
try:
full_message = message
if context and not context.startswith("You are a helpful AI assistant"):
if context:
full_message = f"Context: {context}\n\n{message}"
cmd = ['codex', 'exec', '--skip-git-repo-check', '--sandbox', 'read-only']
@@ -697,11 +702,15 @@ class AiAssistant(Gtk.Window):
self.responseText.set_editable(False)
self.responseText.set_can_focus(True)
self.responseText.set_accepts_tab(False)
# Set accessibility properties for response
response_atk = self.responseText.get_accessible()
response_atk.set_name("AI Response")
response_atk.set_description("AI assistant's response to your question")
# Make this a live region so screen readers automatically announce new content
# Note: ATK live region support in GTK3 is limited, so we rely primarily on focus management
# in set_response_text() to ensure Orca reads new responses
# Link response label to text view
self.responseLabel.set_mnemonic_widget(self.responseText)
@@ -1298,6 +1307,10 @@ class AiAssistant(Gtk.Window):
"""Set text in response text view"""
buffer = self.responseText.get_buffer()
buffer.set_text(text)
# Move focus to the response so Orca reads it immediately
# This is the most reliable way to ensure screen readers announce new content
GLib.idle_add(self.responseText.grab_focus)
def append_response_text(self, text):
"""Append text to response text view"""