diff --git a/protocol_prototype/DryBox/UI/audio_player.py b/protocol_prototype/DryBox/UI/audio_player.py
index 6b922d7..a905c63 100644
--- a/protocol_prototype/DryBox/UI/audio_player.py
+++ b/protocol_prototype/DryBox/UI/audio_player.py
@@ -65,13 +65,14 @@ class AudioPlayer(QObject):
             return False
 
         try:
-            # Create audio stream
+            # Create audio stream with larger buffer to prevent underruns
            stream = self.audio.open(
                 format=pyaudio.paInt16,
                 channels=self.channels,
                 rate=self.sample_rate,
                 output=True,
-                frames_per_buffer=self.chunk_size
+                frames_per_buffer=self.chunk_size * 2,  # Doubled buffer size
+                stream_callback=None
             )
 
             self.streams[client_id] = stream
@@ -159,26 +160,37 @@ class AudioPlayer(QObject):
 
         self.debug(f"Playback thread started for client {client_id}")
 
+        # Buffer to accumulate data before playing
+        accumulated_data = b''
+        min_buffer_size = self.chunk_size * 2  # Buffer at least 2 chunks before playing
+
         while self.playback_enabled.get(client_id, False):
             try:
                 # Get audio data from buffer with timeout
-                audio_data = buffer.get(timeout=0.1)
+                audio_data = buffer.get(timeout=0.04)  # 40ms timeout
+                accumulated_data += audio_data
 
-                # Only log first frame to avoid spam
-                if not hasattr(self, '_playback_logged'):
-                    self._playback_logged = {}
-                if client_id not in self._playback_logged:
-                    self._playback_logged[client_id] = False
+                # Only play when we have enough data to prevent underruns
+                if len(accumulated_data) >= min_buffer_size:
+                    # Only log first frame to avoid spam
+                    if not hasattr(self, '_playback_logged'):
+                        self._playback_logged = {}
+                    if client_id not in self._playback_logged:
+                        self._playback_logged[client_id] = False
+
+                    if not self._playback_logged[client_id]:
+                        self.debug(f"Client {client_id} playback thread playing first frame: {len(accumulated_data)} bytes")
+                        self._playback_logged[client_id] = True
 
-                if not self._playback_logged[client_id]:
-                    self.debug(f"Client {client_id} playback thread playing first frame: {len(audio_data)} bytes")
-                    self._playback_logged[client_id] = True
-
-                # Play audio
-                stream.write(audio_data)
+                    # Play accumulated audio
+                    stream.write(accumulated_data[:min_buffer_size])
+                    accumulated_data = accumulated_data[min_buffer_size:]
 
             except queue.Empty:
-                # No data available, continue
+                # If we have some accumulated data, play it to avoid gaps
+                if len(accumulated_data) >= self.chunk_size:
+                    stream.write(accumulated_data[:self.chunk_size])
+                    accumulated_data = accumulated_data[self.chunk_size:]
                 continue
             except Exception as e:
                 self.debug(f"Playback error for client {client_id}: {e}")
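Note on the audio_player.py change (an illustrative aside, not part of the patch): the playback loop now trades a little latency for continuity by waiting until two chunks have accumulated before writing to the output stream, and by flushing a single chunk when the queue times out so the device is not starved. Below is a minimal sketch of the same accumulate-and-flush pattern, separated from PyAudio so it can be exercised on its own; play_loop, sink and running are hypothetical names, not identifiers from the patch.

import queue

def play_loop(buffer, sink, chunk_size, running):
    """Accumulate incoming byte chunks and write them out in fixed-size blocks."""
    accumulated = b''
    min_buffer = chunk_size * 2  # wait for two chunks before the first write
    while running():
        try:
            accumulated += buffer.get(timeout=0.04)
            if len(accumulated) >= min_buffer:
                sink(accumulated[:min_buffer])  # one full block per write
                accumulated = accumulated[min_buffer:]
        except queue.Empty:
            # Timed out waiting for new data: flush a single chunk if one is available
            if len(accumulated) >= chunk_size:
                sink(accumulated[:chunk_size])
                accumulated = accumulated[chunk_size:]

Keeping the flush threshold at two chunks mirrors the doubled frames_per_buffer above, so the producer and the device buffer stay roughly in step.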
diff --git a/protocol_prototype/DryBox/UI/main.py b/protocol_prototype/DryBox/UI/main.py
index 8d7ce77..0b2822f 100644
--- a/protocol_prototype/DryBox/UI/main.py
+++ b/protocol_prototype/DryBox/UI/main.py
@@ -17,11 +17,11 @@ class PhoneUI(QMainWindow):
 
     def __init__(self):
         super().__init__()
-        self.setWindowTitle("DryBox - Noise XK + Codec2 + 4FSK")
-        self.setGeometry(100, 100, 1200, 900)
+        self.setWindowTitle("DryBox - Noise XK + Codec2 (3200bps) + 4FSK")
+        self.setGeometry(100, 100, 1400, 1000)
 
         # Set minimum size to ensure window is resizable
-        self.setMinimumSize(800, 600)
+        self.setMinimumSize(1200, 800)
 
         # Auto test state
         self.auto_test_running = False
@@ -54,7 +54,8 @@ class PhoneUI(QMainWindow):
             QWidget#phoneWidget {
                 border: 2px solid #4A4A4A; border-radius: 10px;
                 background-color: #3A3A3A;
-                min-width: 250px;
+                min-width: 400px;
+                padding: 10px;
             }
             QTextEdit#debugConsole {
                 background-color: #1E1E1E; color: #00FF00;
@@ -99,7 +100,7 @@ class PhoneUI(QMainWindow):
         phones_layout.addWidget(app_title_label)
 
         # Protocol info
-        protocol_info = QLabel("Noise XK + Codec2 (1200bps) + 4FSK")
+        protocol_info = QLabel("Noise XK + Codec2 (3200bps) + 4FSK")
         protocol_info.setAlignment(Qt.AlignCenter)
         protocol_info.setStyleSheet("font-size: 12px; color: #00A2E8;")
         phones_layout.addWidget(protocol_info)
@@ -205,8 +206,8 @@ class PhoneUI(QMainWindow):
         phone_container_widget.setObjectName("phoneWidget")
         phone_container_widget.setSizePolicy(QSizePolicy.Preferred, QSizePolicy.Preferred)
         phone_layout = QVBoxLayout()
-        phone_layout.setAlignment(Qt.AlignCenter)
-        phone_layout.setSpacing(10)
+        phone_layout.setAlignment(Qt.AlignTop)
+        phone_layout.setSpacing(15)
         phone_layout.setContentsMargins(15, 15, 15, 15)
         phone_container_widget.setLayout(phone_layout)
 
@@ -215,11 +216,18 @@ class PhoneUI(QMainWindow):
         phone_title_label.setAlignment(Qt.AlignCenter)
         phone_layout.addWidget(phone_title_label)
 
+        # Phone display section
+        phone_display_section = QWidget()
+        display_section_layout = QVBoxLayout()
+        display_section_layout.setSpacing(10)
+        display_section_layout.setContentsMargins(0, 0, 0, 0)
+        phone_display_section.setLayout(display_section_layout)
+
         phone_display_frame = QFrame()
         phone_display_frame.setObjectName("phoneDisplay")
-        phone_display_frame.setMinimumSize(200, 250)
-        phone_display_frame.setMaximumSize(300, 400)
-        phone_display_frame.setSizePolicy(QSizePolicy.Preferred, QSizePolicy.Preferred)
+        phone_display_frame.setMinimumSize(250, 200)
+        phone_display_frame.setMaximumSize(350, 250)
+        phone_display_frame.setSizePolicy(QSizePolicy.Preferred, QSizePolicy.Fixed)
 
         display_content_layout = QVBoxLayout(phone_display_frame)
         display_content_layout.setAlignment(Qt.AlignCenter)
@@ -227,36 +235,54 @@ class PhoneUI(QMainWindow):
         phone_status_label.setAlignment(Qt.AlignCenter)
         phone_status_label.setFont(QFont("Arial", 16))
         display_content_layout.addWidget(phone_status_label)
-        phone_layout.addWidget(phone_display_frame, alignment=Qt.AlignCenter)
+        display_section_layout.addWidget(phone_display_frame, alignment=Qt.AlignCenter)
 
         phone_button = QPushButton()
         phone_button.setMinimumWidth(100)
         phone_button.setMaximumWidth(150)
         phone_button.setIconSize(QSize(20, 20))
         phone_button.clicked.connect(action_slot)
-        phone_layout.addWidget(phone_button, alignment=Qt.AlignCenter)
+        display_section_layout.addWidget(phone_button, alignment=Qt.AlignCenter)
+
+        phone_layout.addWidget(phone_display_section)
+
+        # Add separator
+        separator = QFrame()
+        separator.setFrameShape(QFrame.HLine)
+        separator.setFrameShadow(QFrame.Sunken)
+        separator.setStyleSheet("background-color: #4A4A4A; max-height: 2px;")
+        phone_layout.addWidget(separator)
+        # Waveforms section
+        waveforms_section = QWidget()
+        waveforms_layout = QVBoxLayout()
+        waveforms_layout.setSpacing(10)
+        waveforms_layout.setContentsMargins(0, 0, 0, 0)
+        waveforms_section.setLayout(waveforms_layout)
+
         # Received waveform
         waveform_label = QLabel(f"{title} Received")
         waveform_label.setAlignment(Qt.AlignCenter)
         waveform_label.setStyleSheet("font-size: 12px; color: #E0E0E0;")
-        phone_layout.addWidget(waveform_label)
+        waveforms_layout.addWidget(waveform_label)
 
         waveform_widget = WaveformWidget(dynamic=False)
-        waveform_widget.setMinimumSize(200, 50)
-        waveform_widget.setMaximumSize(300, 80)
+        waveform_widget.setMinimumSize(250, 50)
+        waveform_widget.setMaximumSize(350, 60)
         waveform_widget.setSizePolicy(QSizePolicy.Preferred, QSizePolicy.Fixed)
-        phone_layout.addWidget(waveform_widget, alignment=Qt.AlignCenter)
+        waveforms_layout.addWidget(waveform_widget, alignment=Qt.AlignCenter)
 
         # Sent waveform
         sent_waveform_label = QLabel(f"{title} Sent")
         sent_waveform_label.setAlignment(Qt.AlignCenter)
         sent_waveform_label.setStyleSheet("font-size: 12px; color: #E0E0E0;")
-        phone_layout.addWidget(sent_waveform_label)
+        waveforms_layout.addWidget(sent_waveform_label)
 
         sent_waveform_widget = WaveformWidget(dynamic=False)
-        sent_waveform_widget.setMinimumSize(200, 50)
-        sent_waveform_widget.setMaximumSize(300, 80)
+        sent_waveform_widget.setMinimumSize(250, 50)
+        sent_waveform_widget.setMaximumSize(350, 60)
         sent_waveform_widget.setSizePolicy(QSizePolicy.Preferred, QSizePolicy.Fixed)
-        phone_layout.addWidget(sent_waveform_widget, alignment=Qt.AlignCenter)
+        waveforms_layout.addWidget(sent_waveform_widget, alignment=Qt.AlignCenter)
+
+        phone_layout.addWidget(waveforms_section)
 
         # Audio control buttons
         audio_controls_layout = QHBoxLayout()
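Note on the main.py rework (illustrative, not part of the patch): the change is pure layout, splitting each phone column into a display section and a waveforms section separated by a thin horizontal rule. A hedged sketch of the same pattern factored into helpers; make_section and make_separator are hypothetical, and the import is shown for PySide6, so swap in the PyQt equivalents if that is the binding the project uses.

from PySide6.QtWidgets import QFrame, QVBoxLayout, QWidget  # or the PyQt equivalents

def make_section(spacing=10):
    """Container widget plus its vertical layout, with zero outer margins."""
    section = QWidget()
    layout = QVBoxLayout()
    layout.setSpacing(spacing)
    layout.setContentsMargins(0, 0, 0, 0)
    section.setLayout(layout)
    return section, layout

def make_separator():
    """Thin sunken horizontal rule, styled like the separator in the patch."""
    sep = QFrame()
    sep.setFrameShape(QFrame.HLine)
    sep.setFrameShadow(QFrame.Sunken)
    sep.setStyleSheet("background-color: #4A4A4A; max-height: 2px;")
    return sep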
diff --git a/protocol_prototype/DryBox/UI/phone_manager.py b/protocol_prototype/DryBox/UI/phone_manager.py
index d53f837..0bba3e7 100644
--- a/protocol_prototype/DryBox/UI/phone_manager.py
+++ b/protocol_prototype/DryBox/UI/phone_manager.py
@@ -145,10 +145,10 @@ class PhoneManager:
             # Send through protocol (codec + 4FSK + encryption)
             phone['client'].send_voice_frame(frames)
 
-            # Update waveform
-            if len(frames) >= 2:
-                samples = struct.unpack(f'{len(frames)//2}h', frames)
-                self.update_sent_waveform(phone_id, frames)
+            # Update waveform only every 5 frames to reduce CPU usage
+            if phone['frame_counter'] % 5 == 0:
+                if len(frames) >= 2:
+                    self.update_sent_waveform(phone_id, frames)
 
             # If playback is enabled on the sender, play the original audio
             if phone['playback_enabled']:
@@ -198,8 +198,6 @@ class PhoneManager:
         if len(data) < 320:  # Less than 160 samples (too small for audio)
             self.debug(f"Phone {client_id + 1} received non-audio data: {len(data)} bytes (ignoring)")
             return
-
-        self.phones[client_id]['waveform'].set_data(data)
 
         # Debug log audio data reception (only occasionally to avoid spam)
         if not hasattr(self, '_audio_frame_count'):
@@ -208,6 +206,10 @@ class PhoneManager:
             self._audio_frame_count = {}
         if client_id not in self._audio_frame_count:
             self._audio_frame_count[client_id] = 0
         self._audio_frame_count[client_id] += 1
+        # Update waveform only every 5 frames to reduce CPU usage
+        if self._audio_frame_count[client_id] % 5 == 0:
+            self.phones[client_id]['waveform'].set_data(data)
+
         if self._audio_frame_count[client_id] == 1 or self._audio_frame_count[client_id] % 25 == 0:
             self.debug(f"Phone {client_id + 1} received audio frame #{self._audio_frame_count[client_id]}: {len(data)} bytes")
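Note on the phone_manager.py hunks (illustrative, not part of the patch): both sides apply the same idea, every frame still flows through the protocol and the audio player, but the waveform widgets are only redrawn on every fifth frame. A small sketch of that throttle as a reusable helper; UpdateThrottle is a hypothetical name the patch does not introduce.

from collections import defaultdict

class UpdateThrottle:
    """Per-client frame counter that reports when a UI refresh is due."""

    def __init__(self, every=5):
        self.every = every
        self.counts = defaultdict(int)

    def tick(self, client_id):
        """Count one frame; return True on every Nth frame for this client."""
        self.counts[client_id] += 1
        return self.counts[client_id] % self.every == 0

A call site would read: if throttle.tick(client_id): self.phones[client_id]['waveform'].set_data(data). That leaves the audio path untouched while cutting redraws to one in five.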
diff --git a/protocol_prototype/DryBox/UI/protocol_phone_client.py b/protocol_prototype/DryBox/UI/protocol_phone_client.py
index f8750b2..fa8f862 100644
--- a/protocol_prototype/DryBox/UI/protocol_phone_client.py
+++ b/protocol_prototype/DryBox/UI/protocol_phone_client.py
@@ -37,8 +37,8 @@ class ProtocolPhoneClient(QThread):
 
         # No buffer needed with larger frame size
 
-        # Voice codec components
-        self.codec = Codec2Wrapper(mode=Codec2Mode.MODE_1200)
+        # Voice codec components - use higher quality mode
+        self.codec = Codec2Wrapper(mode=Codec2Mode.MODE_3200)  # Changed from 1200 to 3200 bps for better quality
         self.modem = FSKModem()
 
         # Voice encryption handled by Noise XK
@@ -226,7 +226,7 @@ class ProtocolPhoneClient(QThread):
             # Create Codec2Frame from demodulated data
             from voice_codec import Codec2Frame, Codec2Mode
             frame = Codec2Frame(
-                mode=Codec2Mode.MODE_1200,
+                mode=Codec2Mode.MODE_3200,  # Match the encoder mode
                 bits=demodulated_data,
                 timestamp=time.time(),
                 frame_number=self.voice_frame_counter
diff --git a/protocol_prototype/DryBox/UI/waveform_widget.py b/protocol_prototype/DryBox/UI/waveform_widget.py
index bb507a0..2359f0f 100644
--- a/protocol_prototype/DryBox/UI/waveform_widget.py
+++ b/protocol_prototype/DryBox/UI/waveform_widget.py
@@ -10,15 +10,18 @@ class WaveformWidget(QWidget):
         self.dynamic = dynamic
         self.setMinimumSize(200, 60)
         self.setMaximumHeight(80)
-        self.waveform_data = [random.randint(10, 90) for _ in range(50)]
+        # Start with flat line instead of random data
+        self.waveform_data = [50 for _ in range(50)]
         if self.dynamic:
             self.timer = QTimer(self)
             self.timer.timeout.connect(self.update_waveform)
             self.timer.start(100)
 
     def update_waveform(self):
-        self.waveform_data = self.waveform_data[1:] + [random.randint(10, 90)]
-        self.update()
+        # Only update with random data if dynamic mode is enabled
+        if self.dynamic:
+            self.waveform_data = self.waveform_data[1:] + [random.randint(10, 90)]
+            self.update()
 
     def set_data(self, data):
         # Convert audio data to visual amplitude
diff --git a/protocol_prototype/DryBox/wav/test_codec_only.wav b/protocol_prototype/DryBox/wav/test_codec_only.wav
deleted file mode 100644
index b5f4502..0000000
Binary files a/protocol_prototype/DryBox/wav/test_codec_only.wav and /dev/null differ
diff --git a/protocol_prototype/DryBox/wav/test_full_pipeline.wav b/protocol_prototype/DryBox/wav/test_full_pipeline.wav
deleted file mode 100644
index b5f4502..0000000
Binary files a/protocol_prototype/DryBox/wav/test_full_pipeline.wav and /dev/null differ
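Note on the codec change (illustrative, not part of the patch): the mode is set in two places that must agree, the encoder built with Codec2Wrapper(mode=Codec2Mode.MODE_3200) and the Codec2Frame reconstructed on the receive side with the same mode. A hedged sketch of pinning that mode in one shared constant so the two sides cannot drift apart; VOICE_CODEC_MODE, make_codec and frame_from_demodulated are hypothetical, and Codec2Mode, Codec2Wrapper and Codec2Frame are the project's own classes (the import path below assumes they all live in voice_codec, as the receive-side import above does for two of them).

import time

from voice_codec import Codec2Frame, Codec2Mode, Codec2Wrapper

# Assumed single source of truth for the voice codec bit rate; both ends import it.
VOICE_CODEC_MODE = Codec2Mode.MODE_3200

def make_codec():
    """Encoder/decoder wrapper constructed with the shared mode."""
    return Codec2Wrapper(mode=VOICE_CODEC_MODE)

def frame_from_demodulated(bits, frame_number):
    """Rebuild a received frame with the same mode the sender used."""
    return Codec2Frame(
        mode=VOICE_CODEC_MODE,
        bits=bits,
        timestamp=time.time(),
        frame_number=frame_number,
    )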