// gglib_core/settings.rs

1//! Settings domain types and validation.
2//!
3//! This module contains the core settings types used across the application.
4//! These are pure domain types with no infrastructure dependencies.
5
6use serde::{Deserialize, Serialize};
7
8use crate::domain::InferenceConfig;
9
/// Default port for the OpenAI-compatible proxy server.
pub const DEFAULT_PROXY_PORT: u16 = 8080;

/// Default base port for llama-server instance allocation.
///
/// This is the first port of the llama-server range and is distinct from
/// the proxy's own listening port (`DEFAULT_PROXY_PORT`).
pub const DEFAULT_LLAMA_BASE_PORT: u16 = 9000;
15
/// Application settings structure.
///
/// All fields are optional to support partial updates and graceful defaults.
/// See `validate_settings` for the accepted ranges of each numeric field.
#[derive(Debug, Clone, Default, Serialize, Deserialize, PartialEq)]
#[serde(default)]
pub struct Settings {
    /// Default directory for downloading models.
    /// Validation rejects a present-but-blank (whitespace-only) path.
    pub default_download_path: Option<String>,

    /// Default context size for models (e.g., 4096, 8192).
    /// Validated range: 512 to 1,000,000.
    pub default_context_size: Option<u64>,

    /// Port for the OpenAI-compatible proxy server.
    /// Validation requires an unprivileged port (>= 1024).
    pub proxy_port: Option<u16>,

    /// Base port for llama-server instance allocation (first port in range).
    /// Note: The OpenAI-compatible proxy listens on `proxy_port`.
    /// Validation requires an unprivileged port (>= 1024).
    pub llama_base_port: Option<u16>,

    /// Maximum number of downloads that can be queued (1-50).
    pub max_download_queue_size: Option<u32>,

    /// Whether to show memory fit indicators in `HuggingFace` browser.
    pub show_memory_fit_indicators: Option<bool>,

    /// Maximum iterations for tool calling agentic loop.
    pub max_tool_iterations: Option<u32>,

    /// Maximum stagnation steps before stopping agent loop.
    pub max_stagnation_steps: Option<u32>,

    /// Default model ID for commands that support a default model.
    pub default_model_id: Option<i64>,

    /// Global inference parameter defaults.
    ///
    /// Applied when neither request nor per-model defaults are specified.
    /// If not set, hardcoded defaults are used as final fallback.
    /// Validated via `validate_inference_config` when present.
    #[serde(default)]
    pub inference_defaults: Option<InferenceConfig>,

    // ── Voice settings ─────────────────────────────────────────────
    /// Whether voice mode is enabled.
    pub voice_enabled: Option<bool>,

    /// Voice interaction mode: "ptt" (push-to-talk) or "vad" (voice activity detection).
    pub voice_interaction_mode: Option<String>,

    /// Selected whisper STT model ID (e.g., "base.en", "small.en-q5_1").
    pub voice_stt_model: Option<String>,

    /// Selected TTS voice ID (e.g., `af_sarah`, `am_michael`).
    pub voice_tts_voice: Option<String>,

    /// TTS playback speed multiplier (0.5–2.0, default 1.0).
    pub voice_tts_speed: Option<f32>,

    /// VAD speech detection threshold (0.0–1.0, default 0.5).
    pub voice_vad_threshold: Option<f32>,

    /// VAD minimum silence duration in ms before utterance ends (default 700).
    pub voice_vad_silence_ms: Option<u32>,

    /// Whether to automatically speak LLM responses via TTS.
    pub voice_auto_speak: Option<bool>,

    /// Preferred audio input device name (None = system default).
    pub voice_input_device: Option<String>,

    // ── Setup wizard ────────────────────────────────────────────────
    /// Whether the first-run setup wizard has been completed.
    pub setup_completed: Option<bool>,
}
89
90impl Settings {
91    /// Create settings with sensible defaults.
92    #[must_use]
93    pub const fn with_defaults() -> Self {
94        Self {
95            default_download_path: None,
96            default_context_size: Some(4096),
97            proxy_port: Some(DEFAULT_PROXY_PORT),
98            llama_base_port: Some(DEFAULT_LLAMA_BASE_PORT),
99            max_download_queue_size: Some(10),
100            show_memory_fit_indicators: Some(true),
101            max_tool_iterations: Some(25),
102            max_stagnation_steps: Some(5),
103            default_model_id: None,
104            inference_defaults: None,
105            voice_enabled: Some(false),
106            voice_interaction_mode: None,
107            voice_stt_model: None,
108            voice_tts_voice: None,
109            voice_tts_speed: Some(1.0),
110            voice_vad_threshold: None,
111            voice_vad_silence_ms: None,
112            voice_auto_speak: Some(true),
113            voice_input_device: None,
114            setup_completed: None,
115        }
116    }
117
118    /// Get the effective proxy port (with default fallback).
119    #[must_use]
120    pub const fn effective_proxy_port(&self) -> u16 {
121        match self.proxy_port {
122            Some(port) => port,
123            None => DEFAULT_PROXY_PORT,
124        }
125    }
126
127    /// Get the effective llama-server base port (with default fallback).
128    #[must_use]
129    pub const fn effective_llama_base_port(&self) -> u16 {
130        match self.llama_base_port {
131            Some(port) => port,
132            None => DEFAULT_LLAMA_BASE_PORT,
133        }
134    }
135
136    /// Merge another settings into this one, only updating fields that are Some.
137    pub fn merge(&mut self, other: &SettingsUpdate) {
138        if let Some(ref path) = other.default_download_path {
139            self.default_download_path.clone_from(path);
140        }
141        if let Some(ref ctx_size) = other.default_context_size {
142            self.default_context_size = *ctx_size;
143        }
144        if let Some(ref port) = other.proxy_port {
145            self.proxy_port = *port;
146        }
147        if let Some(ref port) = other.llama_base_port {
148            self.llama_base_port = *port;
149        }
150        if let Some(ref queue_size) = other.max_download_queue_size {
151            self.max_download_queue_size = *queue_size;
152        }
153        if let Some(ref show_fit) = other.show_memory_fit_indicators {
154            self.show_memory_fit_indicators = *show_fit;
155        }
156        if let Some(ref iters) = other.max_tool_iterations {
157            self.max_tool_iterations = *iters;
158        }
159        if let Some(ref steps) = other.max_stagnation_steps {
160            self.max_stagnation_steps = *steps;
161        }
162        if let Some(ref model_id) = other.default_model_id {
163            self.default_model_id = *model_id;
164        }
165        if let Some(ref inference_defaults) = other.inference_defaults {
166            self.inference_defaults.clone_from(inference_defaults);
167        }
168        if let Some(ref v) = other.voice_enabled {
169            self.voice_enabled = *v;
170        }
171        if let Some(ref v) = other.voice_interaction_mode {
172            self.voice_interaction_mode.clone_from(v);
173        }
174        if let Some(ref v) = other.voice_stt_model {
175            self.voice_stt_model.clone_from(v);
176        }
177        if let Some(ref v) = other.voice_tts_voice {
178            self.voice_tts_voice.clone_from(v);
179        }
180        if let Some(ref v) = other.voice_tts_speed {
181            self.voice_tts_speed = *v;
182        }
183        if let Some(ref v) = other.voice_vad_threshold {
184            self.voice_vad_threshold = *v;
185        }
186        if let Some(ref v) = other.voice_vad_silence_ms {
187            self.voice_vad_silence_ms = *v;
188        }
189        if let Some(ref v) = other.voice_auto_speak {
190            self.voice_auto_speak = *v;
191        }
192        if let Some(ref v) = other.voice_input_device {
193            self.voice_input_device.clone_from(v);
194        }
195        if let Some(ref v) = other.setup_completed {
196            self.setup_completed = *v;
197        }
198    }
199}
200
/// Partial settings update.
///
/// Each field is `Option<Option<T>>`:
/// - `None` = don't change this field
/// - `Some(None)` = set field to None/null
/// - `Some(Some(value))` = set field to value
///
/// Field meanings mirror the same-named fields on `Settings`; apply an
/// update with `Settings::merge`.
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct SettingsUpdate {
    pub default_download_path: Option<Option<String>>,
    pub default_context_size: Option<Option<u64>>,
    pub proxy_port: Option<Option<u16>>,
    pub llama_base_port: Option<Option<u16>>,
    pub max_download_queue_size: Option<Option<u32>>,
    pub show_memory_fit_indicators: Option<Option<bool>>,
    pub max_tool_iterations: Option<Option<u32>>,
    pub max_stagnation_steps: Option<Option<u32>>,
    pub default_model_id: Option<Option<i64>>,
    pub inference_defaults: Option<Option<InferenceConfig>>,
    // Voice settings (see `Settings` for per-field semantics).
    pub voice_enabled: Option<Option<bool>>,
    pub voice_interaction_mode: Option<Option<String>>,
    pub voice_stt_model: Option<Option<String>>,
    pub voice_tts_voice: Option<Option<String>>,
    pub voice_tts_speed: Option<Option<f32>>,
    pub voice_vad_threshold: Option<Option<f32>>,
    pub voice_vad_silence_ms: Option<Option<u32>>,
    pub voice_auto_speak: Option<Option<bool>>,
    pub voice_input_device: Option<Option<String>>,
    // Setup wizard completion flag.
    pub setup_completed: Option<Option<bool>>,
}
230
/// Settings validation error.
///
/// Produced by `validate_settings`; each variant carries the offending value
/// where one exists.
#[derive(Debug, Clone, thiserror::Error)]
pub enum SettingsError {
    /// `default_context_size` is outside 512..=1,000,000.
    #[error("Context size must be between 512 and 1,000,000, got {0}")]
    InvalidContextSize(u64),

    /// `proxy_port` or `llama_base_port` is a privileged port (< 1024).
    #[error("Port should be >= 1024 (privileged ports require root), got {0}")]
    InvalidPort(u16),

    /// `max_download_queue_size` is outside 1..=50.
    #[error("Max download queue size must be between 1 and 50, got {0}")]
    InvalidQueueSize(u32),

    /// `default_download_path` is present but blank (whitespace-only).
    #[error("Download path cannot be empty")]
    EmptyDownloadPath,

    /// `inference_defaults` failed `validate_inference_config`; the message
    /// describes the out-of-range parameter.
    #[error("Invalid inference parameter: {0}")]
    InvalidInferenceConfig(String),
}
249
250/// Validate settings values.
251pub fn validate_settings(settings: &Settings) -> Result<(), SettingsError> {
252    // Validate context size
253    if let Some(ctx_size) = settings.default_context_size {
254        if !(512..=1_000_000).contains(&ctx_size) {
255            return Err(SettingsError::InvalidContextSize(ctx_size));
256        }
257    }
258
259    // Validate proxy port
260    if let Some(port) = settings.proxy_port {
261        if port < 1024 {
262            return Err(SettingsError::InvalidPort(port));
263        }
264    }
265
266    // Validate llama-server base port
267    if let Some(port) = settings.llama_base_port {
268        if port < 1024 {
269            return Err(SettingsError::InvalidPort(port));
270        }
271    }
272
273    // Validate max download queue size
274    if let Some(queue_size) = settings.max_download_queue_size {
275        if !(1..=50).contains(&queue_size) {
276            return Err(SettingsError::InvalidQueueSize(queue_size));
277        }
278    }
279
280    // Validate download path if specified
281    if settings
282        .default_download_path
283        .as_ref()
284        .is_some_and(|p| p.trim().is_empty())
285    {
286        return Err(SettingsError::EmptyDownloadPath);
287    }
288
289    // Validate inference defaults if specified
290    if let Some(ref inference_config) = settings.inference_defaults {
291        validate_inference_config(inference_config)
292            .map_err(SettingsError::InvalidInferenceConfig)?;
293    }
294
295    Ok(())
296}
297
298/// Validate inference configuration parameters.
299///
300/// Checks that all specified parameters are within valid ranges.
301pub fn validate_inference_config(config: &InferenceConfig) -> Result<(), String> {
302    // Validate temperature (0.0 - 2.0)
303    if let Some(temp) = config.temperature {
304        if !(0.0..=2.0).contains(&temp) {
305            return Err(format!(
306                "Temperature must be between 0.0 and 2.0, got {temp}"
307            ));
308        }
309    }
310
311    // Validate top_p (0.0 - 1.0)
312    if let Some(top_p) = config.top_p {
313        if !(0.0..=1.0).contains(&top_p) {
314            return Err(format!("Top P must be between 0.0 and 1.0, got {top_p}"));
315        }
316    }
317
318    // Validate top_k (must be positive)
319    if let Some(top_k) = config.top_k {
320        if top_k <= 0 {
321            return Err(format!("Top K must be positive, got {top_k}"));
322        }
323    }
324
325    // Validate max_tokens (must be positive)
326    if let Some(max_tokens) = config.max_tokens {
327        if max_tokens == 0 {
328            return Err("Max tokens must be positive".to_string());
329        }
330    }
331
332    // Validate repeat_penalty (must be positive)
333    if let Some(repeat_penalty) = config.repeat_penalty {
334        if repeat_penalty <= 0.0 {
335            return Err(format!(
336                "Repeat penalty must be positive, got {repeat_penalty}"
337            ));
338        }
339    }
340
341    Ok(())
342}
343
#[cfg(test)]
mod tests {
    use super::*;

    /// `with_defaults` yields the documented default values.
    #[test]
    fn test_default_settings() {
        let settings = Settings::with_defaults();
        assert_eq!(settings.default_context_size, Some(4096));
        assert_eq!(settings.proxy_port, Some(DEFAULT_PROXY_PORT));
        assert_eq!(settings.llama_base_port, Some(DEFAULT_LLAMA_BASE_PORT));
        assert_eq!(settings.default_download_path, None);
        assert_eq!(settings.max_download_queue_size, Some(10));
        assert_eq!(settings.show_memory_fit_indicators, Some(true));
    }

    /// The default settings pass validation.
    #[test]
    fn test_validate_settings_valid() {
        let settings = Settings::with_defaults();
        assert!(validate_settings(&settings).is_ok());
    }

    /// Context size below 512 is rejected.
    #[test]
    fn test_validate_context_size_too_small() {
        let settings = Settings {
            default_context_size: Some(100),
            ..Default::default()
        };
        assert!(matches!(
            validate_settings(&settings),
            Err(SettingsError::InvalidContextSize(100))
        ));
    }

    /// Context size above 1,000,000 is rejected.
    #[test]
    fn test_validate_context_size_too_large() {
        let settings = Settings {
            default_context_size: Some(2_000_000),
            ..Default::default()
        };
        assert!(matches!(
            validate_settings(&settings),
            Err(SettingsError::InvalidContextSize(2_000_000))
        ));
    }

    /// Privileged ports (< 1024) are rejected.
    #[test]
    fn test_validate_port_too_low() {
        let settings = Settings {
            proxy_port: Some(80),
            ..Default::default()
        };
        assert!(matches!(
            validate_settings(&settings),
            Err(SettingsError::InvalidPort(80))
        ));
    }

    /// A present-but-empty download path is rejected.
    #[test]
    fn test_validate_empty_path() {
        let settings = Settings {
            default_download_path: Some(String::new()),
            ..Default::default()
        };
        assert!(matches!(
            validate_settings(&settings),
            Err(SettingsError::EmptyDownloadPath)
        ));
    }

    /// A fully-populated, in-range inference config passes.
    #[test]
    fn test_validate_inference_config_valid() {
        let config = InferenceConfig {
            temperature: Some(0.7),
            top_p: Some(0.9),
            top_k: Some(40),
            max_tokens: Some(2048),
            repeat_penalty: Some(1.1),
        };
        assert!(validate_inference_config(&config).is_ok());
    }

    /// Temperature outside 0.0..=2.0 fails in both directions.
    #[test]
    fn test_validate_inference_config_temperature_out_of_range() {
        let config = InferenceConfig {
            temperature: Some(2.5),
            ..Default::default()
        };
        assert!(validate_inference_config(&config).is_err());

        let config = InferenceConfig {
            temperature: Some(-0.1),
            ..Default::default()
        };
        assert!(validate_inference_config(&config).is_err());
    }

    /// Top-p outside 0.0..=1.0 fails in both directions.
    #[test]
    fn test_validate_inference_config_top_p_out_of_range() {
        let config = InferenceConfig {
            top_p: Some(1.5),
            ..Default::default()
        };
        assert!(validate_inference_config(&config).is_err());

        let config = InferenceConfig {
            top_p: Some(-0.1),
            ..Default::default()
        };
        assert!(validate_inference_config(&config).is_err());
    }

    /// Non-positive top-k and repeat penalty are rejected.
    #[test]
    fn test_validate_inference_config_negative_values() {
        let config = InferenceConfig {
            top_k: Some(-1),
            ..Default::default()
        };
        assert!(validate_inference_config(&config).is_err());

        let config = InferenceConfig {
            repeat_penalty: Some(0.0),
            ..Default::default()
        };
        assert!(validate_inference_config(&config).is_err());
    }

    /// Settings carrying valid inference defaults pass validation.
    #[test]
    fn test_settings_with_valid_inference_defaults() {
        let settings = Settings {
            inference_defaults: Some(InferenceConfig {
                temperature: Some(0.8),
                top_p: Some(0.95),
                ..Default::default()
            }),
            ..Settings::with_defaults()
        };
        assert!(validate_settings(&settings).is_ok());
    }

    /// Invalid inference defaults bubble up through `validate_settings`.
    #[test]
    fn test_settings_with_invalid_inference_defaults() {
        let settings = Settings {
            inference_defaults: Some(InferenceConfig {
                temperature: Some(3.0), // Invalid
                ..Default::default()
            }),
            ..Settings::with_defaults()
        };
        assert!(validate_settings(&settings).is_err());
    }

    /// Queue size of zero is below the minimum of 1.
    #[test]
    fn test_validate_queue_size_too_small() {
        let settings = Settings {
            max_download_queue_size: Some(0),
            ..Default::default()
        };
        assert!(matches!(
            validate_settings(&settings),
            Err(SettingsError::InvalidQueueSize(0))
        ));
    }

    /// Queue size above 50 is rejected.
    #[test]
    fn test_validate_queue_size_too_large() {
        let settings = Settings {
            max_download_queue_size: Some(100),
            ..Default::default()
        };
        assert!(matches!(
            validate_settings(&settings),
            Err(SettingsError::InvalidQueueSize(100))
        ));
    }

    /// `merge` applies Some(Some(_)), clears on Some(None), skips None.
    #[test]
    fn test_merge_settings() {
        let mut settings = Settings::with_defaults();
        let update = SettingsUpdate {
            default_context_size: Some(Some(8192)),
            proxy_port: Some(None), // Clear proxy port
            ..Default::default()
        };
        settings.merge(&update);

        assert_eq!(settings.default_context_size, Some(8192));
        assert_eq!(settings.proxy_port, None);
        assert_eq!(settings.llama_base_port, Some(DEFAULT_LLAMA_BASE_PORT)); // Unchanged
    }

    /// `effective_*` accessors fall back to the defaults when unset.
    #[test]
    fn test_effective_ports() {
        let settings = Settings::with_defaults();
        assert_eq!(settings.effective_proxy_port(), DEFAULT_PROXY_PORT);
        assert_eq!(
            settings.effective_llama_base_port(),
            DEFAULT_LLAMA_BASE_PORT
        );

        let settings_none = Settings::default();
        assert_eq!(settings_none.effective_proxy_port(), DEFAULT_PROXY_PORT);
        assert_eq!(
            settings_none.effective_llama_base_port(),
            DEFAULT_LLAMA_BASE_PORT
        );
    }
}