gglib_core/
settings.rs

1//! Settings domain types and validation.
2//!
3//! This module contains the core settings types used across the application.
4//! These are pure domain types with no infrastructure dependencies.
5
6use serde::{Deserialize, Serialize};
7
8use crate::domain::InferenceConfig;
9
/// Default port for the OpenAI-compatible proxy server.
///
/// Used as the fallback by [`Settings::effective_proxy_port`] when no
/// `proxy_port` is configured.
pub const DEFAULT_PROXY_PORT: u16 = 8080;

/// Default base port for llama-server instance allocation.
///
/// Used as the fallback by [`Settings::effective_llama_base_port`] when no
/// `llama_base_port` is configured.
pub const DEFAULT_LLAMA_BASE_PORT: u16 = 9000;
15
16/// Application settings structure.
17///
18/// All fields are optional to support partial updates and graceful defaults.
19#[derive(Debug, Clone, Default, Serialize, Deserialize, PartialEq)]
20#[serde(default)]
21pub struct Settings {
22    /// Default directory for downloading models.
23    pub default_download_path: Option<String>,
24
25    /// Default context size for models (e.g., 4096, 8192).
26    pub default_context_size: Option<u64>,
27
28    /// Port for the OpenAI-compatible proxy server.
29    pub proxy_port: Option<u16>,
30
31    /// Base port for llama-server instance allocation (first port in range).
32    /// Note: The OpenAI-compatible proxy listens on `proxy_port`.
33    pub llama_base_port: Option<u16>,
34
35    /// Maximum number of downloads that can be queued (1-50).
36    pub max_download_queue_size: Option<u32>,
37
38    /// Whether to show memory fit indicators in `HuggingFace` browser.
39    pub show_memory_fit_indicators: Option<bool>,
40
41    /// Maximum iterations for tool calling agentic loop.
42    pub max_tool_iterations: Option<u32>,
43
44    /// Maximum stagnation steps before stopping agent loop.
45    pub max_stagnation_steps: Option<u32>,
46
47    /// Default model ID for commands that support a default model.
48    pub default_model_id: Option<i64>,
49
50    /// Global inference parameter defaults.
51    ///
52    /// Applied when neither request nor per-model defaults are specified.
53    /// If not set, hardcoded defaults are used as final fallback.
54    #[serde(default)]
55    pub inference_defaults: Option<InferenceConfig>,
56}
57
58impl Settings {
59    /// Create settings with sensible defaults.
60    #[must_use]
61    pub const fn with_defaults() -> Self {
62        Self {
63            default_download_path: None,
64            default_context_size: Some(4096),
65            proxy_port: Some(DEFAULT_PROXY_PORT),
66            llama_base_port: Some(DEFAULT_LLAMA_BASE_PORT),
67            max_download_queue_size: Some(10),
68            show_memory_fit_indicators: Some(true),
69            max_tool_iterations: Some(25),
70            max_stagnation_steps: Some(5),
71            default_model_id: None,
72            inference_defaults: None,
73        }
74    }
75
76    /// Get the effective proxy port (with default fallback).
77    #[must_use]
78    pub const fn effective_proxy_port(&self) -> u16 {
79        match self.proxy_port {
80            Some(port) => port,
81            None => DEFAULT_PROXY_PORT,
82        }
83    }
84
85    /// Get the effective llama-server base port (with default fallback).
86    #[must_use]
87    pub const fn effective_llama_base_port(&self) -> u16 {
88        match self.llama_base_port {
89            Some(port) => port,
90            None => DEFAULT_LLAMA_BASE_PORT,
91        }
92    }
93
94    /// Merge another settings into this one, only updating fields that are Some.
95    pub fn merge(&mut self, other: &SettingsUpdate) {
96        if let Some(ref path) = other.default_download_path {
97            self.default_download_path.clone_from(path);
98        }
99        if let Some(ref ctx_size) = other.default_context_size {
100            self.default_context_size = *ctx_size;
101        }
102        if let Some(ref port) = other.proxy_port {
103            self.proxy_port = *port;
104        }
105        if let Some(ref port) = other.llama_base_port {
106            self.llama_base_port = *port;
107        }
108        if let Some(ref queue_size) = other.max_download_queue_size {
109            self.max_download_queue_size = *queue_size;
110        }
111        if let Some(ref show_fit) = other.show_memory_fit_indicators {
112            self.show_memory_fit_indicators = *show_fit;
113        }
114        if let Some(ref iters) = other.max_tool_iterations {
115            self.max_tool_iterations = *iters;
116        }
117        if let Some(ref steps) = other.max_stagnation_steps {
118            self.max_stagnation_steps = *steps;
119        }
120        if let Some(ref model_id) = other.default_model_id {
121            self.default_model_id = *model_id;
122        }
123        if let Some(ref inference_defaults) = other.inference_defaults {
124            self.inference_defaults.clone_from(inference_defaults);
125        }
126    }
127}
128
/// Partial settings update.
///
/// Each field is `Option<Option<T>>`:
/// - `None` = don't change this field
/// - `Some(None)` = set field to None/null
/// - `Some(Some(value))` = set field to value
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct SettingsUpdate {
    /// Update for `Settings::default_download_path`.
    pub default_download_path: Option<Option<String>>,
    /// Update for `Settings::default_context_size`.
    pub default_context_size: Option<Option<u64>>,
    /// Update for `Settings::proxy_port`.
    pub proxy_port: Option<Option<u16>>,
    /// Update for `Settings::llama_base_port`.
    pub llama_base_port: Option<Option<u16>>,
    /// Update for `Settings::max_download_queue_size`.
    pub max_download_queue_size: Option<Option<u32>>,
    /// Update for `Settings::show_memory_fit_indicators`.
    pub show_memory_fit_indicators: Option<Option<bool>>,
    /// Update for `Settings::max_tool_iterations`.
    pub max_tool_iterations: Option<Option<u32>>,
    /// Update for `Settings::max_stagnation_steps`.
    pub max_stagnation_steps: Option<Option<u32>>,
    /// Update for `Settings::default_model_id`.
    pub default_model_id: Option<Option<i64>>,
    /// Update for `Settings::inference_defaults`.
    pub inference_defaults: Option<Option<InferenceConfig>>,
}
148
/// Settings validation error.
#[derive(Debug, Clone, thiserror::Error)]
pub enum SettingsError {
    /// `default_context_size` fell outside the accepted 512..=1_000_000 range.
    #[error("Context size must be between 512 and 1,000,000, got {0}")]
    InvalidContextSize(u64),

    /// A configured port (proxy or llama base) was in the privileged range (< 1024).
    #[error("Port should be >= 1024 (privileged ports require root), got {0}")]
    InvalidPort(u16),

    /// `max_download_queue_size` fell outside the accepted 1..=50 range.
    #[error("Max download queue size must be between 1 and 50, got {0}")]
    InvalidQueueSize(u32),

    /// `default_download_path` was present but empty/whitespace-only.
    #[error("Download path cannot be empty")]
    EmptyDownloadPath,

    /// An inference parameter failed `validate_inference_config`; the inner
    /// string carries the specific message.
    #[error("Invalid inference parameter: {0}")]
    InvalidInferenceConfig(String),
}
167
168/// Validate settings values.
169pub fn validate_settings(settings: &Settings) -> Result<(), SettingsError> {
170    // Validate context size
171    if let Some(ctx_size) = settings.default_context_size {
172        if !(512..=1_000_000).contains(&ctx_size) {
173            return Err(SettingsError::InvalidContextSize(ctx_size));
174        }
175    }
176
177    // Validate proxy port
178    if let Some(port) = settings.proxy_port {
179        if port < 1024 {
180            return Err(SettingsError::InvalidPort(port));
181        }
182    }
183
184    // Validate llama-server base port
185    if let Some(port) = settings.llama_base_port {
186        if port < 1024 {
187            return Err(SettingsError::InvalidPort(port));
188        }
189    }
190
191    // Validate max download queue size
192    if let Some(queue_size) = settings.max_download_queue_size {
193        if !(1..=50).contains(&queue_size) {
194            return Err(SettingsError::InvalidQueueSize(queue_size));
195        }
196    }
197
198    // Validate download path if specified
199    if settings
200        .default_download_path
201        .as_ref()
202        .is_some_and(|p| p.trim().is_empty())
203    {
204        return Err(SettingsError::EmptyDownloadPath);
205    }
206
207    // Validate inference defaults if specified
208    if let Some(ref inference_config) = settings.inference_defaults {
209        validate_inference_config(inference_config)
210            .map_err(SettingsError::InvalidInferenceConfig)?;
211    }
212
213    Ok(())
214}
215
216/// Validate inference configuration parameters.
217///
218/// Checks that all specified parameters are within valid ranges.
219pub fn validate_inference_config(config: &InferenceConfig) -> Result<(), String> {
220    // Validate temperature (0.0 - 2.0)
221    if let Some(temp) = config.temperature {
222        if !(0.0..=2.0).contains(&temp) {
223            return Err(format!(
224                "Temperature must be between 0.0 and 2.0, got {temp}"
225            ));
226        }
227    }
228
229    // Validate top_p (0.0 - 1.0)
230    if let Some(top_p) = config.top_p {
231        if !(0.0..=1.0).contains(&top_p) {
232            return Err(format!("Top P must be between 0.0 and 1.0, got {top_p}"));
233        }
234    }
235
236    // Validate top_k (must be positive)
237    if let Some(top_k) = config.top_k {
238        if top_k <= 0 {
239            return Err(format!("Top K must be positive, got {top_k}"));
240        }
241    }
242
243    // Validate max_tokens (must be positive)
244    if let Some(max_tokens) = config.max_tokens {
245        if max_tokens == 0 {
246            return Err("Max tokens must be positive".to_string());
247        }
248    }
249
250    // Validate repeat_penalty (must be positive)
251    if let Some(repeat_penalty) = config.repeat_penalty {
252        if repeat_penalty <= 0.0 {
253            return Err(format!(
254                "Repeat penalty must be positive, got {repeat_penalty}"
255            ));
256        }
257    }
258
259    Ok(())
260}
261
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_default_settings() {
        let defaults = Settings::with_defaults();
        assert_eq!(defaults.default_context_size, Some(4096));
        assert_eq!(defaults.proxy_port, Some(DEFAULT_PROXY_PORT));
        assert_eq!(defaults.llama_base_port, Some(DEFAULT_LLAMA_BASE_PORT));
        assert_eq!(defaults.default_download_path, None);
        assert_eq!(defaults.max_download_queue_size, Some(10));
        assert_eq!(defaults.show_memory_fit_indicators, Some(true));
    }

    #[test]
    fn test_validate_settings_valid() {
        // The shipped defaults must always pass validation.
        assert!(validate_settings(&Settings::with_defaults()).is_ok());
    }

    #[test]
    fn test_validate_context_size_too_small() {
        let too_small = Settings {
            default_context_size: Some(100),
            ..Default::default()
        };
        assert!(matches!(
            validate_settings(&too_small),
            Err(SettingsError::InvalidContextSize(100))
        ));
    }

    #[test]
    fn test_validate_context_size_too_large() {
        let too_large = Settings {
            default_context_size: Some(2_000_000),
            ..Default::default()
        };
        assert!(matches!(
            validate_settings(&too_large),
            Err(SettingsError::InvalidContextSize(2_000_000))
        ));
    }

    #[test]
    fn test_validate_port_too_low() {
        let privileged = Settings {
            proxy_port: Some(80),
            ..Default::default()
        };
        assert!(matches!(
            validate_settings(&privileged),
            Err(SettingsError::InvalidPort(80))
        ));
    }

    #[test]
    fn test_validate_empty_path() {
        let blank_path = Settings {
            default_download_path: Some(String::new()),
            ..Default::default()
        };
        assert!(matches!(
            validate_settings(&blank_path),
            Err(SettingsError::EmptyDownloadPath)
        ));
    }

    #[test]
    fn test_validate_inference_config_valid() {
        let fully_specified = InferenceConfig {
            temperature: Some(0.7),
            top_p: Some(0.9),
            top_k: Some(40),
            max_tokens: Some(2048),
            repeat_penalty: Some(1.1),
        };
        assert!(validate_inference_config(&fully_specified).is_ok());
    }

    #[test]
    fn test_validate_inference_config_temperature_out_of_range() {
        // Above and below the accepted [0.0, 2.0] band.
        for bad_temp in [2.5, -0.1] {
            let cfg = InferenceConfig {
                temperature: Some(bad_temp),
                ..Default::default()
            };
            assert!(validate_inference_config(&cfg).is_err());
        }
    }

    #[test]
    fn test_validate_inference_config_top_p_out_of_range() {
        // Above and below the accepted [0.0, 1.0] band.
        for bad_top_p in [1.5, -0.1] {
            let cfg = InferenceConfig {
                top_p: Some(bad_top_p),
                ..Default::default()
            };
            assert!(validate_inference_config(&cfg).is_err());
        }
    }

    #[test]
    fn test_validate_inference_config_negative_values() {
        let negative_top_k = InferenceConfig {
            top_k: Some(-1),
            ..Default::default()
        };
        assert!(validate_inference_config(&negative_top_k).is_err());

        let zero_penalty = InferenceConfig {
            repeat_penalty: Some(0.0),
            ..Default::default()
        };
        assert!(validate_inference_config(&zero_penalty).is_err());
    }

    #[test]
    fn test_settings_with_valid_inference_defaults() {
        let with_defaults = Settings {
            inference_defaults: Some(InferenceConfig {
                temperature: Some(0.8),
                top_p: Some(0.95),
                ..Default::default()
            }),
            ..Settings::with_defaults()
        };
        assert!(validate_settings(&with_defaults).is_ok());
    }

    #[test]
    fn test_settings_with_invalid_inference_defaults() {
        let with_bad_defaults = Settings {
            inference_defaults: Some(InferenceConfig {
                temperature: Some(3.0), // outside the 0.0..=2.0 range
                ..Default::default()
            }),
            ..Settings::with_defaults()
        };
        assert!(validate_settings(&with_bad_defaults).is_err());
    }

    #[test]
    fn test_validate_queue_size_too_small() {
        let zero_queue = Settings {
            max_download_queue_size: Some(0),
            ..Default::default()
        };
        assert!(matches!(
            validate_settings(&zero_queue),
            Err(SettingsError::InvalidQueueSize(0))
        ));
    }

    #[test]
    fn test_validate_queue_size_too_large() {
        let oversized_queue = Settings {
            max_download_queue_size: Some(100),
            ..Default::default()
        };
        assert!(matches!(
            validate_settings(&oversized_queue),
            Err(SettingsError::InvalidQueueSize(100))
        ));
    }

    #[test]
    fn test_merge_settings() {
        let mut merged = Settings::with_defaults();
        let update = SettingsUpdate {
            default_context_size: Some(Some(8192)),
            proxy_port: Some(None), // Some(None) explicitly clears the field
            ..Default::default()
        };
        merged.merge(&update);

        assert_eq!(merged.default_context_size, Some(8192));
        assert_eq!(merged.proxy_port, None);
        // Fields absent from the update stay untouched.
        assert_eq!(merged.llama_base_port, Some(DEFAULT_LLAMA_BASE_PORT));
    }

    #[test]
    fn test_effective_ports() {
        // Configured values are returned verbatim.
        let configured = Settings::with_defaults();
        assert_eq!(configured.effective_proxy_port(), DEFAULT_PROXY_PORT);
        assert_eq!(
            configured.effective_llama_base_port(),
            DEFAULT_LLAMA_BASE_PORT
        );

        // Unset values fall back to the module constants.
        let unset = Settings::default();
        assert_eq!(unset.effective_proxy_port(), DEFAULT_PROXY_PORT);
        assert_eq!(
            unset.effective_llama_base_port(),
            DEFAULT_LLAMA_BASE_PORT
        );
    }
}