// fugue_evo/fitness/benchmarks.rs
//! Benchmark fitness functions
//!
//! This module provides standard benchmark functions for testing evolutionary algorithms.

use std::f64::consts::PI;

use crate::fitness::traits::Fitness;
use crate::genome::bit_string::BitString;
use crate::genome::real_vector::RealVector;
use crate::genome::traits::{BinaryGenome, RealValuedGenome};
11
/// Common interface implemented by all single-objective benchmark problems.
pub trait BenchmarkFunction: Send + Sync {
    /// Human-readable name of the benchmark function.
    fn name(&self) -> &'static str;

    /// Number of decision variables.
    fn dimension(&self) -> usize;

    /// Lower and upper bound shared by every variable, as `(min, max)`.
    fn bounds(&self) -> (f64, f64);

    /// Function value at the global minimum.
    fn optimal_fitness(&self) -> f64;

    /// Location of the global minimum, when it is known in closed form.
    fn optimal_solution(&self) -> Option<Vec<f64>>;

    /// Raw objective value at `x` (smaller is better; to be MINIMIZED).
    fn evaluate_raw(&self, x: &[f64]) -> f64;
}
32
33/// Sphere function: f(x) = Σxᵢ²
34///
35/// Unimodal, convex, separable. Optimum at origin.
36#[derive(Clone, Debug)]
37pub struct Sphere {
38    dimension: usize,
39}
40
41impl Sphere {
42    /// Create a new Sphere function
43    pub fn new(dimension: usize) -> Self {
44        Self { dimension }
45    }
46}
47
48impl BenchmarkFunction for Sphere {
49    fn name(&self) -> &'static str {
50        "Sphere"
51    }
52
53    fn dimension(&self) -> usize {
54        self.dimension
55    }
56
57    fn bounds(&self) -> (f64, f64) {
58        (-5.12, 5.12)
59    }
60
61    fn optimal_fitness(&self) -> f64 {
62        0.0
63    }
64
65    fn optimal_solution(&self) -> Option<Vec<f64>> {
66        Some(vec![0.0; self.dimension])
67    }
68
69    fn evaluate_raw(&self, x: &[f64]) -> f64 {
70        x.iter().map(|xi| xi * xi).sum()
71    }
72}
73
74impl Fitness for Sphere {
75    type Genome = RealVector;
76    type Value = f64;
77
78    fn evaluate(&self, genome: &Self::Genome) -> f64 {
79        // Negate for maximization (GA convention)
80        -self.evaluate_raw(genome.genes())
81    }
82}
83
84/// Rastrigin function: f(x) = 10n + Σ(xᵢ² - 10cos(2πxᵢ))
85///
86/// Highly multimodal with many local minima. Optimum at origin.
87#[derive(Clone, Debug)]
88pub struct Rastrigin {
89    dimension: usize,
90}
91
92impl Rastrigin {
93    /// Create a new Rastrigin function
94    pub fn new(dimension: usize) -> Self {
95        Self { dimension }
96    }
97}
98
99impl BenchmarkFunction for Rastrigin {
100    fn name(&self) -> &'static str {
101        "Rastrigin"
102    }
103
104    fn dimension(&self) -> usize {
105        self.dimension
106    }
107
108    fn bounds(&self) -> (f64, f64) {
109        (-5.12, 5.12)
110    }
111
112    fn optimal_fitness(&self) -> f64 {
113        0.0
114    }
115
116    fn optimal_solution(&self) -> Option<Vec<f64>> {
117        Some(vec![0.0; self.dimension])
118    }
119
120    fn evaluate_raw(&self, x: &[f64]) -> f64 {
121        let a = 10.0;
122        let n = x.len() as f64;
123        a * n
124            + x.iter()
125                .map(|xi| xi * xi - a * (2.0 * PI * xi).cos())
126                .sum::<f64>()
127    }
128}
129
130impl Fitness for Rastrigin {
131    type Genome = RealVector;
132    type Value = f64;
133
134    fn evaluate(&self, genome: &Self::Genome) -> f64 {
135        -self.evaluate_raw(genome.genes())
136    }
137}
138
139/// Rosenbrock function: f(x) = Σ[100(xᵢ₊₁-xᵢ²)² + (1-xᵢ)²]
140///
141/// Valley structure, non-separable. Optimum at (1,1,...,1).
142#[derive(Clone, Debug)]
143pub struct Rosenbrock {
144    dimension: usize,
145}
146
147impl Rosenbrock {
148    /// Create a new Rosenbrock function
149    pub fn new(dimension: usize) -> Self {
150        assert!(dimension >= 2, "Rosenbrock requires at least 2 dimensions");
151        Self { dimension }
152    }
153}
154
155impl BenchmarkFunction for Rosenbrock {
156    fn name(&self) -> &'static str {
157        "Rosenbrock"
158    }
159
160    fn dimension(&self) -> usize {
161        self.dimension
162    }
163
164    fn bounds(&self) -> (f64, f64) {
165        (-5.0, 10.0)
166    }
167
168    fn optimal_fitness(&self) -> f64 {
169        0.0
170    }
171
172    fn optimal_solution(&self) -> Option<Vec<f64>> {
173        Some(vec![1.0; self.dimension])
174    }
175
176    fn evaluate_raw(&self, x: &[f64]) -> f64 {
177        x.windows(2)
178            .map(|w| {
179                let xi = w[0];
180                let xi1 = w[1];
181                100.0 * (xi1 - xi * xi).powi(2) + (1.0 - xi).powi(2)
182            })
183            .sum()
184    }
185}
186
187impl Fitness for Rosenbrock {
188    type Genome = RealVector;
189    type Value = f64;
190
191    fn evaluate(&self, genome: &Self::Genome) -> f64 {
192        -self.evaluate_raw(genome.genes())
193    }
194}
195
196/// Ackley function
197///
198/// Nearly flat outer region with many local minima. Optimum at origin.
199#[derive(Clone, Debug)]
200pub struct Ackley {
201    dimension: usize,
202    a: f64,
203    b: f64,
204    c: f64,
205}
206
207impl Ackley {
208    /// Create a new Ackley function with default parameters
209    pub fn new(dimension: usize) -> Self {
210        Self {
211            dimension,
212            a: 20.0,
213            b: 0.2,
214            c: 2.0 * PI,
215        }
216    }
217
218    /// Create with custom parameters
219    pub fn with_params(dimension: usize, a: f64, b: f64, c: f64) -> Self {
220        Self { dimension, a, b, c }
221    }
222}
223
224impl BenchmarkFunction for Ackley {
225    fn name(&self) -> &'static str {
226        "Ackley"
227    }
228
229    fn dimension(&self) -> usize {
230        self.dimension
231    }
232
233    fn bounds(&self) -> (f64, f64) {
234        (-32.768, 32.768)
235    }
236
237    fn optimal_fitness(&self) -> f64 {
238        0.0
239    }
240
241    fn optimal_solution(&self) -> Option<Vec<f64>> {
242        Some(vec![0.0; self.dimension])
243    }
244
245    fn evaluate_raw(&self, x: &[f64]) -> f64 {
246        let n = x.len() as f64;
247        let sum_sq = x.iter().map(|xi| xi * xi).sum::<f64>();
248        let sum_cos = x.iter().map(|xi| (self.c * xi).cos()).sum::<f64>();
249
250        -self.a * (-self.b * (sum_sq / n).sqrt()).exp() - (sum_cos / n).exp()
251            + self.a
252            + std::f64::consts::E
253    }
254}
255
256impl Fitness for Ackley {
257    type Genome = RealVector;
258    type Value = f64;
259
260    fn evaluate(&self, genome: &Self::Genome) -> f64 {
261        -self.evaluate_raw(genome.genes())
262    }
263}
264
265/// Griewank function: f(x) = Σxᵢ²/4000 - Πcos(xᵢ/√i) + 1
266///
267/// Many local minima. Optimum at origin.
268#[derive(Clone, Debug)]
269pub struct Griewank {
270    dimension: usize,
271}
272
273impl Griewank {
274    /// Create a new Griewank function
275    pub fn new(dimension: usize) -> Self {
276        Self { dimension }
277    }
278}
279
280impl BenchmarkFunction for Griewank {
281    fn name(&self) -> &'static str {
282        "Griewank"
283    }
284
285    fn dimension(&self) -> usize {
286        self.dimension
287    }
288
289    fn bounds(&self) -> (f64, f64) {
290        (-600.0, 600.0)
291    }
292
293    fn optimal_fitness(&self) -> f64 {
294        0.0
295    }
296
297    fn optimal_solution(&self) -> Option<Vec<f64>> {
298        Some(vec![0.0; self.dimension])
299    }
300
301    fn evaluate_raw(&self, x: &[f64]) -> f64 {
302        let sum_sq: f64 = x.iter().map(|xi| xi * xi).sum::<f64>() / 4000.0;
303        let prod_cos: f64 = x
304            .iter()
305            .enumerate()
306            .map(|(i, xi)| (xi / ((i + 1) as f64).sqrt()).cos())
307            .product();
308        sum_sq - prod_cos + 1.0
309    }
310}
311
312impl Fitness for Griewank {
313    type Genome = RealVector;
314    type Value = f64;
315
316    fn evaluate(&self, genome: &Self::Genome) -> f64 {
317        -self.evaluate_raw(genome.genes())
318    }
319}
320
321/// Schwefel function
322///
323/// Deceptive - global optimum far from local optima.
324#[derive(Clone, Debug)]
325pub struct Schwefel {
326    dimension: usize,
327}
328
329impl Schwefel {
330    /// Create a new Schwefel function
331    pub fn new(dimension: usize) -> Self {
332        Self { dimension }
333    }
334}
335
336impl BenchmarkFunction for Schwefel {
337    fn name(&self) -> &'static str {
338        "Schwefel"
339    }
340
341    fn dimension(&self) -> usize {
342        self.dimension
343    }
344
345    fn bounds(&self) -> (f64, f64) {
346        (-500.0, 500.0)
347    }
348
349    fn optimal_fitness(&self) -> f64 {
350        0.0
351    }
352
353    fn optimal_solution(&self) -> Option<Vec<f64>> {
354        Some(vec![420.9687; self.dimension])
355    }
356
357    fn evaluate_raw(&self, x: &[f64]) -> f64 {
358        let n = x.len() as f64;
359        418.9829 * n - x.iter().map(|xi| xi * xi.abs().sqrt().sin()).sum::<f64>()
360    }
361}
362
363impl Fitness for Schwefel {
364    type Genome = RealVector;
365    type Value = f64;
366
367    fn evaluate(&self, genome: &Self::Genome) -> f64 {
368        -self.evaluate_raw(genome.genes())
369    }
370}
371
/// OneMax function for bit strings
///
/// Fitness equals the number of bits set to 1; maximized when every bit is 1.
#[derive(Clone, Debug)]
pub struct OneMax {
    // Expected bit-string length (informational; evaluation counts whatever
    // the genome contains).
    length: usize,
}

impl OneMax {
    /// Construct a OneMax problem over bit strings of `length` bits.
    pub fn new(length: usize) -> Self {
        Self { length }
    }

    /// Expected bit-string length.
    pub fn length(&self) -> usize {
        self.length
    }
}
391
392impl Fitness for OneMax {
393    type Genome = BitString;
394    type Value = usize;
395
396    fn evaluate(&self, genome: &Self::Genome) -> usize {
397        genome.count_ones()
398    }
399}
400
401/// LeadingOnes function for bit strings
402///
403/// Counts the number of leading 1s before the first 0.
404#[derive(Clone, Debug)]
405pub struct LeadingOnes {
406    #[allow(dead_code)]
407    length: usize,
408}
409
410impl LeadingOnes {
411    /// Create a new LeadingOnes function
412    pub fn new(length: usize) -> Self {
413        Self { length }
414    }
415}
416
417impl Fitness for LeadingOnes {
418    type Genome = BitString;
419    type Value = usize;
420
421    fn evaluate(&self, genome: &Self::Genome) -> usize {
422        genome.bits().iter().take_while(|&&b| b).count()
423    }
424}
425
// =============================================================================
// Multi-Objective Test Problems (ZDT)
// =============================================================================
429
/// ZDT1 multi-objective test problem
///
/// Bi-objective problem whose Pareto-optimal front is convex.
/// Reference: Zitzler, E., Deb, K., & Thiele, L. (2000).
#[derive(Clone, Debug)]
pub struct Zdt1 {
    dimension: usize,
}

impl Zdt1 {
    /// Create a new ZDT1 function
    ///
    /// # Panics
    /// Panics if `dimension < 2`.
    pub fn new(dimension: usize) -> Self {
        assert!(dimension >= 2, "ZDT1 requires at least 2 dimensions");
        Self { dimension }
    }

    /// Evaluate the function (returns [f1, f2])
    pub fn evaluate(&self, x: &[f64]) -> [f64; 2] {
        let n = x.len() as f64;
        let first = x[0];
        // g depends on the mean of the remaining variables; g = 1 on the front.
        let g = 1.0 + 9.0 * x[1..].iter().sum::<f64>() / (n - 1.0);
        [first, g * (1.0 - (first / g).sqrt())]
    }

    /// Bounds for each variable [0, 1]
    pub fn bounds(&self) -> (f64, f64) {
        (0.0, 1.0)
    }

    /// Get the dimension
    pub fn dimension(&self) -> usize {
        self.dimension
    }
}
465
/// ZDT2 multi-objective test problem
///
/// Bi-objective problem whose Pareto-optimal front is non-convex.
#[derive(Clone, Debug)]
pub struct Zdt2 {
    dimension: usize,
}

impl Zdt2 {
    /// Create a new ZDT2 function
    ///
    /// # Panics
    /// Panics if `dimension < 2`.
    pub fn new(dimension: usize) -> Self {
        assert!(dimension >= 2, "ZDT2 requires at least 2 dimensions");
        Self { dimension }
    }

    /// Evaluate the function (returns [f1, f2])
    pub fn evaluate(&self, x: &[f64]) -> [f64; 2] {
        let n = x.len() as f64;
        let first = x[0];
        // g depends on the mean of the remaining variables; g = 1 on the front.
        let g = 1.0 + 9.0 * x[1..].iter().sum::<f64>() / (n - 1.0);
        [first, g * (1.0 - (first / g).powi(2))]
    }

    /// Bounds for each variable [0, 1]
    pub fn bounds(&self) -> (f64, f64) {
        (0.0, 1.0)
    }

    /// Get the dimension
    pub fn dimension(&self) -> usize {
        self.dimension
    }
}
500
/// ZDT3 multi-objective test problem
///
/// Bi-objective problem whose Pareto-optimal front is disconnected.
#[derive(Clone, Debug)]
pub struct Zdt3 {
    dimension: usize,
}

impl Zdt3 {
    /// Create a new ZDT3 function
    ///
    /// # Panics
    /// Panics if `dimension < 2`.
    pub fn new(dimension: usize) -> Self {
        assert!(dimension >= 2, "ZDT3 requires at least 2 dimensions");
        Self { dimension }
    }

    /// Evaluate the function (returns [f1, f2])
    pub fn evaluate(&self, x: &[f64]) -> [f64; 2] {
        let n = x.len() as f64;
        let first = x[0];
        // g depends on the mean of the remaining variables; g = 1 on the front.
        let g = 1.0 + 9.0 * x[1..].iter().sum::<f64>() / (n - 1.0);
        // The sin term carves the front into disconnected segments.
        let h = 1.0 - (first / g).sqrt() - (first / g) * (10.0 * PI * first).sin();
        [first, g * h]
    }

    /// Bounds for each variable [0, 1]
    pub fn bounds(&self) -> (f64, f64) {
        (0.0, 1.0)
    }

    /// Get the dimension
    pub fn dimension(&self) -> usize {
        self.dimension
    }
}
536
/// Schaffer N.1 multi-objective problem
///
/// Minimal bi-objective test case over a single decision variable:
/// f1 = x², f2 = (x - 2)².
#[derive(Clone, Debug)]
pub struct SchafferN1;

impl SchafferN1 {
    /// Create a new Schaffer N.1 function
    pub fn new() -> Self {
        Self
    }

    /// Evaluate the function (returns [f1, f2])
    pub fn evaluate(&self, x: f64) -> [f64; 2] {
        let shifted = x - 2.0;
        [x * x, shifted * shifted]
    }

    /// Bounds for the variable
    pub fn bounds(&self) -> (f64, f64) {
        (-10.0, 10.0)
    }
}

impl Default for SchafferN1 {
    fn default() -> Self {
        Self::new()
    }
}
567
// =============================================================================
// Additional Single-Objective Functions
// =============================================================================
571
572/// Levy function
573///
574/// Multimodal with many local minima. Optimum at (1, 1, ..., 1).
575#[derive(Clone, Debug)]
576pub struct Levy {
577    dimension: usize,
578}
579
580impl Levy {
581    /// Create a new Levy function
582    pub fn new(dimension: usize) -> Self {
583        Self { dimension }
584    }
585}
586
587impl BenchmarkFunction for Levy {
588    fn name(&self) -> &'static str {
589        "Levy"
590    }
591
592    fn dimension(&self) -> usize {
593        self.dimension
594    }
595
596    fn bounds(&self) -> (f64, f64) {
597        (-10.0, 10.0)
598    }
599
600    fn optimal_fitness(&self) -> f64 {
601        0.0
602    }
603
604    fn optimal_solution(&self) -> Option<Vec<f64>> {
605        Some(vec![1.0; self.dimension])
606    }
607
608    fn evaluate_raw(&self, x: &[f64]) -> f64 {
609        let w: Vec<f64> = x.iter().map(|xi| 1.0 + (xi - 1.0) / 4.0).collect();
610        let n = w.len();
611
612        let term1 = (PI * w[0]).sin().powi(2);
613
614        let sum: f64 = w[..n - 1]
615            .iter()
616            .map(|wi| (wi - 1.0).powi(2) * (1.0 + 10.0 * (PI * wi + 1.0).sin().powi(2)))
617            .sum();
618
619        let term3 = (w[n - 1] - 1.0).powi(2) * (1.0 + (2.0 * PI * w[n - 1]).sin().powi(2));
620
621        term1 + sum + term3
622    }
623}
624
625impl Fitness for Levy {
626    type Genome = RealVector;
627    type Value = f64;
628
629    fn evaluate(&self, genome: &Self::Genome) -> f64 {
630        -self.evaluate_raw(genome.genes())
631    }
632}
633
/// Dixon-Price function
///
/// Valley-shaped; the location of the global minimum depends on the dimension.
#[derive(Clone, Debug)]
pub struct DixonPrice {
    dimension: usize,
}

impl DixonPrice {
    /// Build a Dixon-Price problem with the given number of variables.
    pub fn new(dimension: usize) -> Self {
        Self { dimension }
    }
}
648
649impl BenchmarkFunction for DixonPrice {
650    fn name(&self) -> &'static str {
651        "Dixon-Price"
652    }
653
654    fn dimension(&self) -> usize {
655        self.dimension
656    }
657
658    fn bounds(&self) -> (f64, f64) {
659        (-10.0, 10.0)
660    }
661
662    fn optimal_fitness(&self) -> f64 {
663        0.0
664    }
665
666    fn optimal_solution(&self) -> Option<Vec<f64>> {
667        // Optimal solution: x_i = 2^(-(2^i - 2) / 2^i)
668        let optimal: Vec<f64> = (0..self.dimension)
669            .map(|i| {
670                let exp_num = (1u64 << i) as f64 - 2.0;
671                let exp_den = (1u64 << (i + 1)) as f64;
672                2.0_f64.powf(-exp_num / exp_den)
673            })
674            .collect();
675        Some(optimal)
676    }
677
678    fn evaluate_raw(&self, x: &[f64]) -> f64 {
679        let term1 = (x[0] - 1.0).powi(2);
680
681        let sum: f64 = x
682            .windows(2)
683            .enumerate()
684            .map(|(i, w)| (i + 2) as f64 * (2.0 * w[1] * w[1] - w[0]).powi(2))
685            .sum();
686
687        term1 + sum
688    }
689}
690
691impl Fitness for DixonPrice {
692    type Genome = RealVector;
693    type Value = f64;
694
695    fn evaluate(&self, genome: &Self::Genome) -> f64 {
696        -self.evaluate_raw(genome.genes())
697    }
698}
699
700/// Styblinski-Tang function
701///
702/// Multimodal. Optimum at (-2.903534, ..., -2.903534).
703#[derive(Clone, Debug)]
704pub struct StyblinskiTang {
705    dimension: usize,
706}
707
708impl StyblinskiTang {
709    /// Create a new Styblinski-Tang function
710    pub fn new(dimension: usize) -> Self {
711        Self { dimension }
712    }
713}
714
715impl BenchmarkFunction for StyblinskiTang {
716    fn name(&self) -> &'static str {
717        "Styblinski-Tang"
718    }
719
720    fn dimension(&self) -> usize {
721        self.dimension
722    }
723
724    fn bounds(&self) -> (f64, f64) {
725        (-5.0, 5.0)
726    }
727
728    fn optimal_fitness(&self) -> f64 {
729        // f(optimal) = -39.16617 * dimension
730        -39.16617 * self.dimension as f64
731    }
732
733    fn optimal_solution(&self) -> Option<Vec<f64>> {
734        Some(vec![-2.903534; self.dimension])
735    }
736
737    fn evaluate_raw(&self, x: &[f64]) -> f64 {
738        x.iter()
739            .map(|xi| xi.powi(4) - 16.0 * xi.powi(2) + 5.0 * xi)
740            .sum::<f64>()
741            / 2.0
742    }
743}
744
745impl Fitness for StyblinskiTang {
746    type Genome = RealVector;
747    type Value = f64;
748
749    fn evaluate(&self, genome: &Self::Genome) -> f64 {
750        -self.evaluate_raw(genome.genes())
751    }
752}
753
// ============================================================================
// Combinatorial Benchmarks
// ============================================================================
757
/// Royal Road function
///
/// Tests the "building block hypothesis": fitness is the number of complete
/// schemas (contiguous blocks that are all 1s). Only fully completed blocks
/// score — partial blocks contribute nothing.
///
/// For example, with schema_size=8 and num_schemas=4 (32-bit string):
/// - 11111111 00000000 11111111 00000000 has fitness 2 (two complete blocks)
/// - 11111110 11111111 11111111 11111111 has fitness 3 (one incomplete)
#[derive(Clone, Debug)]
pub struct RoyalRoad {
    /// Size of each schema (block of consecutive 1s)
    pub schema_size: usize,
    /// Number of schemas in the genome
    pub num_schemas: usize,
}

impl RoyalRoad {
    /// Create a new Royal Road function
    ///
    /// # Arguments
    /// * `schema_size` - Number of bits in each schema (typically 8)
    /// * `num_schemas` - Number of schemas (typically 8)
    pub fn new(schema_size: usize, num_schemas: usize) -> Self {
        Self {
            schema_size,
            num_schemas,
        }
    }

    /// Standard Royal Road configuration (8x8 = 64 bits)
    pub fn standard() -> Self {
        Self::new(8, 8)
    }

    /// Total genome length required
    pub fn genome_length(&self) -> usize {
        self.schema_size * self.num_schemas
    }

    /// A schema is complete when its entire bit range exists and is all 1s.
    fn is_complete_schema(&self, bits: &[bool], schema_index: usize) -> bool {
        let start = schema_index * self.schema_size;
        // `get` returns None for an out-of-range block, which counts as incomplete.
        match bits.get(start..start + self.schema_size) {
            Some(block) => block.iter().all(|&b| b),
            None => false,
        }
    }

    /// Count the number of complete schemas
    pub fn count_complete_schemas(&self, bits: &[bool]) -> usize {
        let mut complete = 0;
        for schema in 0..self.num_schemas {
            if self.is_complete_schema(bits, schema) {
                complete += 1;
            }
        }
        complete
    }
}
817
818impl Fitness for RoyalRoad {
819    type Genome = BitString;
820    type Value = usize;
821
822    fn evaluate(&self, genome: &Self::Genome) -> usize {
823        self.count_complete_schemas(genome.bits())
824    }
825}
826
/// NK Landscape
///
/// A tunable fitness landscape with controllable epistasis (gene interactions):
/// - N is the genome length
/// - K is how many other genes influence each gene's fitness contribution
///
/// Larger K makes the landscape more rugged, with more local optima:
/// K=0 gives a smooth, separable landscape; K=N-1 is maximally rugged.
///
/// The overall fitness is the mean of the per-gene contributions.
#[derive(Clone, Debug)]
pub struct NkLandscape {
    /// Genome length (N)
    n: usize,
    /// Epistasis degree (K)
    k: usize,
    /// For each gene position, the indices of the K genes that influence it
    neighbors: Vec<Vec<usize>>,
    /// Per-gene lookup table mapping (own bit, neighbor bits) -> contribution
    contributions: Vec<std::collections::HashMap<Vec<bool>, f64>>,
}
850
851impl NkLandscape {
852    /// Create a new NK Landscape with random fitness contributions
853    ///
854    /// # Arguments
855    /// * `n` - Genome length
856    /// * `k` - Epistasis degree (must be < n)
857    /// * `seed` - Random seed for reproducibility
858    pub fn new(n: usize, k: usize, seed: u64) -> Self {
859        assert!(k < n, "K must be less than N");
860
861        use rand::SeedableRng;
862        let mut rng = rand::rngs::StdRng::seed_from_u64(seed);
863
864        // Generate random neighbors for each position
865        let mut neighbors = Vec::with_capacity(n);
866        for i in 0..n {
867            let mut gene_neighbors: Vec<usize> = (0..n).filter(|&j| j != i).collect();
868            // Shuffle and take first k neighbors
869            use rand::seq::SliceRandom;
870            gene_neighbors.shuffle(&mut rng);
871            gene_neighbors.truncate(k);
872            gene_neighbors.sort();
873            neighbors.push(gene_neighbors);
874        }
875
876        // Generate random fitness contributions for each configuration
877        let mut contributions = Vec::with_capacity(n);
878        for _i in 0..n {
879            let num_configs = 1 << (k + 1); // 2^(k+1) configurations
880            let mut table = std::collections::HashMap::with_capacity(num_configs);
881
882            // Generate all possible configurations
883            for config_bits in 0..num_configs {
884                let config: Vec<bool> = (0..=k).map(|j| (config_bits >> j) & 1 == 1).collect();
885                use rand::Rng;
886                table.insert(config, rng.gen::<f64>());
887            }
888
889            contributions.push(table);
890        }
891
892        Self {
893            n,
894            k,
895            neighbors,
896            contributions,
897        }
898    }
899
900    /// Create an NK Landscape with adjacent neighbors
901    /// (each gene interacts with its k nearest neighbors)
902    pub fn with_adjacent_neighbors(n: usize, k: usize, seed: u64) -> Self {
903        assert!(k < n, "K must be less than N");
904
905        use rand::SeedableRng;
906        let mut rng = rand::rngs::StdRng::seed_from_u64(seed);
907
908        // Adjacent neighbors
909        let mut neighbors = Vec::with_capacity(n);
910        for i in 0..n {
911            let gene_neighbors: Vec<usize> = (1..=k).map(|offset| (i + offset) % n).collect();
912            neighbors.push(gene_neighbors);
913        }
914
915        // Generate random fitness contributions
916        let mut contributions = Vec::with_capacity(n);
917        for _i in 0..n {
918            let num_configs = 1 << (k + 1);
919            let mut table = std::collections::HashMap::with_capacity(num_configs);
920
921            for config_bits in 0..num_configs {
922                let config: Vec<bool> = (0..=k).map(|j| (config_bits >> j) & 1 == 1).collect();
923                use rand::Rng;
924                table.insert(config, rng.gen::<f64>());
925            }
926
927            contributions.push(table);
928        }
929
930        Self {
931            n,
932            k,
933            neighbors,
934            contributions,
935        }
936    }
937
938    /// Get genome length (N)
939    pub fn genome_length(&self) -> usize {
940        self.n
941    }
942
943    /// Get epistasis degree (K)
944    pub fn epistasis(&self) -> usize {
945        self.k
946    }
947
948    /// Evaluate fitness for a bit string
949    pub fn evaluate_bits(&self, bits: &[bool]) -> f64 {
950        assert!(bits.len() >= self.n);
951
952        let mut total = 0.0;
953
954        for i in 0..self.n {
955            // Build configuration: [gene_i, neighbor_1, neighbor_2, ...]
956            let mut config = Vec::with_capacity(self.k + 1);
957            config.push(bits[i]);
958            for &j in &self.neighbors[i] {
959                config.push(bits[j]);
960            }
961
962            // Look up contribution
963            if let Some(&contribution) = self.contributions[i].get(&config) {
964                total += contribution;
965            }
966        }
967
968        // Return average fitness
969        total / self.n as f64
970    }
971}
972
973impl Fitness for NkLandscape {
974    type Genome = BitString;
975    type Value = f64;
976
977    fn evaluate(&self, genome: &Self::Genome) -> f64 {
978        self.evaluate_bits(genome.bits())
979    }
980}
981
982#[cfg(test)]
983mod tests {
984    use super::*;
985    use crate::fitness::traits::Fitness;
986    use approx::assert_relative_eq;
987
988    // Sphere function tests
989    #[test]
990    fn test_sphere_at_optimum() {
991        let sphere = Sphere::new(3);
992        let optimum = RealVector::new(vec![0.0, 0.0, 0.0]);
993        assert_relative_eq!(sphere.evaluate(&optimum), 0.0);
994    }
995
996    #[test]
997    fn test_sphere_non_optimum() {
998        let sphere = Sphere::new(3);
999        let point = RealVector::new(vec![1.0, 2.0, 3.0]);
1000        // 1 + 4 + 9 = 14, negated = -14
1001        assert_relative_eq!(sphere.evaluate(&point), -14.0);
1002    }
1003
1004    #[test]
1005    fn test_sphere_metadata() {
1006        let sphere = Sphere::new(5);
1007        assert_eq!(sphere.name(), "Sphere");
1008        assert_eq!(sphere.dimension(), 5);
1009        assert_eq!(sphere.bounds(), (-5.12, 5.12));
1010        assert_relative_eq!(sphere.optimal_fitness(), 0.0);
1011        assert_eq!(sphere.optimal_solution(), Some(vec![0.0; 5]));
1012    }
1013
1014    // Rastrigin function tests
1015    #[test]
1016    fn test_rastrigin_at_optimum() {
1017        let rastrigin = Rastrigin::new(3);
1018        let optimum = RealVector::new(vec![0.0, 0.0, 0.0]);
1019        assert_relative_eq!(rastrigin.evaluate(&optimum), 0.0, epsilon = 1e-10);
1020    }
1021
1022    #[test]
1023    fn test_rastrigin_non_optimum() {
1024        let rastrigin = Rastrigin::new(2);
1025        let point = RealVector::new(vec![1.0, 1.0]);
1026        // At x=1, cos(2π*1) = 1, so each term is 1 - 10*1 = -9
1027        // Total = 10*2 + 2*(-9) = 20 - 18 = 2, but we need to calculate more precisely
1028        let expected = 10.0 * 2.0
1029            + (1.0 * 1.0 - 10.0 * (2.0 * PI * 1.0).cos())
1030            + (1.0 * 1.0 - 10.0 * (2.0 * PI * 1.0).cos());
1031        assert_relative_eq!(rastrigin.evaluate(&point), -expected, epsilon = 1e-10);
1032    }
1033
1034    #[test]
1035    fn test_rastrigin_metadata() {
1036        let rastrigin = Rastrigin::new(10);
1037        assert_eq!(rastrigin.name(), "Rastrigin");
1038        assert_eq!(rastrigin.dimension(), 10);
1039        assert_eq!(rastrigin.bounds(), (-5.12, 5.12));
1040    }
1041
1042    // Rosenbrock function tests
1043    #[test]
1044    fn test_rosenbrock_at_optimum() {
1045        let rosenbrock = Rosenbrock::new(3);
1046        let optimum = RealVector::new(vec![1.0, 1.0, 1.0]);
1047        assert_relative_eq!(rosenbrock.evaluate(&optimum), 0.0, epsilon = 1e-10);
1048    }
1049
1050    #[test]
1051    fn test_rosenbrock_non_optimum() {
1052        let rosenbrock = Rosenbrock::new(2);
1053        let point = RealVector::new(vec![0.0, 0.0]);
1054        // 100*(0 - 0)^2 + (1 - 0)^2 = 1
1055        assert_relative_eq!(rosenbrock.evaluate(&point), -1.0, epsilon = 1e-10);
1056    }
1057
1058    #[test]
1059    fn test_rosenbrock_metadata() {
1060        let rosenbrock = Rosenbrock::new(5);
1061        assert_eq!(rosenbrock.name(), "Rosenbrock");
1062        assert_eq!(rosenbrock.dimension(), 5);
1063        assert_eq!(rosenbrock.bounds(), (-5.0, 10.0));
1064        assert_eq!(rosenbrock.optimal_solution(), Some(vec![1.0; 5]));
1065    }
1066
1067    // Ackley function tests
1068    #[test]
1069    fn test_ackley_at_optimum() {
1070        let ackley = Ackley::new(3);
1071        let optimum = RealVector::new(vec![0.0, 0.0, 0.0]);
1072        assert_relative_eq!(ackley.evaluate(&optimum), 0.0, epsilon = 1e-10);
1073    }
1074
1075    #[test]
1076    fn test_ackley_metadata() {
1077        let ackley = Ackley::new(10);
1078        assert_eq!(ackley.name(), "Ackley");
1079        assert_eq!(ackley.dimension(), 10);
1080        assert_eq!(ackley.bounds(), (-32.768, 32.768));
1081    }
1082
1083    // Griewank function tests
1084    #[test]
1085    fn test_griewank_at_optimum() {
1086        let griewank = Griewank::new(3);
1087        let optimum = RealVector::new(vec![0.0, 0.0, 0.0]);
1088        assert_relative_eq!(griewank.evaluate(&optimum), 0.0, epsilon = 1e-10);
1089    }
1090
1091    #[test]
1092    fn test_griewank_metadata() {
1093        let griewank = Griewank::new(10);
1094        assert_eq!(griewank.name(), "Griewank");
1095        assert_eq!(griewank.dimension(), 10);
1096        assert_eq!(griewank.bounds(), (-600.0, 600.0));
1097    }
1098
1099    // Schwefel function tests
1100    #[test]
1101    fn test_schwefel_metadata() {
1102        let schwefel = Schwefel::new(10);
1103        assert_eq!(schwefel.name(), "Schwefel");
1104        assert_eq!(schwefel.dimension(), 10);
1105        assert_eq!(schwefel.bounds(), (-500.0, 500.0));
1106    }
1107
1108    // OneMax function tests
1109    #[test]
1110    fn test_onemax_all_ones() {
1111        let onemax = OneMax::new(10);
1112        let genome = BitString::ones(10);
1113        assert_eq!(onemax.evaluate(&genome), 10);
1114    }
1115
1116    #[test]
1117    fn test_onemax_all_zeros() {
1118        let onemax = OneMax::new(10);
1119        let genome = BitString::zeros(10);
1120        assert_eq!(onemax.evaluate(&genome), 0);
1121    }
1122
1123    #[test]
1124    fn test_onemax_mixed() {
1125        let onemax = OneMax::new(5);
1126        let genome = BitString::new(vec![true, false, true, false, true]);
1127        assert_eq!(onemax.evaluate(&genome), 3);
1128    }
1129
1130    // LeadingOnes function tests
1131    #[test]
1132    fn test_leadingones_all_ones() {
1133        let lo = LeadingOnes::new(10);
1134        let genome = BitString::ones(10);
1135        assert_eq!(lo.evaluate(&genome), 10);
1136    }
1137
1138    #[test]
1139    fn test_leadingones_all_zeros() {
1140        let lo = LeadingOnes::new(10);
1141        let genome = BitString::zeros(10);
1142        assert_eq!(lo.evaluate(&genome), 0);
1143    }
1144
1145    #[test]
1146    fn test_leadingones_mixed() {
1147        let lo = LeadingOnes::new(5);
1148        let genome = BitString::new(vec![true, true, false, true, true]);
1149        assert_eq!(lo.evaluate(&genome), 2); // First 2 are 1s, then a 0
1150    }
1151
1152    #[test]
1153    fn test_leadingones_starts_with_zero() {
1154        let lo = LeadingOnes::new(5);
1155        let genome = BitString::new(vec![false, true, true, true, true]);
1156        assert_eq!(lo.evaluate(&genome), 0);
1157    }
1158
1159    // ZDT1 tests
1160    #[test]
1161    fn test_zdt1_pareto_front() {
1162        let zdt1 = Zdt1::new(10);
1163        // On the Pareto front, all x_i = 0 for i > 0, and x_0 varies from 0 to 1
1164        let x = vec![0.5, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0];
1165        let [f1, f2] = zdt1.evaluate(&x);
1166
1167        // f1 should equal x_0
1168        assert_relative_eq!(f1, 0.5, epsilon = 1e-10);
1169
1170        // On Pareto front with g=1: f2 = 1 - sqrt(f1)
1171        assert_relative_eq!(f2, 1.0 - f1.sqrt(), epsilon = 1e-10);
1172    }
1173
1174    // ZDT2 tests
1175    #[test]
1176    fn test_zdt2_pareto_front() {
1177        let zdt2 = Zdt2::new(10);
1178        let x = vec![0.5, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0];
1179        let [f1, f2] = zdt2.evaluate(&x);
1180
1181        assert_relative_eq!(f1, 0.5, epsilon = 1e-10);
1182        // On Pareto front with g=1: f2 = 1 - f1^2
1183        assert_relative_eq!(f2, 1.0 - f1 * f1, epsilon = 1e-10);
1184    }
1185
1186    // Schaffer N.1 tests
1187    #[test]
1188    fn test_schaffer_n1() {
1189        let schaffer = SchafferN1::new();
1190        let [f1, f2] = schaffer.evaluate(0.0);
1191        assert_relative_eq!(f1, 0.0, epsilon = 1e-10);
1192        assert_relative_eq!(f2, 4.0, epsilon = 1e-10);
1193    }
1194
1195    // Levy function tests
1196    #[test]
1197    fn test_levy_at_optimum() {
1198        let levy = Levy::new(3);
1199        let optimum = RealVector::new(vec![1.0, 1.0, 1.0]);
1200        assert_relative_eq!(levy.evaluate(&optimum), 0.0, epsilon = 1e-10);
1201    }
1202
1203    // Dixon-Price tests
1204    #[test]
1205    fn test_dixonprice_metadata() {
1206        let dp = DixonPrice::new(5);
1207        assert_eq!(dp.name(), "Dixon-Price");
1208        assert_eq!(dp.dimension(), 5);
1209    }
1210
1211    // Styblinski-Tang tests
1212    #[test]
1213    fn test_styblinskitang_near_optimum() {
1214        let st = StyblinskiTang::new(2);
1215        let near_opt = RealVector::new(vec![-2.9, -2.9]);
1216        // Should be close to optimal fitness
1217        let fitness = st.evaluate(&near_opt);
1218        let optimal = st.optimal_fitness();
1219        // The returned fitness is negated, so compare negatives
1220        assert!(
1221            fitness > optimal - 1.0,
1222            "Fitness {} should be close to optimal {}",
1223            fitness,
1224            optimal
1225        );
1226    }
1227
1228    // Royal Road tests
1229    #[test]
1230    fn test_royal_road_all_ones() {
1231        let rr = RoyalRoad::new(4, 4); // 16-bit genome
1232        let genome = BitString::ones(16);
1233        let fitness: usize = rr.evaluate(&genome);
1234        assert_eq!(fitness, 4); // All 4 schemas complete
1235    }
1236
1237    #[test]
1238    fn test_royal_road_all_zeros() {
1239        let rr = RoyalRoad::new(4, 4);
1240        let genome = BitString::zeros(16);
1241        let fitness: usize = rr.evaluate(&genome);
1242        assert_eq!(fitness, 0); // No schemas complete
1243    }
1244
1245    #[test]
1246    fn test_royal_road_partial() {
1247        let rr = RoyalRoad::new(4, 4);
1248        // First and third schemas complete
1249        let bits = vec![
1250            true, true, true, true, // Schema 0: complete
1251            false, false, false, false, // Schema 1: empty
1252            true, true, true, true, // Schema 2: complete
1253            true, true, true, false, // Schema 3: incomplete
1254        ];
1255        let genome = BitString::new(bits);
1256        let fitness: usize = rr.evaluate(&genome);
1257        assert_eq!(fitness, 2);
1258    }
1259
1260    #[test]
1261    fn test_royal_road_standard() {
1262        let rr = RoyalRoad::standard();
1263        assert_eq!(rr.genome_length(), 64);
1264        assert_eq!(rr.schema_size, 8);
1265        assert_eq!(rr.num_schemas, 8);
1266    }
1267
1268    // NK Landscape tests
1269    #[test]
1270    fn test_nk_landscape_creation() {
1271        let nk = NkLandscape::new(10, 2, 42);
1272        assert_eq!(nk.genome_length(), 10);
1273        assert_eq!(nk.epistasis(), 2);
1274    }
1275
1276    #[test]
1277    fn test_nk_landscape_deterministic() {
1278        // Same seed should give same landscape
1279        let nk1 = NkLandscape::new(8, 2, 123);
1280        let nk2 = NkLandscape::new(8, 2, 123);
1281
1282        let genome = BitString::new(vec![true, false, true, false, true, false, true, false]);
1283        let f1: f64 = nk1.evaluate(&genome);
1284        let f2: f64 = nk2.evaluate(&genome);
1285
1286        assert_relative_eq!(f1, f2);
1287    }
1288
1289    #[test]
1290    fn test_nk_landscape_fitness_range() {
1291        let nk = NkLandscape::new(10, 3, 42);
1292        let genome = BitString::new(vec![true; 10]);
1293        let fitness: f64 = nk.evaluate(&genome);
1294
1295        // Fitness should be average of contributions, so in [0, 1]
1296        assert!((0.0..=1.0).contains(&fitness));
1297    }
1298
1299    #[test]
1300    fn test_nk_landscape_adjacent() {
1301        let nk = NkLandscape::with_adjacent_neighbors(10, 2, 42);
1302        let genome = BitString::ones(10);
1303        let fitness: f64 = nk.evaluate(&genome);
1304
1305        assert!((0.0..=1.0).contains(&fitness));
1306    }
1307
1308    #[test]
1309    #[should_panic(expected = "K must be less than N")]
1310    fn test_nk_landscape_invalid_k() {
1311        let _nk = NkLandscape::new(5, 5, 42); // K = N is invalid
1312    }
1313}