diff --git a/XCS/RESULTS_FILE.txt b/XCS/RESULTS_FILE.txt
index ae1a1cc5ed4810813cd238a1c1603c6e352540c1..c6011ae1a0fa682c52655d7897ae6a531a3f5ade 100644
--- a/XCS/RESULTS_FILE.txt
+++ b/XCS/RESULTS_FILE.txt
@@ -1,3 +1,10 @@
 Rule Compaction XCS
 I = 10000
 
+
+ Accuracy: 0.56494140625 Total time: 91.89 Rules: 6
+ Accuracy: 0.58984375 Total time: 92.94 Rules: 18
+ Accuracy: 0.6123046875 Total time: 92.56 Rules: 9
+ Accuracy: 0.56396484375 Total time: 92.41 Rules: 12
+ Accuracy: 0.60302734375 Total time: 92.17 Rules: 14
+ Accuracy: 0.6201171875 Total time: 92.56 Rules: 4
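
The six runs appended above vary noticeably (accuracy 0.564-0.620, 4-18 rules). As a quick way to summarize them, a minimal Python sketch; it is not part of the repository, and the values are copied verbatim from the lines above:

    # Summary of the six rule-compaction runs appended above (values copied verbatim).
    runs = [
        (0.56494140625, 91.89, 6),
        (0.58984375,    92.94, 18),
        (0.6123046875,  92.56, 9),
        (0.56396484375, 92.41, 12),
        (0.60302734375, 92.17, 14),
        (0.6201171875,  92.56, 4),
    ]
    accuracies = [acc for acc, _, _ in runs]
    rule_counts = [rules for _, _, rules in runs]
    print(f"mean accuracy : {sum(accuracies) / len(runs):.4f}")      # ~0.5924
    print(f"best accuracy : {max(accuracies):.4f}")                  # 0.6201 (4 rules)
    print(f"rule count    : {min(rule_counts)}-{max(rule_counts)}")  # 4-18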
diff --git a/XCS/XCS_ClassifierSet.py b/XCS/XCS_ClassifierSet.py
index eeca4e4a04ea07763edf8e04b0b5cccf6c3a76a4..6aab06fe8334a682c87060285b53d1e1a2b6e216 100644
--- a/XCS/XCS_ClassifierSet.py
+++ b/XCS/XCS_ClassifierSet.py
@@ -510,6 +510,8 @@ class ClassifierSet:
         while i < len(self.population):
             cl = self.population[i]
             if cl.action_cnt <= cons.theta_del or cl.error >= cons.e0: #error threshold e0
+            #if cl.error >= cons.e0:  # Alternative to the test above: delete only inaccurate classifiers and keep the inexperienced ones.
+                                      # This gives better accuracy, but far too many rules for human comprehension (roughly 80 rules at 64% accuracy).
                 self.micro_size -= cl.numerosity
                 self.population.pop(i)
             else:
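
For readability, the deletion step this hunk annotates can be sketched in isolation. This is an editorial Python sketch, not code from XCS_ClassifierSet.py: the Classifier stand-in and the keep_inexperienced flag are assumptions, while action_cnt, error, numerosity, theta_del, and e0 mirror the names used in the hunk above.

    # Sketch of the two deletion criteria discussed in the comment above
    # (assumed stand-in types, not the repository's classes).
    from dataclasses import dataclass

    @dataclass
    class Classifier:
        action_cnt: int   # experience: how often the rule appeared in an action set
        error: float      # prediction error estimate
        numerosity: int   # micro-classifiers represented by this macro-classifier

    def compact(population, theta_del, e0, keep_inexperienced=False):
        """Remove rules during compaction.

        keep_inexperienced=False -> current behaviour: drop rules that are either
        inexperienced (action_cnt <= theta_del) or inaccurate (error >= e0).
        keep_inexperienced=True  -> the commented-out alternative: drop only
        inaccurate rules, which raises accuracy but inflates the rule count.
        """
        survivors, micro_size = [], 0
        for cl in population:
            inaccurate = cl.error >= e0
            inexperienced = cl.action_cnt <= theta_del
            if inaccurate or (inexperienced and not keep_inexperienced):
                continue  # deleted
            survivors.append(cl)
            micro_size += cl.numerosity
        # Return the surviving rules and their combined numerosity.
        return survivors, micro_size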
diff --git a/XCS/XCS_Configuration_File.txt b/XCS/XCS_Configuration_File.txt
index e9fda4b69252c44d4484e4323fa32ec653be660a..aa6282aedf9e4c94728edbbd7eb50fa1aa5c2003 100644
--- a/XCS/XCS_Configuration_File.txt
+++ b/XCS/XCS_Configuration_File.txt
@@ -27,7 +27,7 @@ trackingFrequency=0							    # Specifies the number of iterations before each e
 ###### Learning Parameters - Generally just use default values.
 ######--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
 nu=5											# (v) Power parameter used to determine the importance of high accuracy when calculating fitness. (typically set to 5, recommended setting of 1 in noisy data)
-chi=0.8										# (X) The probability of applying crossover in the GA. (typically set to 0.5-1.0)
+chi=0.8										    # (X) The probability of applying crossover in the GA. (typically set to 0.5-1.0)
 gamma=0										    # Reinforcement Learning factor
 phi=0											# If total prediction in matchset is less than phi times the mean prediction of population, covering occurs - not used.
 upsilon=0.4										# (u) The probability of mutating an allele within an offspring.(typically set to 0.1-0.5)
@@ -55,7 +55,7 @@ doActionSetSubsumption=0						# Activate Subsumption? (1 is True, 0 is False).
 selectionMethod=tournament						# Select GA parent selection strategy ('tournament' or 'roulette')
 differentParent=0								# Force different parents in tournament selection or not
 theta_sel=0.5									# The fraction of the correct set to be included in tournament selection.
-crossoverMethod=uniform                        # option for type of crossover
+crossoverMethod=uniform                         # option for type of crossover
 
 ######--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
 ###### PopulationReboot - An option to begin e-LCS learning from an existing, saved rule population. Note that the training data is re-shuffled during a reboot.
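
The whitespace-only edits to chi and crossoverMethod do not change behaviour, but the surrounding parameters (chi, upsilon, theta_sel, selectionMethod=tournament, crossoverMethod=uniform) govern the GA step. The following hedged Python sketch shows how such parameters are typically applied in an XCS-style GA; it is illustrative only, uses assumed data shapes (lists of condition symbols, a ternary {0, 1, #} alphabet), and does not reproduce the project's implementation.

    # Illustrative GA step using the configuration names above (chi, upsilon, theta_sel).
    import random

    def tournament_select(correct_set, fitnesses, theta_sel=0.5):
        """Pick the fittest rule from a random fraction (theta_sel) of the correct set.

        Assumes a non-empty correct set.
        """
        k = max(1, int(theta_sel * len(correct_set)))
        candidates = random.sample(range(len(correct_set)), k)
        best = max(candidates, key=lambda i: fitnesses[i])
        return correct_set[best]

    def uniform_crossover(parent1, parent2, chi=0.8):
        """With probability chi, swap each condition attribute independently (uniform crossover)."""
        child1, child2 = list(parent1), list(parent2)
        if random.random() < chi:
            for i in range(len(child1)):
                if random.random() < 0.5:
                    child1[i], child2[i] = child2[i], child1[i]
        return child1, child2

    def mutate(condition, alphabet=("0", "1", "#"), upsilon=0.4):
        """Flip each allele to a different symbol with probability upsilon."""
        return [random.choice([s for s in alphabet if s != a]) if random.random() < upsilon else a
                for a in condition]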