diff --git a/UCS/Problem_Multiplexer.py b/UCS/Problem_Multiplexer.py
index b0e0a802526aea30fb8f4549ca1d363babf70b50..86487cc50fe1c8a77079843a672d0755a2f79460 100644
--- a/UCS/Problem_Multiplexer.py
+++ b/UCS/Problem_Multiplexer.py
@@ -118,7 +118,7 @@ if __name__ == '__main__':
                 return i
         return None
 
-    bits = 3
+    bits = 11
     instances = 10
 
     generate_complete_multiplexer_data(str(bits)+"Multiplexer_Data_Complete.txt", bits) # 3,6,11,20,37
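With bits = 11 the generated file covers the full 11-bit multiplexer: the first 3 bits are address bits that select one of the remaining 8 data bits, and the selected data bit is the class (valid sizes are k + 2**k, i.e. the 3, 6, 11, 20, 37 noted in the comment). A minimal sketch of that labelling rule, with an illustrative helper name that is not part of the repository:

# Hedged sketch of how an n-bit multiplexer instance is labelled
# (k + 2**k layout: k address bits select one of 2**k data bits).
# The helper name is illustrative only, not the repository's generator.
def multiplexer_output(bitstring):
    k = 0
    while k + 2 ** k < len(bitstring):   # recover k from the total length
        k += 1
    assert k + 2 ** k == len(bitstring), "length must be k + 2**k"
    address = int(bitstring[:k], 2)      # address bits pick a data bit
    return bitstring[k + address]        # the selected data bit is the class

# e.g. multiplexer_output('01000000100') -> '0' (address 0b010 selects data bit 2)
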
diff --git a/UCS/Problem_Parity.py b/UCS/Problem_Parity.py
index 55b0f69353f2f99e0e624599a3eb91a7d55af1b1..cd7f8756225c25bf3a57b560ccc78ea68fa10c36 100644
--- a/UCS/Problem_Parity.py
+++ b/UCS/Problem_Parity.py
@@ -113,7 +113,7 @@ if __name__ == '__main__':
             return bits
         return None
 
-    bits = 6
+    bits = 11
     instances = 10
 
     #generate_parity_data(str(bits)+"-"+str(instances)+"Parity_Data.txt", bits, instances)
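With bits = 11 the complete parity dataset has 2**11 = 2048 instances, each labelled with the parity of its bit string (number of 1s modulo 2). A minimal sketch of that rule, with an illustrative helper name:

# Hedged sketch of the parity labelling rule: the class of an instance
# is the count of set bits modulo 2 (name is illustrative only).
def parity_output(bitstring):
    return str(bitstring.count('1') % 2)

# e.g. parity_output('10110010110') -> '0' (six 1s, even parity)
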
diff --git a/UCS/UCS_Configuration_File.txt b/UCS/UCS_Configuration_File.txt
index c0b3fc5a435a1ee309442affd8ebf68e63643888..640167ee3e5b0c2b6478019de922907b757f2af4 100644
--- a/UCS/UCS_Configuration_File.txt
+++ b/UCS/UCS_Configuration_File.txt
@@ -4,13 +4,13 @@
 ###### Major Run Parameters - Essential to be set correctly for a successful run of the algorithm
 ######--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
 datasetDirectory=Demo_Datasets					# Directory/Path of training and testing datasets (assumes they are in the same directory)
-trainFile=6Multiplexer_Data_Complete.txt		        # FileName of training dataset
+trainFile=11Parity_Data_Complete.txt		# FileName of training dataset
 testFile=None									# FileName of testing dataset.  If no testing data available or desired, put 'None'.
 outputDirectory=Local_Output					# Output file directory
 outputFile=ExampleRun							# FileName of output files.
 learningIterations=20000					# Specify complete algorithm evaluation checkpoints and maximum number of learning iterations (e.g. 1000.2000.5000 = A maximum of 5000 learning iterations with evaluations at 1000, 2000, and 5000 iterations)
 N=1000											# Maximum size of the rule population (a.k.a. Micro-classifier population size, where N is the sum of the classifier numerosities in the population)
-p_spec=0.9										# The probability of specifying an attribute when covering. (1-p_spec = the probability of adding '#' in ternary rule representations). Greater numbers of attributes in a dataset will require lower values of p_spec.
+p_spec=0.5										# The probability of specifying an attribute when covering. (1-p_spec = the probability of adding '#' in ternary rule representations). Greater numbers of attributes in a dataset will require lower values of p_spec.
 kfold=5									        # if not used, set to 0.
 
 ######--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
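p_spec is lowered from 0.9 to 0.5 in line with the comment above: the larger 11-attribute training problem calls for more general covering than the 6-bit multiplexer did. A minimal sketch of how covering could use p_spec to build a ternary condition (the function and its use of random are illustrative assumptions, not the UCS covering code):

import random

# Hedged sketch of ternary covering: each attribute of the current
# instance is copied into the new rule's condition with probability
# p_spec and generalised to '#' otherwise.  This only illustrates the
# parameter described above; it is not the UCS implementation.
def cover_condition(instance_state, p_spec=0.5):
    return [attr if random.random() < p_spec else '#'
            for attr in instance_state]

# e.g. cover_condition(list('10110010110'), p_spec=0.5)
# -> something like ['1', '#', '#', '1', '0', '#', '1', '#', '#', '1', '#']
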
diff --git a/UCS/UCS_DataManagement.py b/UCS/UCS_DataManagement.py
index cf71636b4816bcbc4bf89a1471508ff7e8da7eaf..f2e151f990cf812a5a6e2b610a137ea4ba4509a9 100644
--- a/UCS/UCS_DataManagement.py
+++ b/UCS/UCS_DataManagement.py
@@ -328,36 +328,9 @@ class DataManagement:
             formatted)  # One time randomization of the order the of the phenotypes in the data, so that if the data was ordered by phenotype, this potential learning bias (based on phenotype ordering) is eliminated.
         return formatted
 
-    def splitFolds(self):
+    def splitDataIntoKSets(self):
         """ divide data set into kfold sets. """
         data_size = len(self.trainFormatted)
-        class_counts = [0] * len(self.phenotypeList)
-        for instance in self.trainFormatted:
-            class_counts[self.phenotypeList.index(instance[1])] += 1
-        fold_size = int(data_size / cons.kfold)
-        split_again = True
-        while split_again:
-            split_again = False
-            self.folds = [[] for _ in range(cons.kfold)]
-            start_point = 0
-            for i in range(cons.kfold):
-                end_point = start_point + fold_size
-                if i < data_size % cons.kfold:
-                    end_point += 1
-                self.folds[i] = self.trainFormatted[start_point:end_point]
-                start_point = end_point
-                fold_class_counts = [0] * len(self.phenotypeList)
-                for instance in self.folds[i]:
-                    fold_class_counts[self.phenotypeList.index(instance[1])] += 1
-                for j in range(len(self.phenotypeList)):
-                    if fold_class_counts[j] == class_counts[j]:
-                        random.shuffle(self.trainFormatted)
-                        split_again = True
-
-    def splitFolds2(self):
-        """ divide data set into kfold sets. """
-        self.trainFormatted = stratify(self.trainFormatted)
-        data_size = len(self.trainFormatted)
         self.folds = [[] for _ in range(cons.kfold)]
         for fold_id in range(cons.kfold):
             fold_size = int(data_size / cons.kfold)
@@ -380,31 +353,3 @@ class DataManagement:
         self.numTestphenotypes = len(self.testFormatted)
         print("DataManagement: Number of Instances = " + str(self.numTrainphenotypes))
         print("DataManagement: Number of Instances = " + str(self.numTestphenotypes))
-
-
-def stratify(all_data):
-    """ divide data set into kfold sets. """
-    # sort by class
-    index = 1
-    numb_instances = len(all_data)
-    while index < numb_instances:
-        instance1 = all_data[index - 1]
-        for j in range(index, numb_instances):
-            instance2 = all_data[j]
-            if instance1[1] == instance2[1]:
-                # swap(index, j)
-                temp = all_data[index]
-                all_data[index] = all_data[j]
-                all_data[j] = temp
-                index += 1
-        index += 1
-    # rearrange classes to kfold trunks.
-    stratified_data = []
-    start = 0
-    while len(stratified_data) < numb_instances:
-        j = start
-        while j < numb_instances:
-            stratified_data.append(all_data[j])
-            j += cons.kfold
-        start += 1
-    return stratified_data
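This collapses the two earlier splitters (the rejection-sampling splitFolds and the stratify-based splitFolds2) into a single splitDataIntoKSets that cuts the already shuffled training data into cons.kfold folds; unlike the removed stratify helper, it does not enforce per-class proportions in each fold and relies on the one-time shuffle performed when the data is formatted. A standalone sketch of that kind of plain k-way split, assuming the remainder instances go to the first folds (the retained hunk only shows the start of the loop, so the remainder handling here is an assumption):

# Hedged sketch of a plain (non-stratified) k-fold partition of an
# already shuffled dataset.  Remainder handling is assumed: the first
# data_size % kfold folds each receive one extra instance.
def split_into_k_sets(data, kfold):
    data_size = len(data)
    folds, start = [], 0
    for fold_id in range(kfold):
        size = data_size // kfold + (1 if fold_id < data_size % kfold else 0)
        folds.append(data[start:start + size])
        start += size
    return folds

# e.g. [len(f) for f in split_into_k_sets(list(range(2048)), 10)]
# -> [205, 205, 205, 205, 205, 205, 205, 205, 204, 204]
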
diff --git a/UCS/UCS_Run.py b/UCS/UCS_Run.py
index e1ba168d17046559b98d30aa3eb385c54893762c..c3522ee7a344d2b57c4f89e3f1ebab1b65b34463 100644
--- a/UCS/UCS_Run.py
+++ b/UCS/UCS_Run.py
@@ -66,7 +66,7 @@ if __name__ == '__main__':
     t0 = time.clock()
     if cons.kfold > 0:
         total_instances = env.formatData.numTrainphenotypes
-        env.formatData.splitFolds2()
+        env.formatData.splitDataIntoKSets()
         accurate_numbs = [0.0] * cons.kfold
         for i in range(cons.kfold):
             env.formatData.selectTrainTestSets(i)
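The cross-validation loop weights each fold's accuracy by its test-set size and divides by the total instance count, which amounts to counting correctly classified instances over all folds. A small illustrative calculation of that aggregation (all numbers invented):

# Hedged sketch of the fold-weighted accuracy average used above:
# each fold contributes accuracy_i * test_size_i correct instances,
# and the final figure is total correct / total instances.
fold_accuracies = [0.95, 0.90, 1.00, 0.85, 0.92]   # illustrative values
fold_test_sizes = [13, 13, 13, 13, 12]             # e.g. 64 instances, 5 folds
correct = sum(a * n for a, n in zip(fold_accuracies, fold_test_sizes))
print("AVERAGE ACCURACY:", correct / sum(fold_test_sizes))
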
diff --git a/XCS/Problem_Multiplexer.py b/XCS/Problem_Multiplexer.py
index a97a9ab7d4fb823ffce72f6d5ba0fc5ebdc02182..c0ea96390e75cee5d03599f1943dec8c68a53c17 100644
--- a/XCS/Problem_Multiplexer.py
+++ b/XCS/Problem_Multiplexer.py
@@ -119,7 +119,7 @@ if __name__ == '__main__':
         return None
 
 
-    bits = 6
+    bits = 11
     instances = 10
 
     generate_complete_multiplexer_data(str(bits) + "Multiplexer_Data_Complete.txt", bits)  # 3,6,11,20,37
diff --git a/XCS/XCS_Configuration_File.txt b/XCS/XCS_Configuration_File.txt
index f5dcc65fedb487e66e925c808faa86ba2407e864..49cf4e8c5f454e120a359bb20ca0402616ce5378 100644
--- a/XCS/XCS_Configuration_File.txt
+++ b/XCS/XCS_Configuration_File.txt
@@ -4,14 +4,14 @@
 ###### Major Run Parameters - Essential to be set correctly for a successful run of the algorithm
 ######--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
 datasetDirectory=Demo_Datasets			    # Directory/Path of training and testing datasets (assumes they are in the same directory)
-trainFile=6Multiplexer_Data_Complete.txt	# Path/FileName of training dataset
+trainFile=11Multiplexer_Data_Complete.txt	# Path/FileName of training dataset
 testFile=None								# Path/FileName of testing dataset.  If no testing data available or desired, put 'None'.
 outputDirectory=Local_Output					    # Path/NewName for new algorithm output files. Note: Do not give a file extension, this is done automatically.
 outputFile=ExampleRun							# FileName of output files.
 learningIterations=20000						# Specify complete algorithm evaluation checkpoints and maximum number of learning iterations (e.g. 1000.2000.5000 = A maximum of 5000 learning iterations with evaluations at 1000, 2000, and 5000 iterations)
 N=1000											# Maximum size of the rule population (a.k.a. Micro-classifier population size, where N is the sum of the classifier numerosities in the population)
 p_spec=0.5										# The probability of specifying an attribute when covering. (1-p_spec = the probability of adding '#' in ternary rule representations). Greater numbers of attributes in a dataset will require lower values of p_spec.
-kfold=5									        # if not used, set to 0.
+kfold=10									        # if not used, set to 0.
 
 ######--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
 ###### Logistical Run Parameters
diff --git a/XCS/XCS_DataManagement.py b/XCS/XCS_DataManagement.py
index 44d08d352ab5c28d83930b0a28c33cec0827ddf0..cfffdcde0e836494e942e8e19e9c434cce7e3648 100644
--- a/XCS/XCS_DataManagement.py
+++ b/XCS/XCS_DataManagement.py
@@ -293,36 +293,9 @@ class DataManagement:
         # random.shuffle(formatted) #One time randomization of the order the of the instances in the data, so that if the data was ordered by phenotype, this potential learning bias (based on instance ordering) is eliminated.
         return formatted
 
-    def splitFolds(self):
+    def splitDataIntoKSets(self):
         """ divide data set into kfold sets. """
         data_size = len(self.trainFormatted)
-        class_counts = [0] * len(self.phenotypeList)
-        for instance in self.trainFormatted:
-            class_counts[self.phenotypeList.index(instance[1])] += 1
-        fold_size = int(data_size / cons.kfold)
-        split_again = True
-        while split_again:
-            split_again = False
-            self.folds = [[] for _ in range(cons.kfold)]
-            start_point = 0
-            for i in range(cons.kfold):
-                end_point = start_point + fold_size
-                if i < data_size % cons.kfold:
-                    end_point += 1
-                self.folds[i] = self.trainFormatted[start_point:end_point]
-                start_point = end_point
-                fold_class_counts = [0] * len(self.phenotypeList)
-                for instance in self.folds[i]:
-                    fold_class_counts[self.phenotypeList.index(instance[1])] += 1
-                for j in range(len(self.phenotypeList)):
-                    if fold_class_counts[j] == class_counts[j]:
-                        random.shuffle(self.trainFormatted)
-                        split_again = True
-
-    def splitFolds2(self):
-        """ divide data set into kfold sets. """
-        self.trainFormatted = stratify(self.trainFormatted)
-        data_size = len(self.trainFormatted)
         self.folds = [[] for _ in range(cons.kfold)]
         for fold_id in range(cons.kfold):
             fold_size = int(data_size / cons.kfold)
@@ -345,31 +318,3 @@ class DataManagement:
         self.numTestphenotypes = len(self.formatted_test_data)
         print("DataManagement: Number of Instances = " + str(self.numTrainphenotypes))
         print("DataManagement: Number of Instances = " + str(self.numTestphenotypes))
-
-
-def stratify(all_data):
-    """ divide data set into kfold sets. """
-    # sort by class
-    index = 1
-    numb_instances = len(all_data)
-    while index < numb_instances:
-        instance1 = all_data[index - 1]
-        for j in range(index, numb_instances):
-            instance2 = all_data[j]
-            if instance1[1] == instance2[1]:
-                # swap(index, j)
-                temp = all_data[index]
-                all_data[index] = all_data[j]
-                all_data[j] = temp
-                index += 1
-        index += 1
-    # rearrange classes to kfold trunks.
-    stratified_data = []
-    start = 0
-    while len(stratified_data) < numb_instances:
-        j = start
-        while j < numb_instances:
-            stratified_data.append(all_data[j])
-            j += cons.kfold
-        start += 1
-    return stratified_data
diff --git a/XCS/XCS_Run.py b/XCS/XCS_Run.py
index e4ac230fb787dc59b28e64deaf36eb549108c4da..8fa5de69cd2a755af1732c093f77101b3832aea5 100644
--- a/XCS/XCS_Run.py
+++ b/XCS/XCS_Run.py
@@ -52,14 +52,13 @@ if __name__ == '__main__':
     t0 = time.clock()
     if cons.kfold > 0:
         total_instances = env.format_data.numTrainphenotypes
-        env.format_data.splitFolds2()
+        env.format_data.splitDataIntoKSets()
         accurate_numbs = [0.0] * cons.kfold
         for i in range(cons.kfold):
             env.format_data.selectTrainTestSets(i)
             cons.parseIterations()  # Identify the maximum number of learning iterations as well as evaluation checkpoints.
             XCS().run_XCS()
             accuracy = XCS.standard_accuracy
-            # accuracy = XCS().run_XCS()[0]
             accurate_numbs[i] = accuracy * env.format_data.numTestphenotypes
         print("AVERAGE ACCURACY AFTER " + str(cons.kfold) + "-FOLD CROSS VALIDATION is " + str(
             sum(accurate_numbs) / total_instances))
@@ -68,4 +67,4 @@ if __name__ == '__main__':
         XCS().run_XCS()
     t1 = time.clock()
     total = t1 - t0
-    print("Run time in seconds: %.2f" % round(total, 2))
+    print("Total un time in seconds: %.2f" % round(total, 2))