diff --git a/config/__pycache__/config.cpython-36.pyc b/config/__pycache__/config.cpython-36.pyc
index 690e69f069ede00651f0b3c6eaae4b77b2dd567b..1f2c95cdf0ef9c2545404baed33b5657a108ac46 100644
Binary files a/config/__pycache__/config.cpython-36.pyc and b/config/__pycache__/config.cpython-36.pyc differ
diff --git a/config/config.py b/config/config.py
index 42f55291d2b27b284f7168363d369f7c10db9fa8..8efc6a9f27f126e0623af676a42e04a1ae7770e1 100644
--- a/config/config.py
+++ b/config/config.py
@@ -1,4 +1,3 @@
-import numpy as np
 import configparser
 """
 Configuration Parser:
@@ -75,10 +74,11 @@ class Config(object):
 		self.cfg.read(default_path)
 		if config_file is not None:
 			path = base_path + config_file
-			self.cfg.read(path)	
+			self.cfg.read(path)
+
+	def read_list(self,input_str):
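+		# Parse a raw option string from the config file into task labels.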
 
-	def read_list(self,input_str,dtype=np.int32):
-			
 		task_labels = []
 		count = -1
 		for i in input_str:
diff --git a/results/mnist/result.png b/results/mnist/result.png
index 4e7dc261692d494b789d47ac82892ecac86d65c8..1c6d0ced9f519de65107224cf135d913991bc68e 100644
Binary files a/results/mnist/result.png and b/results/mnist/result.png differ
diff --git a/split_mnist.py b/split_mnist.py
index cc2ed5d839211a1ce3ce71fd621e4add20c11f85..0e5fd6943979f34236a19dbeeeec867563259307 100644
--- a/split_mnist.py
+++ b/split_mnist.py
@@ -1,5 +1,5 @@
 from keras.models import Model
-from keras.layers import Dense, Input, Conv2D, Flatten, MaxPooling2D
+from keras.layers import Dense, Input
 from configuration import conf
 from utils.dataloader import Sequential_loader
 from utils.model_utils import mask_layer_by_task
diff --git a/utils/__pycache__/dataloader.cpython-36.pyc b/utils/__pycache__/dataloader.cpython-36.pyc
index 2aca37454c4aa6041ffe93a069f661c8d7527c6d..dfbca71e0147ebb82c9dddcc275269f50f6106b6 100644
Binary files a/utils/__pycache__/dataloader.cpython-36.pyc and b/utils/__pycache__/dataloader.cpython-36.pyc differ
diff --git a/utils/__pycache__/predict_utils.cpython-36.pyc b/utils/__pycache__/predict_utils.cpython-36.pyc
index 0da2764197a979d5da29517e7fd5eb87b7059480..fb68c8404c6f5971851e90a532f92a8de8153a36 100644
Binary files a/utils/__pycache__/predict_utils.cpython-36.pyc and b/utils/__pycache__/predict_utils.cpython-36.pyc differ
diff --git a/utils/data_utils.py b/utils/data_utils.py
index 7a20e6adad39974be4e3a306de472cbba7d1397d..80296baab25b8ec0b41805cc81e62a43c4477fd6 100644
--- a/utils/data_utils.py
+++ b/utils/data_utils.py
@@ -4,10 +4,10 @@ import numpy as np
 import pandas as pd
 
 def load_sub_omniglot(path,n=0):
-	
-	'''
+
+	"""
 	path => Path of train directory or test directory
-	'''
+	"""
 	X=[]
 	y = []
 	cat_dict = {}
@@ -16,35 +16,36 @@ def load_sub_omniglot(path,n=0):
 
 	# we load every alphabet seperately so we can isolate them later
 	for alphabet in os.listdir(path):
-	    if alphabet == '.DS_Store':
-	    	continue
-	    print("loading alphabet: " + alphabet)
-	    lang_dict[alphabet] = [curr_y,None]
-	    alphabet_path = os.path.join(path,alphabet)
-	    
-	    # every letter/category has it's own column in the array, so  load seperately
-	    for letter in os.listdir(alphabet_path):
-	        if letter == '.DS_Store':
-	        	continue
-	        cat_dict[curr_y] = (alphabet, letter)
-	        category_images=[]
-	        letter_path = os.path.join(alphabet_path, letter)
-	        
-	        # read all the images in the current category
-	        for filename in os.listdir(letter_path):
-	            image_path = os.path.join(letter_path, filename)
-	            image = imread(image_path)
-	            category_images.append(image)
-	            y.append(curr_y)
-	        try:
-	            #X.append(np.stack(category_images))
-	            X += list(np.stack(category_images))
-	        # edge case  - last one
-	        except ValueError as e:
-	            print(e)
-	            print("error - category_images:", category_images)
-	        curr_y += 1
-	        lang_dict[alphabet][1] = curr_y - 1
+		if alphabet == '.DS_Store':
+			continue
+		print("loading alphabet: " + alphabet)
+		lang_dict[alphabet] = [curr_y,None]
+		alphabet_path = os.path.join(path,alphabet)
+
+		# every letter/category has its own column in the array, so load separately
+		for letter in os.listdir(alphabet_path):
+			if letter == '.DS_Store':
+				continue
+			cat_dict[curr_y] = (alphabet, letter)
+			category_images=[]
+			letter_path = os.path.join(alphabet_path, letter)
+
+			# read all the images in the current category
+			for filename in os.listdir(letter_path):
+				image_path = os.path.join(letter_path, filename)
+				image = imread(image_path)
+				category_images.append(image)
+				y.append(curr_y)
+			try:
+				#X.append(np.stack(category_images))
+				X += list(np.stack(category_images))
+			# edge case - np.stack raises ValueError if category_images is empty
+			except ValueError as e:
+				print(e)
+				print("error - category_images:", category_images)
+			curr_y += 1
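+			# record the last category index seen for this alphabet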
+			lang_dict[alphabet][1] = curr_y - 1
 	y = np.vstack(y)
 	X = np.stack(X)
 	print(X.shape,y.shape,len(np.unique(y)))
@@ -96,16 +96,6 @@ def load_tihm(path=None):
 
 
 
-def load_agitation(path=None):
-	if path is None:
-		path = './data/aigitation'
-
-	data = []
-	label = []
-
-
-
-
 
 
 
diff --git a/utils/dataloader.py b/utils/dataloader.py
index ea1a7bbb47e0d75eda968dcef09e4ac37e1c5e17..ff2a0cb035d33fadc3c4b6322da4a39e42a933ec 100644
--- a/utils/dataloader.py
+++ b/utils/dataloader.py
@@ -5,189 +5,127 @@ from configuration import conf
 
 
 class Baseline_loader(object):
-	def __init__(self):
-		try:
-			load_func = getattr(load_data,'load_'+ conf.dataset_name)
-			self.data = load_func(conf.is_conv)
-		except:
-			assert ValueError('Dataset is not available ... ')
-		conf.num_samples = self.data['X_train'].shape[0]
-		conf.shape_of_sample = self.data['X_train'].shape[1:]
-
-	@property
-	def num_classes(self):
-		return len(np.unique(self.data['y_train']))
-
-	@property
-	def num_samples(self):
-		return self.data['X_train'].shape[1]
-
-	@property
-	def shape_of_sample(self):
-		return self.data['X_train'].shape[1:]
-
-	@property
-	def is_flatten(self):
-		return len(self.data['X_train']) == 2
-		
-
-	def sample(self, dataset='train', batch_size=None):
-		if batch_size is None:
-			batch_size = conf.batch_size
-		assert dataset in ['train', 'val', 'test']
-
-		N = self.data['X_' + dataset].shape[0]
-		idx_N = np.random.choice(N, batch_size, replace=False)
-
-		images, labels = self.data['X_' + dataset][idx_N], self.data['y_' + dataset][idx_N]
-
-		return images, labels
-
-	def build_iterator(self, batch_size=None):
-		if batch_size is None:
-			batch_size = conf.batch_size
-		self.init = {}
-		with tf.name_scope('data'):
-			train_data = tf.data.Dataset.from_tensor_slices(self.data['X_train']).batch(batch_size)
-			test_data = tf.data.Dataset.from_tensor_slices(self.data['X_test']).batch(batch_size)
-			iterator = tf.data.Iterator.from_structure(data.output_types,data.output_shapes)
-			img,label = iterator.get_next()
-			self.init['train'] = iterator.make_initializer(train_data)
-			self.init['test'] = iterator.make_initializer(test_data)
-
-	def get_whole_dataset(self):
-		return self.data
-
-			
-
-class Sequential_loader(object):
-	def __init__(self):
-		self.data = Load_data().load()
-		self._task_idx = 0
-		num_samples = 0
-		for i in range(len(self.data)):
-			num_samples += self.data[i]['X_train'].shape[0]
-		conf.num_samples = num_samples
-
-	@property
-	def num_classes(self):
-		num_classes = 0
-		for i in range(len(self.data)):
-			num_classes += len(np.unique(self.data[i]['y_train']))
-		return num_classes
-
-	@property
-	def num_samples(self):
-		return conf.num_samples
-
-	@property
-	def shape_of_sample(self):
-		return self.data[0]['X_train'].shape[1:]
-
-	@property
-	def is_flatten(self):
-		return len(self.data[0]['X_train']) == 2
-		
-
-	def sample(self, task_idx=None, dataset='train', batch_size=None,whole_set=False):
-		if batch_size is None:
-			batch_size = conf.batch_size
-		if task_idx is None:
-			task_idx = self._task_idx
-		assert dataset in ['train', 'val', 'test']
-
-		if whole_set:
-			return self.data[task_idx]['X_' + dataset], self.data[task_idx]['y_' + dataset]
-
-		N = self.data[task_idx]['X_' + dataset].shape[0]
-		idx_N = np.random.choice(N, batch_size, replace=False)
-
-		images, labels = self.data[task_idx]['X_' + dataset][idx_N], self.data[task_idx]['y_' + dataset][idx_N]
-		if labels.ndim == 1:
-			labels = labels[:,np.newaxis]
-		return images, labels
-
-
-
-	def _build_iterator(self, batch_size=None):
-		if batch_size is None:
-			batch_size = conf.batch_size
-		self.data_init = {}
-		with tf.name_scope('data'):
-			train_data = tf.data.Dataset.from_tensor_slices((self.data[0]['X_train'],self.data[0]['y_train'])).batch(batch_size)
-			test_data = tf.data.Dataset.from_tensor_slices((self.data[0]['X_test'],self.data[0]['y_test'])).batch(batch_size)
-			iterator = tf.data.Iterator.from_structure(data.output_types,data.output_shapes)
-			self.img_holder,self.label_holder = iterator.get_next()
-			self.data_init[0] = {}
-			self.data_init[0]['train'] = iterator.make_initializer(train_data)
-			self.data_init[0]['test'] = iterator.make_initializer(test_data)
-			for i in range(1,len(self.data)):
-				train_data = tf.data.Dataset.from_tensor_slices((self.data[0]['X_train'],self.data[0]['y_train'])).batch(batch_size)
-				test_data = tf.data.Dataset.from_tensor_slices((self.data[0]['X_test'],self.data[0]['y_test'])).batch(batch_size)
-				self.data_init[i] = {}
-				self.data_init[i]['train'] = iterator.make_initializer(train_data)
-				self.data_init[i]['test'] = iterator.make_initializer(test_data)
-
-	@property
-	def task_idx(self):
-		return self._task_idx
-	
-
-	@task_idx.setter
-	def task_idx(self,idx):
-		self._task_idx = idx
-		print('------------ Training Task Index : %d ------------'%self._task_idx)
-	
-
-	def initial_data(self,task_idx=None):
-		if not hasattr(self,'data_init'):
-			self._build_iterator()
-		if task_idx is None:
-			task_idx = self._task_idx
-
-		return self.data_init[task_idx]
-
-	def get_holder(self):
-		if conf.enable_iterator:
-			return self.img_holder, self.label_holder
-		else:
-			img_holder = tf.placeholder(dtype=tf.float32,shape=[None,self.shape_of_sample[0]])
-			label_holder = tf.placeholder(dtype=tf.float32,shape=[None,conf.num_classes] if conf.enable_one_hot else [None,1])
-
-			return img_holder, label_holder
-	
-
-	def get_whole_dataset(self):
-		return self.data
+    def __init__(self):
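+        # Resolve the load function for the configured dataset by name,
+        # e.g. dataset_name 'mnist' -> load_mnist.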
+        try:
+            load_func = getattr(Load_data, 'load_' + conf.dataset_name)
+            self.data = load_func(conf.is_conv)
+        except AttributeError:
+            raise ValueError('Dataset is not available ... ')
+        conf.num_samples = self.data['X_train'].shape[0]
+        conf.shape_of_sample = self.data['X_train'].shape[1:]
 
+    @property
+    def num_classes(self):
+        return len(np.unique(self.data['y_train']))
 
+    @property
+    def num_samples(self):
+        return self.data['X_train'].shape[0]
 
+    @property
+    def shape_of_sample(self):
+        return self.data['X_train'].shape[1:]
 
+    @property
+    def is_flatten(self):
+        return len(self.data['X_train'].shape) == 2
 
+    def sample(self, dataset='train', batch_size=None):
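+        # Draw a random batch, without replacement, from the requested split.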
+        if batch_size is None:
+            batch_size = conf.batch_size
+        assert dataset in ['train', 'val', 'test']
 
+        N = self.data['X_' + dataset].shape[0]
+        idx_N = np.random.choice(N, batch_size, replace=False)
 
+        images, labels = self.data['X_' + dataset][idx_N], self.data['y_' + dataset][idx_N]
 
+        return images, labels
 
+    def get_whole_dataset(self):
+        return self.data
 
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-	
-
-	
+class Sequential_loader(object):
+    def __init__(self):
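+        # self.data is a list of per-task dicts holding 'X_'/'y_' splits.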
+        self.data = Load_data().load()
+        self._task_idx = 0
+        num_samples = 0
+        for i in range(len(self.data)):
+            num_samples += self.data[i]['X_train'].shape[0]
+        conf.num_samples = num_samples
+
+    @property
+    def num_classes(self):
+        num_classes = 0
+        for i in range(len(self.data)):
+            num_classes += len(np.unique(self.data[i]['y_train']))
+        return num_classes
+
+    @property
+    def num_samples(self):
+        return conf.num_samples
+
+    @property
+    def shape_of_sample(self):
+        return self.data[0]['X_train'].shape[1:]
+
+    @property
+    def is_flatten(self):
+        return len(self.data[0]['X_train'].shape) == 2
+
+    def sample(self, task_idx=None, dataset='train', batch_size=None, whole_set=False):
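+        # Draw a random batch from the given task, or return the task's
+        # entire split when whole_set=True.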
+        if batch_size is None:
+            batch_size = conf.batch_size
+        if task_idx is None:
+            task_idx = self._task_idx
+        assert dataset in ['train', 'val', 'test']
+
+        if whole_set:
+            return self.data[task_idx]['X_' + dataset], self.data[task_idx]['y_' + dataset]
+
+        N = self.data[task_idx]['X_' + dataset].shape[0]
+        idx_N = np.random.choice(N, batch_size, replace=False)
+
+        images, labels = self.data[task_idx]['X_' + dataset][idx_N], self.data[task_idx]['y_' + dataset][idx_N]
+        if labels.ndim == 1:
+            labels = labels[:, np.newaxis]
+        return images, labels
+
+    @property
+    def task_idx(self):
+        return self._task_idx
+
+    @task_idx.setter
+    def task_idx(self, idx):
+        self._task_idx = idx
+        print('------------ Training Task Index : %d ------------' % self._task_idx)
+
+    def initial_data(self, task_idx=None):
+        # The tf.data pipeline (_build_iterator) no longer exists in this
+        # loader, so per-task dataset initializers cannot be built here;
+        # draw batches with sample() instead.
+        raise NotImplementedError(
+            'initial_data requires the removed _build_iterator pipeline'
+        )
+
+    def get_holder(self):
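+        # The enable_iterator branch expects img_holder/label_holder, which
+        # are no longer set up; the fallback builds flat TF1 placeholders.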
+        if conf.enable_iterator:
+            return self.img_holder, self.label_holder
+        else:
+            img_holder = tf.placeholder(dtype=tf.float32, shape=[None, self.shape_of_sample[0]])
+            label_holder = tf.placeholder(dtype=tf.float32,
+                                          shape=[None, conf.num_classes] if conf.enable_one_hot else [None, 1])
+
+            return img_holder, label_holder
+
+    def get_whole_dataset(self):
+        return self.data
diff --git a/utils/predict_utils.py b/utils/predict_utils.py
index 1b2e82c8ab3426fc72a1803bbc06fd0ea1bdd65c..38bb4db0d41fdb1cb3c3d5623ec236b6bd73c950 100644
--- a/utils/predict_utils.py
+++ b/utils/predict_utils.py
@@ -25,7 +25,6 @@ def block_likelihood(res, keep_dim=False):
 def get_task_likelihood(model, learned_task, test_task, data_loader):
     task_likelihood = []
     task_likelihood_var = []
-    tlh_prediction = []
     pred = []
     x, y = data_loader.sample(test_task, whole_set=True, dataset='test')
     for learnd_idx in range(learned_task + 1):