diff --git a/.idea/Task_likelihood.iml b/.idea/Task_likelihood.iml
new file mode 100644
index 0000000000000000000000000000000000000000..5c88ce7c6d95b3ca64e0194727baeee233d72e2f
--- /dev/null
+++ b/.idea/Task_likelihood.iml
@@ -0,0 +1,11 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<module type="PYTHON_MODULE" version="4">
+  <component name="NewModuleRootManager">
+    <content url="file://$MODULE_DIR$" />
+    <orderEntry type="jdk" jdkName="Python 3.6 (tensorflow)" jdkType="Python SDK" />
+    <orderEntry type="sourceFolder" forTests="false" />
+  </component>
+  <component name="TestRunnerService">
+    <option name="PROJECT_TEST_RUNNER" value="Unittests" />
+  </component>
+</module>
\ No newline at end of file
diff --git a/.idea/inspectionProfiles/profiles_settings.xml b/.idea/inspectionProfiles/profiles_settings.xml
new file mode 100644
index 0000000000000000000000000000000000000000..105ce2da2d6447d11dfe32bfb846c3d5b199fc99
--- /dev/null
+++ b/.idea/inspectionProfiles/profiles_settings.xml
@@ -0,0 +1,6 @@
+<component name="InspectionProjectProfileManager">
+  <settings>
+    <option name="USE_PROJECT_PROFILE" value="false" />
+    <version value="1.0" />
+  </settings>
+</component>
\ No newline at end of file
diff --git a/.idea/misc.xml b/.idea/misc.xml
new file mode 100644
index 0000000000000000000000000000000000000000..281715b4fef1e727e66331c2e8eaf10e7e9222f7
--- /dev/null
+++ b/.idea/misc.xml
@@ -0,0 +1,4 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project version="4">
+  <component name="ProjectRootManager" version="2" project-jdk-name="Python 3.6 (tensorflow)" project-jdk-type="Python SDK" />
+</project>
\ No newline at end of file
diff --git a/.idea/modules.xml b/.idea/modules.xml
new file mode 100644
index 0000000000000000000000000000000000000000..02cd9d3c4826f599bb7b729a08498608b57472eb
--- /dev/null
+++ b/.idea/modules.xml
@@ -0,0 +1,8 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project version="4">
+  <component name="ProjectModuleManager">
+    <modules>
+      <module fileurl="file://$PROJECT_DIR$/.idea/Task_likelihood.iml" filepath="$PROJECT_DIR$/.idea/Task_likelihood.iml" />
+    </modules>
+  </component>
+</project>
\ No newline at end of file
diff --git a/.idea/workspace.xml b/.idea/workspace.xml
new file mode 100644
index 0000000000000000000000000000000000000000..c557f00ec4f3084715eb411459a61bf94bcb92e4
--- /dev/null
+++ b/.idea/workspace.xml
@@ -0,0 +1,88 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project version="4">
+  <component name="ChangeListManager">
+    <list default="true" id="99550bc5-aa3b-44b3-94d7-b84afc45844f" name="Default Changelist" comment="" />
+    <option name="EXCLUDED_CONVERTED_TO_IGNORED" value="true" />
+    <option name="SHOW_DIALOG" value="false" />
+    <option name="HIGHLIGHT_CONFLICTS" value="true" />
+    <option name="HIGHLIGHT_NON_ACTIVE_CHANGELIST" value="false" />
+    <option name="LAST_RESOLUTION" value="IGNORE" />
+  </component>
+  <component name="ProjectId" id="1UWaMsw6zBMWl6bWHnCTXed0kq5" />
+  <component name="PropertiesComponent">
+    <property name="settings.editor.selected.configurable" value="com.jetbrains.python.configuration.PyActiveSdkModuleConfigurable" />
+  </component>
+  <component name="RunDashboard">
+    <option name="ruleStates">
+      <list>
+        <RuleState>
+          <option name="name" value="ConfigurationTypeDashboardGroupingRule" />
+        </RuleState>
+        <RuleState>
+          <option name="name" value="StatusDashboardGroupingRule" />
+        </RuleState>
+      </list>
+    </option>
+  </component>
+  <component name="RunManager" selected="Python.learn_consistent">
+    <configuration name="learn_consistent" type="PythonConfigurationType" factoryName="Python" temporary="true">
+      <module name="Task_likelihood" />
+      <option name="INTERPRETER_OPTIONS" value="" />
+      <option name="PARENT_ENVS" value="true" />
+      <option name="SDK_HOME" value="" />
+      <option name="WORKING_DIRECTORY" value="$PROJECT_DIR$" />
+      <option name="IS_MODULE_SDK" value="true" />
+      <option name="ADD_CONTENT_ROOTS" value="true" />
+      <option name="ADD_SOURCE_ROOTS" value="true" />
+      <option name="SCRIPT_NAME" value="$PROJECT_DIR$/learn_consistent.py" />
+      <option name="PARAMETERS" value="" />
+      <option name="SHOW_COMMAND_LINE" value="true" />
+      <option name="EMULATE_TERMINAL" value="false" />
+      <option name="MODULE_MODE" value="false" />
+      <option name="REDIRECT_INPUT" value="false" />
+      <option name="INPUT_FILE" value="" />
+      <method v="2" />
+    </configuration>
+    <configuration name="split_mnit" type="PythonConfigurationType" factoryName="Python" temporary="true">
+      <module name="Task_likelihood" />
+      <option name="INTERPRETER_OPTIONS" value="" />
+      <option name="PARENT_ENVS" value="true" />
+      <option name="SDK_HOME" value="" />
+      <option name="WORKING_DIRECTORY" value="$PROJECT_DIR$" />
+      <option name="IS_MODULE_SDK" value="true" />
+      <option name="ADD_CONTENT_ROOTS" value="true" />
+      <option name="ADD_SOURCE_ROOTS" value="true" />
+      <option name="SCRIPT_NAME" value="$PROJECT_DIR$/split_mnit.py" />
+      <option name="PARAMETERS" value="" />
+      <option name="SHOW_COMMAND_LINE" value="true" />
+      <option name="EMULATE_TERMINAL" value="false" />
+      <option name="MODULE_MODE" value="false" />
+      <option name="REDIRECT_INPUT" value="false" />
+      <option name="INPUT_FILE" value="" />
+      <method v="2" />
+    </configuration>
+    <list>
+      <item itemvalue="Python.learn_consistent" />
+      <item itemvalue="Python.split_mnit" />
+    </list>
+    <recent_temporary>
+      <list>
+        <item itemvalue="Python.learn_consistent" />
+        <item itemvalue="Python.split_mnit" />
+      </list>
+    </recent_temporary>
+  </component>
+  <component name="SvnConfiguration">
+    <configuration />
+  </component>
+  <component name="TaskManager">
+    <task active="true" id="Default" summary="Default task">
+      <changelist id="99550bc5-aa3b-44b3-94d7-b84afc45844f" name="Default Changelist" comment="" />
+      <created>1575474759490</created>
+      <option name="number" value="Default" />
+      <option name="presentableId" value="Default" />
+      <updated>1575474759490</updated>
+    </task>
+    <servers />
+  </component>
+</project>
\ No newline at end of file
diff --git a/config/__init__.py b/config/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/config/config.py b/config/config.py
new file mode 100644
index 0000000000000000000000000000000000000000..42f55291d2b27b284f7168363d369f7c10db9fa8
--- /dev/null
+++ b/config/config.py
@@ -0,0 +1,239 @@
+import numpy as np
+import configparser
+"""
+Configuration Parser:
+
+	|- Task Setting: Describes the type of task
+		|- task_type:
+			|- Baseline: load all the data at once
+			|- Sequential: load several tasks sequentially
+				|- _split: split the dataset
+				|- _permute: permute the dataset
+			|- OneShot (#TODO): load data for one shot training
+
+		######################## Sequential Split Only ##########################
+		|- num_tasks: how many tasks to be trained
+		|- task_labels: list. FOR SPLIT EXPERIMENTS ONLY.
+		#########################################################################
+
+		|- dataset_name:
+			|- mnist
+			|- cifar10
+			|- cifar100
+			|- ucr(#TODO)
+			|- text(#TODO)
+			|- object detection(#TODO)
+
+	|- Model Setting:
+		|- model_type: Model Type
+			|- NN : Conventional Neural Networks
+			|- BNN: Bayesian Neural Networks
+			|- CBLN: Continual Learning Bayesian Neural Networks
+			|- GANs(#TODO): Generative Adversarial Networks
+		|- model_archi: Model Architecture
+			|- MLP: Multilayer Perceptron
+			|- CNN: Convolutional Neural Networks
+			|- RNN (#TODO): Recurrent Neural Networks
+			|- RL (#TODO): Reinforcement Learning
+		|- hidden_dim: mlp dim per layer
+		|- num_layers: number of mlp layers
+
+		######################## Convolutional Only ##########################
+		|- is_conv: use convolutional layers or not. False (Default). 
+		|- conv_hidden_dim: convolutional dim per layer
+		|- conv_num_layers: number of convolutional layers
+		|- num_filter
+		|- stride
+		######################################################################
+
+	|- Training Setting:
+		|- batch_size
+		|- num_epoch / num_iter
+		|- learning_rate
+		|- optimizer: which optimizer to use
+		|- enable_one_hot: True (Default)
+		|- multi_head: False (Default)
+		|- lam: balance the novel and learned knowledge
+		|- enable_iterator: use iterator or sample from data. False (Default)
+
+	|- Test Setting: used to produce the uncertainty information
+		|- num_runs
+		|- num_experiment
+
+
+"""
+
+
+
+
+
+class Config(object):
+	def __init__(self,config_file=None):
+		base_path = './config/configuration/'
+		default_path = base_path + 'default.ini'
+		self.cfg = configparser.ConfigParser()
+		self.cfg.read(default_path)
+		if config_file is not None:
+			path = base_path + config_file
+			self.cfg.read(path)	
+
+	def read_list(self,input_str,dtype=np.int32):
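+		"""Parse a task-label string such as '|0,1|2,3' into [[0, 1], [2, 3]].
+
+		Each group of labels must be introduced by '|'; labels are read one
+		character at a time, so only single-digit class labels are supported.
+		"""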
+		task_labels = []
+		count = -1
+		for i in input_str:
+			if i in [',',' ']:
+				pass
+			elif i == '|':
+				count += 1
+				task_labels.append([])
+			else:
+				task_labels[count].append(int(i))
+		return task_labels
+
+	def read_omniglot_labels(self):
+		return [range(10),range(10,20),range(20,30)]
+
+
+	######################## Task Setting ##########################
+	@property
+	def task_type(self):
+		return self.cfg.get('Task Setting', 'task_type')
+
+	@property
+	def num_tasks(self):
+		try:
+			return self.cfg.getint('Task Setting', 'num_tasks')
+		except configparser.NoOptionError:
+			return len(self.task_labels)
+
+	@property
+	def task_labels(self):
+		#return self.read_omniglot_labels()
+		return self.read_list(self.cfg.get('Task Setting', 'task_labels'))
+	
+	@property
+	def dataset_name(self):
+		return self.cfg.get('Task Setting', 'dataset_name')
+	
+	
+	######################## Model Setting ##########################
+	@property
+	def model_type(self):
+		return self.cfg.get('Model Setting', 'model_type')
+
+	@property
+	def model_archi(self):
+		return self.cfg.get('Model Setting', 'model_archi')
+
+	@property
+	def hidden_dim(self):
+		return self.cfg.getint('Model Setting', 'hidden_dim')
+
+	@property
+	def num_layers(self):
+		return self.cfg.getint('Model Setting', 'num_layers')
+
+	@property
+	def is_conv(self):
+		return self.cfg.getboolean('Model Setting', 'is_conv')
+
+	@property
+	def conv_hidden_dim(self):
+		return self.cfg.getint('Model Setting', 'conv_hidden_dim')
+
+	@property
+	def conv_num_layers(self):
+		return self.cfg.getint('Model Setting', 'conv_num_layers')
+
+	@property
+	def num_filter(self):
+		return self.cfg.getint('Model Setting', 'num_filter')
+
+	@property
+	def stride(self):
+		return self.cfg.getint('Model Setting', 'stride')
+
+
+	######################## Training Setting ##########################
+
+	@property
+	def batch_size(self):
+		return self.cfg.getint('Training Setting', 'batch_size')
+
+	@property
+	def num_epoch(self):
+		return self.cfg.getint('Training Setting', 'num_epoch')
+
+	@property
+	def display_frequency(self):
+		return self.cfg.getint('Training Setting', 'display_frequency')
+
+	@property
+	def num_iter(self):
+		try:
+			return int(self.num_epoch * self.num_samples / self.batch_size)
+		except configparser.NoOptionError:
+			return self.cfg.getint('Training Setting', 'num_iter')
+
+	@property
+	def learning_rate(self):
+		return self.cfg.getfloat('Training Setting', 'learning_rate')
+
+	@property
+	def optimizer(self):
+		return self.cfg.get('Training Setting', 'optimizer')
+
+	@property
+	def enable_one_hot(self):
+		return self.cfg.getboolean('Training Setting', 'enable_one_hot')
+
+	@property
+	def multi_head(self):
+		return self.cfg.getboolean('Training Setting', 'multi_head')
+
+	@property
+	def enable_iterator(self):
+		return self.cfg.getboolean('Training Setting', 'enable_iterator')
+
+	@property
+	def lam(self):
+		return self.cfg.getfloat('Training Setting', 'lam')
+	
+	
+	@property
+	def num_samples(self):
+		return self._num_samples
+
+	@num_samples.setter
+	def num_samples(self,value):
+		self._num_samples = value
+
+	@property
+	def num_classes(self):
+		return self._num_classes
+
+	@num_classes.setter
+	def num_classes(self,value):
+		self._num_classes = value
+
+	######################## Test Setting ##########################
+	@property
+	def num_runs(self):
+		return self.cfg.getint('Test Setting', 'num_runs')
+
+	@property
+	def num_experiment(self):
+		return self.cfg.getint('Test Setting', 'num_experiment')
+	
+
+	@property
+	def test_batch_size(self):
+		return self.cfg.getint('Test Setting', 'test_batch_size')
+
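+
+
+if __name__ == '__main__':
+	# Minimal usage sketch (illustrative): read the shipped defaults plus the
+	# 'test.ini' overrides from ./config/configuration/, then query a few values.
+	conf = Config('test.ini')
+	print(conf.task_type)    # e.g. 'Sequential_split'
+	print(conf.task_labels)  # e.g. [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9]]
+	print(conf.num_tasks)    # falls back to len(task_labels) when num_tasks is unset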
diff --git a/config/configuration/default.ini b/config/configuration/default.ini
new file mode 100644
index 0000000000000000000000000000000000000000..49cf1877752613ef6a5782297db9f932ac196e09
--- /dev/null
+++ b/config/configuration/default.ini
@@ -0,0 +1,30 @@
+[Task Setting]
+# Baseline, Sequential_split, Sequential_permute, OneShot(#TODO)
+task_type = Sequential_split  
+
+# each task group must start with '|' (see Config.read_list)
+task_labels = |0,1,2,3,4|5,6,7,8,9
+dataset_name = mnist 
+
+[Model Setting]
+model_type = CBLN
+model_archi = MLP
+
+hidden_dim = 200
+num_layers = 3
+
+is_conv = False
+
+[Training Setting]
+batch_size = 128
+display_frequency = 200
+learning_rate = 0.01
+optimizer = Adam
+enable_one_hot = True
+multi_head = False
+enable_iterator = False
+lam = 1.
+
+[Test Setting]
+num_runs = 200
+num_experiment = 200
+
diff --git a/config/configuration/test.ini b/config/configuration/test.ini
new file mode 100644
index 0000000000000000000000000000000000000000..f6b6cc7cd128a6ac888feefd9137e86623d47409
--- /dev/null
+++ b/config/configuration/test.ini
@@ -0,0 +1,35 @@
+[Task Setting]
+# Baseline, Sequential_split, Sequential_permute, OneShot(#TODO)
+task_type = Sequential_split  
+# num_tasks = 5
+# task_labels = |0|1|2|3|4|5|6|7|8|9
+# task_labels = |0,1|2,3|4,5|6,7|8,9
+# task_labels = |0,1,2,3,4|5,6,7,8,9
+# task_labels = |0,1,2,3,4,5,6,7,8,9
+# task_labels = |0,1,2|3,4,5|6,7|8,9
+task_labels = |0,1|2,3|4,5|6,7|8,9
+dataset_name = mnist
+
+[Model Setting]
+model_type = NN
+model_archi = MLP
+
+hidden_dim = 200
+num_layers = 4
+
+is_conv = False
+
+[Training Setting]
+batch_size = 128
+num_iter = 1000
+optimizer = Adam
+lam = 0.
+multi_head = False
+enable_one_hot = True
+
+[Test Setting]
+num_runs = 200
+num_experiment = 20
+test_batch_size = 10
+
diff --git a/configuration.py b/configuration.py
new file mode 100644
index 0000000000000000000000000000000000000000..dcccb95eb90608bf4139245a6f5aa47ebbe4da4b
--- /dev/null
+++ b/configuration.py
@@ -0,0 +1,3 @@
+from config.config import Config
+
+conf = Config('test.ini')
diff --git a/learn_consistent.py b/learn_consistent.py
new file mode 100644
index 0000000000000000000000000000000000000000..bd108f10239a1544c921f1df9115f08cd5c5fa47
--- /dev/null
+++ b/learn_consistent.py
@@ -0,0 +1,107 @@
+from keras.models import Model
+from keras.layers import Dense, Input, Conv2D, Flatten, MaxPooling2D
+from configuration import conf
+from utils.dataloader import Sequential_loader
+import numpy as np
+from utils.model_utils import mask_layer_by_task
+from utils.layers import Probability_CLF_Mul_by_task
+from utils.train_utils import train_with_task
+from utils.predict_utils import get_task_likelihood, get_test_acc
+
+PATH = './results/%s/' % conf.dataset_name
+
+epochs = 50
+latent_dim = 250
+output_dim = 10
+verbose = 0
+
+data_loader = Sequential_loader()
+
+inputs = Input(shape=(784,))
+task_input = Input(shape=(5,))
+archi = Dense(1000, activation='relu')(inputs)
+archi = mask_layer_by_task(task_input, archi)
+archi = Dense(1000, activation='relu')(archi)
+archi = mask_layer_by_task(task_input, archi)
+
+task_output = Probability_CLF_Mul_by_task(conf.num_tasks, num_centers=output_dim // conf.num_tasks)(
+    [task_input, archi])
+task_output = mask_layer_by_task(task_input, task_output, 'task_out')
+clf = Dense(output_dim, activation='softmax')(archi)
+clf = mask_layer_by_task(task_input, clf, 'clf_out')
+model = Model(inputs=[inputs, task_input], outputs=[clf, task_output])
+model.compile(loss=['categorical_crossentropy', 'mse'], optimizer='adam', metrics=['accuracy', 'mse'],
+              loss_weights=[1, 4])
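+# The model has two heads: 'clf_out' predicts the class and 'task_out' estimates
+# the task likelihood; mask_layer_by_task keeps only the active task's units.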
+
+tlh = [] # Task Likelihood
+tlh_std = [] # Standard Deviation of Task Likelihood
+test_acc = []
+for task_idx in range(conf.num_tasks):
+    # Learn a new task
+    train_with_task(model, task_idx=task_idx, data_loader=data_loader)
+    # Get the likelihood of the current task
+    mean, std = get_task_likelihood(model, learned_task=task_idx, test_task=task_idx, data_loader=data_loader)
+    tlh.append(mean)
+    tlh_std.append(std)
+    # Get the likelihood of the next task
+    if task_idx < conf.num_tasks - 1:
+        mean, std = get_task_likelihood(model, learned_task=task_idx, test_task=task_idx+1, data_loader=data_loader)
+        tlh.append(mean)
+        tlh_std.append(std)
+    # Run 200 times to get the test accuracy (for drawing the figure)
+    for _ in range(conf.num_runs):
+        test_acc.append(get_test_acc(model,data_loader,test_on_whole_set=False))
+    # Print the average test accuracy across all the tasks
+    print('Learned task %d, average test accuracy on all tasks: %.3f' % (task_idx + 1, get_test_acc(model, data_loader, test_on_whole_set=True)))
+
+
+def paa(sample, w=None):
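+    """Piecewise Aggregate Approximation: downsample `sample` to w points by
+    averaging consecutive sections (w defaults to len(sample) // 20)."""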
+    w = sample.shape[0] // 20 if w is None else w
+    l = len(sample)
+    stepfloat = l / w
+    step = int(np.ceil(stepfloat))
+    start = 0
+    j = 1
+    paa = []
+    while start <= (l - step):
+        section = sample[start:start + step]
+        paa.append(np.mean(section))
+        start = int(j * stepfloat)
+        j += 1
+    return paa
+
+
+tlh_s = []
+for i in tlh:
+    tlh_s += i.tolist()
+tlh_s = np.array(tlh_s)
+
+tlh_std_s = []
+for i in tlh_std:
+    tlh_std_s += i.tolist()
+tlh_std_s = np.array(tlh_std_s)
+
+test_acc_s = np.array(test_acc).reshape(-1)
+
+import matplotlib.pyplot as plt
+import seaborn as sns
+
+sns.set()
+
+tlh = np.array(paa(tlh_s))
+tlh_std = np.array(paa(tlh_std_s))
+test_acc = np.array(paa(test_acc_s, tlh.shape[0]))
+
+fig = sns.lineplot(np.arange(len(tlh)), tlh, label='Task Likelihood')
+fig.fill_between(np.arange(len(tlh)), tlh - tlh_std, tlh + tlh_std, alpha=0.3)
+fig = sns.lineplot(np.arange(len(tlh)), test_acc, label='Test Accuracy')
+a = [10, 30, 50, 70]
+for i in a:
+    fig.fill_between(np.arange(i, i + 10 + 1), 0, 0.1, alpha=0.1, color='red')
+    fig.fill_between(np.arange(i - 10, i + 1), 0, 0.1, alpha=0.1, color='green')
+fig.fill_between(np.arange(90 - 10, 90), 0, 0.1, alpha=0.1, color='green')
+# a = fig.get_xticklabels()
+fig.set_xticklabels(['', 'Task 1', 'Task 2', 'Task 3', 'Task 4', 'Task 5'])
+plt.legend(loc='center right')
+plt.savefig(PATH + 'result')
+plt.show()
diff --git a/results/mnist/result.png b/results/mnist/result.png
new file mode 100644
index 0000000000000000000000000000000000000000..b718b04455e8592c094188b5b263ebe8fdebb9f6
Binary files /dev/null and b/results/mnist/result.png differ
diff --git a/utils/__init__.py b/utils/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/utils/data_utils.py b/utils/data_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..7a20e6adad39974be4e3a306de472cbba7d1397d
--- /dev/null
+++ b/utils/data_utils.py
@@ -0,0 +1,133 @@
+import os
+from matplotlib.pyplot import imread
+import numpy as np
+import pandas as pd
+
+def load_sub_omniglot(path,n=0):
+	'''
+	path => Path of train directory or test directory
+	'''
+	X = []
+	y = []
+	cat_dict = {}
+	lang_dict = {}
+	curr_y = n
+
+	# load every alphabet separately so we can isolate them later
+	for alphabet in os.listdir(path):
+		if alphabet == '.DS_Store':
+			continue
+		print("loading alphabet: " + alphabet)
+		lang_dict[alphabet] = [curr_y, None]
+		alphabet_path = os.path.join(path, alphabet)
+
+		# every letter/category has its own column in the array, so load separately
+		for letter in os.listdir(alphabet_path):
+			if letter == '.DS_Store':
+				continue
+			cat_dict[curr_y] = (alphabet, letter)
+			category_images = []
+			letter_path = os.path.join(alphabet_path, letter)
+
+			# read all the images in the current category
+			for filename in os.listdir(letter_path):
+				image_path = os.path.join(letter_path, filename)
+				image = imread(image_path)
+				category_images.append(image)
+				y.append(curr_y)
+			try:
+				X += list(np.stack(category_images))
+			# edge case - last one
+			except ValueError as e:
+				print(e)
+				print("error - category_images:", category_images)
+			curr_y += 1
+			lang_dict[alphabet][1] = curr_y - 1
+	y = np.vstack(y)
+	X = np.stack(X)
+	print(X.shape, y.shape, len(np.unique(y)))
+	return X, y
+
+
+def load_omniglot(path=None):
+	if path is None:
+		train_path = './data/omniglot/images_background'
+		test_path = './data/omniglot/images_evaluation'
+	else:
+		train_path,test_path = path
+
+
+	return load_sub_omniglot(train_path),load_sub_omniglot(test_path)
+
+
+
+
+def load_tihm(path=None):
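+	"""Load the tihm sensor dataset: features come from the '*_lag_new.csv'
+	files, labels from the matching '*_class.csv' files; the first 10000 rows
+	form the train split and the rest the test split."""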
+	if path is None:
+		path = './data/timh1.5'
+	
+	data = []
+	label = []
+
+	for filename in os.listdir(path):
+		if '_lag_new.csv' in filename:
+			file_path = os.path.join(path, filename)
+			d = pd.read_csv(file_path)
+			if 'halway' in list(d.columns):
+				d = d.drop(columns='halway')
+			if 'd_front' not in list(d.columns):
+				continue
+			d = np.array(d.values)
+			d = d[:,2:]
+			data += d.tolist()
+			f_label = filename.split('_lag_new.csv')[0]
+			f_label += '_class.csv'
+			file_path = os.path.join(path, f_label)
+			d = np.array(pd.read_csv(file_path,header=None))
+			
+			label += d.reshape(-1).tolist()
+
+	data = np.array(data)
+	label = np.array(label)
+
+	return (data[:10000],label[:10000]),(data[10000:],label[10000:])
+
+
+
+def load_agitation(path=None):
+	if path is None:
+		path = './data/aigitation'
+
+	data = []
+	label = []
+
+	# TODO: loading of the agitation dataset is not implemented yet
+	raise NotImplementedError('load_agitation is a stub')
diff --git a/utils/dataloader.py b/utils/dataloader.py
new file mode 100644
index 0000000000000000000000000000000000000000..ea1a7bbb47e0d75eda968dcef09e4ac37e1c5e17
--- /dev/null
+++ b/utils/dataloader.py
@@ -0,0 +1,193 @@
+import numpy as np
+from utils.load_data import Load_data
+import tensorflow as tf
+from configuration import conf
+
+
+class Baseline_loader(object):
+	def __init__(self):
+		try:
+			self.data = Load_data().load_dataset(conf.dataset_name, conf.is_conv)
+		except AttributeError:
+			raise ValueError('Dataset is not available ... ')
+		conf.num_samples = self.data['X_train'].shape[0]
+		conf.shape_of_sample = self.data['X_train'].shape[1:]
+
+	@property
+	def num_classes(self):
+		return len(np.unique(self.data['y_train']))
+
+	@property
+	def num_samples(self):
+		return self.data['X_train'].shape[0]
+
+	@property
+	def shape_of_sample(self):
+		return self.data['X_train'].shape[1:]
+
+	@property
+	def is_flatten(self):
+		return self.data['X_train'].ndim == 2
+		
+
+	def sample(self, dataset='train', batch_size=None):
+		if batch_size is None:
+			batch_size = conf.batch_size
+		assert dataset in ['train', 'val', 'test']
+
+		N = self.data['X_' + dataset].shape[0]
+		idx_N = np.random.choice(N, batch_size, replace=False)
+
+		images, labels = self.data['X_' + dataset][idx_N], self.data['y_' + dataset][idx_N]
+
+		return images, labels
+
+	def build_iterator(self, batch_size=None):
+		if batch_size is None:
+			batch_size = conf.batch_size
+		self.init = {}
+		with tf.name_scope('data'):
+			train_data = tf.data.Dataset.from_tensor_slices((self.data['X_train'], self.data['y_train'])).batch(batch_size)
+			test_data = tf.data.Dataset.from_tensor_slices((self.data['X_test'], self.data['y_test'])).batch(batch_size)
+			iterator = tf.data.Iterator.from_structure(train_data.output_types, train_data.output_shapes)
+			self.img_holder, self.label_holder = iterator.get_next()
+			self.init['train'] = iterator.make_initializer(train_data)
+			self.init['test'] = iterator.make_initializer(test_data)
+
+	def get_whole_dataset(self):
+		return self.data
+
+			
+
+class Sequential_loader(object):
+	def __init__(self):
+		self.data = Load_data().load()
+		self._task_idx = 0
+		num_samples = 0
+		for i in range(len(self.data)):
+			num_samples += self.data[i]['X_train'].shape[0]
+		conf.num_samples = num_samples
+
+	@property
+	def num_classes(self):
+		num_classes = 0
+		for i in range(len(self.data)):
+			num_classes += len(np.unique(self.data[i]['y_train']))
+		return num_classes
+
+	@property
+	def num_samples(self):
+		return conf.num_samples
+
+	@property
+	def shape_of_sample(self):
+		return self.data[0]['X_train'].shape[1:]
+
+	@property
+	def is_flatten(self):
+		return self.data[0]['X_train'].ndim == 2
+		
+
+	def sample(self, task_idx=None, dataset='train', batch_size=None,whole_set=False):
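+		"""Return a random batch (or, with whole_set=True, the full set) of task
+		`task_idx` from the train/val/test split named by `dataset`."""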
+		if batch_size is None:
+			batch_size = conf.batch_size
+		if task_idx is None:
+			task_idx = self._task_idx
+		assert dataset in ['train', 'val', 'test']
+
+		if whole_set:
+			return self.data[task_idx]['X_' + dataset], self.data[task_idx]['y_' + dataset]
+
+		N = self.data[task_idx]['X_' + dataset].shape[0]
+		idx_N = np.random.choice(N, batch_size, replace=False)
+
+		images, labels = self.data[task_idx]['X_' + dataset][idx_N], self.data[task_idx]['y_' + dataset][idx_N]
+		if labels.ndim == 1:
+			labels = labels[:,np.newaxis]
+		return images, labels
+
+
+
+	def _build_iterator(self, batch_size=None):
+		if batch_size is None:
+			batch_size = conf.batch_size
+		self.data_init = {}
+		with tf.name_scope('data'):
+			train_data = tf.data.Dataset.from_tensor_slices((self.data[0]['X_train'],self.data[0]['y_train'])).batch(batch_size)
+			test_data = tf.data.Dataset.from_tensor_slices((self.data[0]['X_test'],self.data[0]['y_test'])).batch(batch_size)
+			iterator = tf.data.Iterator.from_structure(train_data.output_types, train_data.output_shapes)
+			self.img_holder,self.label_holder = iterator.get_next()
+			self.data_init[0] = {}
+			self.data_init[0]['train'] = iterator.make_initializer(train_data)
+			self.data_init[0]['test'] = iterator.make_initializer(test_data)
+			for i in range(1,len(self.data)):
+				train_data = tf.data.Dataset.from_tensor_slices((self.data[i]['X_train'],self.data[i]['y_train'])).batch(batch_size)
+				test_data = tf.data.Dataset.from_tensor_slices((self.data[i]['X_test'],self.data[i]['y_test'])).batch(batch_size)
+				self.data_init[i] = {}
+				self.data_init[i]['train'] = iterator.make_initializer(train_data)
+				self.data_init[i]['test'] = iterator.make_initializer(test_data)
+
+	@property
+	def task_idx(self):
+		return self._task_idx
+	
+
+	@task_idx.setter
+	def task_idx(self,idx):
+		self._task_idx = idx
+		print('------------ Training Task Index : %d ------------'%self._task_idx)
+	
+
+	def initial_data(self,task_idx=None):
+		if not hasattr(self,'data_init'):
+			self._build_iterator()
+		if task_idx is None:
+			task_idx = self._task_idx
+
+		return self.data_init[task_idx]
+
+	def get_holder(self):
+		if conf.enable_iterator:
+			return self.img_holder, self.label_holder
+		else:
+			img_holder = tf.placeholder(dtype=tf.float32,shape=[None,self.shape_of_sample[0]])
+			label_holder = tf.placeholder(dtype=tf.float32,shape=[None,conf.num_classes] if conf.enable_one_hot else [None,1])
+
+			return img_holder, label_holder
+	
+
+	def get_whole_dataset(self):
+		return self.data
diff --git a/utils/layers.py b/utils/layers.py
new file mode 100644
index 0000000000000000000000000000000000000000..0327b3427d92662d202df6040504b167466278c4
--- /dev/null
+++ b/utils/layers.py
@@ -0,0 +1,59 @@
+import tensorflow as tf
+from keras import backend as K
+from utils.model_utils import mask_layer_by_task
+from keras.layers import Layer
+
+# Multiple Centers
+class Probability_CLF_Mul_by_task(Layer):
+    """docstring for Probability_CLF"""
+
+    def __init__(self, output_dim, num_centers=4, activation=None, sigma=1.0, **kwargs):
+        self.output_dim = output_dim
+        self.num_centers = num_centers
+        self.sigma = sigma
+        self.activation = activation
+        super(Probability_CLF_Mul_by_task, self).__init__(**kwargs)
+
+    def build(self, input_shape):
+        self.centers = {}
+        for idx in range(self.output_dim):
+            self.centers[idx] = []
+            for c in range(self.num_centers):
+                W = self.add_weight(name='center%d_%d' % (idx, c), shape=(input_shape[1][1],), initializer='uniform',
+                                    trainable=True)
+                self.centers[idx].append(W)
+
+        super(Probability_CLF_Mul_by_task, self).build(input_shape)
+
+    def call(self, inputs, training=None):
+        task_input = inputs[0]
+        x = inputs[1]
+        _, mask = mask_layer_by_task(task_input, x, return_mask=True)
+
+        logits = []
+        for idx in range(self.output_dim):
+            G = []
+
+            for c in range(self.num_centers):
+                G.append(self.gaussian_activation(
+                    tf.boolean_mask(tf.squared_difference(x, self.centers[idx][c]), mask[0], axis=1)))
+
+            G = tf.stack(G, axis=1)
+            P = tf.reduce_sum(G, axis=1) / (
+                    tf.reduce_sum(G, axis=1) + self.num_centers - tf.reduce_max(G, axis=1) * self.num_centers)
+            logits.append(P)
+
+        logits = tf.stack(logits, axis=1)
+
+        if self.activation is not None:
+            logits = self.activation(logits)
+
+        # train- and test-time behaviour are identical, so no in_train_phase switch is needed
+        return mask_layer_by_task(task_input, logits)
+
+    def gaussian_activation(self, x):
+        return tf.exp(-tf.reduce_sum(x, axis=1) / (2. * self.sigma * self.sigma))
+
+    def compute_output_shape(self, input_shape):
+        return input_shape[1][0], self.output_dim
diff --git a/utils/load_data.py b/utils/load_data.py
new file mode 100644
index 0000000000000000000000000000000000000000..ae9ed23f22e32dccc1491f0256f3fdbfcf5843e7
--- /dev/null
+++ b/utils/load_data.py
@@ -0,0 +1,164 @@
+import numpy as np
+import keras.datasets as Datasets
+from configuration import conf
+from keras.utils import np_utils
+
+class Load_data(object):
+
+	def __init__(self):
+		pass
+
+	def load(self):
+		task_type = conf.task_type
+		dataset_name = conf.dataset_name
+		assert task_type in ['Baseline','Sequential_split','Sequential_permute'], 'Task type is not valid'
+		assert dataset_name in ['mnist','cifar10','cifar100','omniglot','timh','fashion_mnist'], 'Dataset is not available'
+
+
+		data = self.load_dataset(dataset_name,conf.is_conv)
+
+		if task_type == 'Baseline':
+			return data
+		elif task_type == 'Sequential_split':
+			return self.split_data(data)
+		elif task_type == 'Sequential_permute':
+			return self.permute_data(data)
+
+
+
+	def load_dataset(self,data_name,is_conv):
+		if data_name in ['omniglot']:
+			from utils.data_utils import load_omniglot
+			(X_train, y_train), (X_test, y_test) = load_omniglot()
+		elif data_name in ['timh']:
+			from utils.data_utils import load_tihm
+			(X_train, y_train), (X_test, y_test) = load_tihm()
+			#self.get_description([X_train,y_train,X_test,y_test])
+		else:
+			dataset_obj = getattr(Datasets,data_name)
+			(X_train, y_train), (X_test, y_test) = dataset_obj.load_data()
+		self.nb_classes = len(np.unique(y_train))
+		conf.num_classes = self.nb_classes
+		if data_name in ['cifar10','cifar100']:
+			is_conv = True
+		if not is_conv:
+			#assert data_name not in ['cifar10','cifar100'], data_name + ' must be trained with is_conv = True'
+			X_train = X_train.reshape(X_train.shape[0],-1)
+			X_test = X_test.reshape(X_test.shape[0],-1)
+		else:
+			if data_name in ['mnist']:
+				X_train = X_train.reshape(X_train.shape[0],28,28,1)
+				X_test = X_test.reshape(X_test.shape[0],28,28,1)
+
+			elif data_name in ['cifar10','cifar100']:
+				X_train = X_train.reshape(X_train.shape[0],32,32,3)
+				X_test = X_test.reshape(X_test.shape[0],32,32,3)
+
+			elif data_name in ['omniglot']:
+				X_train = X_train.reshape(X_train.shape[0],105,105,1)
+				X_test = X_test.reshape(X_test.shape[0],105,105,1)
+
+
+		if np.max(X_train) == 255.:
+			print('Normalizing the training data ... ')
+			X_train = X_train.astype('float32') / 255
+			X_test = X_test.astype('float32') / 255
+
+		data = {
+			'X_train': X_train,
+			'y_train': y_train,
+			'X_test': X_test,
+			'y_test': y_test,
+		}
+		return data
+
+
+
+	def split_data(self,data):
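+		"""Split `data` into one sub-dataset per entry of conf.task_labels,
+		keeping only the samples whose label belongs to that task."""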
+		try:
+			task_labels = conf.task_labels
+		except Exception:
+			raise ValueError('Label is not provided ...')
+		datasets = {}
+		one_hot = conf.enable_one_hot
+		for task_idx,labels in enumerate(task_labels):
+			datasets[task_idx] = {}
+			train_idx = np.in1d(data['y_train'],labels)
+			datasets[task_idx]['X_train'] = data['X_train'][train_idx]
+
+			test_idx = np.in1d(data['y_test'],labels)
+			datasets[task_idx]['X_test'] = data['X_test'][test_idx]
+			if conf.multi_head:
+				if one_hot:
+					datasets[task_idx]['y_train'] = np_utils.to_categorical(data['y_train'][train_idx] - np.min(labels), len(labels))
+					datasets[task_idx]['y_test'] = np_utils.to_categorical(data['y_test'][test_idx] - np.min(labels), len(labels))
+				else:
+					datasets[task_idx]['y_train'] = data['y_train'][train_idx] - np.min(labels)
+					datasets[task_idx]['y_test'] = data['y_test'][test_idx] - np.min(labels)
+			else:	
+				datasets[task_idx]['y_train'] = np_utils.to_categorical(data['y_train'][train_idx], int(self.nb_classes)) if one_hot else data['y_train'][train_idx]
+				datasets[task_idx]['y_test'] = np_utils.to_categorical(data['y_test'][test_idx], int(self.nb_classes)) if one_hot else data['y_test'][test_idx]
+
+
+		return datasets
+		
+	def permute_data(self,data):
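+		"""Build conf.num_tasks copies of `data`, each under a fixed random
+		permutation of the input dimensions (task 0 keeps the original order)."""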
+		num_tasks = conf.num_tasks
+		permutations = []
+		for i in range(num_tasks):
+			idx = np.arange(data['X_train'].shape[1],dtype=int)
+			if i > 0:
+				np.random.shuffle(idx)
+			permutations.append(idx)
+		datasets = {}
+		one_hot = conf.enable_one_hot
+		for task_idx, perm in enumerate(permutations):
+			datasets[task_idx] = {}
+
+			datasets[task_idx]['X_train'] = data['X_train'][:,perm]
+			datasets[task_idx]['X_test'] = data['X_test'][:,perm]
+			datasets[task_idx]['y_train'] = np_utils.to_categorical(data['y_train'], int(self.nb_classes)) if one_hot else data['y_train']
+			datasets[task_idx]['y_test'] = np_utils.to_categorical(data['y_test'], int(self.nb_classes)) if one_hot else data['y_test']
+		
+		return datasets
+
+
+	def get_description(self,data):
+		X_train,y_train,X_test,y_test = data
+		
+		print('X_train : ', X_train.shape)
+		print('X_test : ', X_test.shape)
+
+		print('y_train : ', np.unique(y_train))
+		for l in np.unique(y_train):
+			print('y_train == ',l, y_train[y_train==l].shape)
+			print('y_test == ',l, y_test[y_test==l].shape)
diff --git a/utils/model_utils.py b/utils/model_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..fa82ab49487af3fe471cfeae5d71d084620250d5
--- /dev/null
+++ b/utils/model_utils.py
@@ -0,0 +1,17 @@
+import tensorflow as tf
+from keras.layers import Lambda
+from configuration import conf
+
+
+def mask_layer_by_task(task_input, input_tensor, name=None, return_mask=False):
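+    """Zero the units of `input_tensor` that fall outside the task selected by
+    the one-hot `task_input`; the tensor is treated as conf.num_tasks equal
+    groups of input_tensor.shape[1] // conf.num_tasks units."""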
+    mask = tf.expand_dims(task_input, axis=-1)
+    mask = tf.tile(mask, multiples=[1, 1, input_tensor.shape[1] // conf.num_tasks])
+    mask = tf.keras.layers.Flatten()(mask)
+    if name is None:
+        out = Lambda(lambda x: x * mask)(input_tensor)
+    else:
+        out = Lambda(lambda x: x * mask, name=name)(input_tensor)
+    if return_mask:
+        return out, mask
+    else:
+        return out
diff --git a/utils/predict_utils.py b/utils/predict_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..0c254a9af23521671822371bef2e6321284fa8f5
--- /dev/null
+++ b/utils/predict_utils.py
@@ -0,0 +1,69 @@
+import numpy as np
+from configuration import conf
+
+
+def block_likelihood(res, keep_dim=False):
+    """Mean and std of the likelihood over consecutive blocks of conf.test_batch_size samples."""
+    block_size = conf.test_batch_size
+    extra_index = res.shape[0] % block_size
+    # res[:-0] would be empty, so handle the evenly divisible case explicitly
+    extra_values = res[-extra_index:] if extra_index else res[:0]
+    resize_values = res[:-extra_index] if extra_index else res
+    tlh = resize_values.reshape(-1, block_size)
+    tlh_std = np.std(tlh, axis=1, keepdims=True)
+    tlh_mean = np.mean(tlh, axis=1, keepdims=True)
+    if keep_dim:
+        tlh_mean = np.repeat(tlh_mean, block_size, axis=1).reshape(-1, )
+        if extra_index:
+            extra_mean = np.repeat(np.mean(extra_values), len(extra_values))
+            tlh_mean = np.append(tlh_mean, extra_mean)
+        return tlh_mean
+    else:
+        tlh_mean = tlh_mean.reshape(-1, )
+        tlh_std = tlh_std.reshape(-1, )
+        if extra_index:
+            tlh_mean = np.append(tlh_mean, np.mean(extra_values))
+            tlh_std = np.append(tlh_std, np.std(extra_values))
+        return tlh_mean, tlh_std
+
+
+def get_task_likelihood(model, learned_task, test_task, data_loader):
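+    """Block-wise likelihood (mean, std) that `test_task` data belongs to one of
+    the learned task heads, keeping the best-matching head for every block."""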
+    task_likelihood = []
+    task_likelihood_var = []
+    x, y = data_loader.sample(test_task, whole_set=True, dataset='test')
+    for learned_idx in range(learned_task + 1):
+        task_input = np.zeros([y.shape[0], conf.num_tasks])
+        task_input[:, learned_idx] = 1
+        prediction = model.predict([x, task_input])
+        tlh = np.max(prediction[1], axis=1)
+        tlh, tlh_var = block_likelihood(tlh)
+        task_likelihood.append(tlh)
+        task_likelihood_var.append(tlh_var)
+
+    task_likelihood = np.array(task_likelihood)
+    task_likelihood_var = np.array(task_likelihood_var)
+
+    return np.max(task_likelihood, axis=0), \
+           task_likelihood_var[np.argmax(task_likelihood, axis=0), np.arange(task_likelihood_var.shape[1])]
+
+
+def get_test_acc(model, data_loader, test_on_whole_set=True):
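+    """Average accuracy over all tasks: for every test block the task head with
+    the highest likelihood is selected, then its class prediction is scored."""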
+    test_acc = []
+    for task_idx in range(conf.num_tasks):
+        if test_on_whole_set:
+            x, y = data_loader.sample(task_idx, whole_set=True, dataset='test')
+        else:
+            x, y = data_loader.sample(task_idx, batch_size=conf.test_batch_size, dataset='test')
+        res = []
+        pred = []
+        for test_idx in range(conf.num_tasks):
+            task_input = np.zeros([y.shape[0], conf.num_tasks])
+            task_input[:, test_idx] = 1
+            prediction = model.predict([x, task_input])
+            res.append(block_likelihood(np.max(prediction[1], axis=1), keep_dim=True))
+            pred.append(np.argmax(prediction[0], axis=1))
+
+        res = np.array(res)
+        pred = np.array(pred)
+        acc = np.sum(pred[np.argmax(res, axis=0), np.arange(pred.shape[1])] == np.argmax(y, axis=1)) / y.shape[0]
+        test_acc.append(acc)
+
+    return np.mean(test_acc)
diff --git a/utils/train_utils.py b/utils/train_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..f59c91b7a970024f30f0ba37874768b6c24a9b6a
--- /dev/null
+++ b/utils/train_utils.py
@@ -0,0 +1,9 @@
+import numpy as np
+from configuration import conf
+
+
+def train_with_task(model, task_idx, data_loader):
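+    """Fit the model on one task: the one-hot task id serves both as the masking
+    input and as the regression target of the task-likelihood head."""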
+    x, y = data_loader.sample(task_idx=task_idx, whole_set=True)
+    task_input = np.zeros([y.shape[0], conf.num_tasks])
+    task_input[:, task_idx] = 1
+    model.fit([x, task_input], [y, task_input], epochs=10, batch_size=conf.batch_size, verbose=0)
diff --git a/utils/utils.py b/utils/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..e83e59a033d6394820a7f0e871ac8325e1af1efc
--- /dev/null
+++ b/utils/utils.py
@@ -0,0 +1,8 @@
+import os
+
+def mkdir(path):
+    """ Create a directory if there isn't one already. """
+    try:
+        os.mkdir(path)
+    except OSError:
+        pass
\ No newline at end of file