diff --git a/1DCNN.py b/1DCNN.py
index 60a618b40b534a4c4d516803bbbc92247495f423..b52c40aba98769723f7b8575d133b1fa5b873947 100644
--- a/1DCNN.py
+++ b/1DCNN.py
@@ -41,3 +41,110 @@ target = Extract(data)  # sleep apn or not
 data = np.array(data)
 target = np.array(target)
 
+# imports needed by the code below (skip any already imported at the top of
+# the file; the KerasClassifier path assumes standalone Keras 2.x)
+from keras.models import Sequential
+from keras.layers import Dense, Conv1D, Dropout, MaxPooling1D, Flatten
+from keras.wrappers.scikit_learn import KerasClassifier
+from sklearn.model_selection import StratifiedKFold, cross_val_score
+from sklearn.preprocessing import StandardScaler
+from sklearn.pipeline import Pipeline
+
+# baseline model: one fully connected hidden layer over the 6001 input features
+def create_baseline():
+	# create model
+	model = Sequential()
+	model.add(Dense(60, input_dim=6001, activation='relu'))
+	model.add(Dense(1, activation='sigmoid'))
+	# Compile model
+	model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
+	return model
+
+# evaluate the baseline model with 10-fold cross-validation on the raw (unstandardized) dataset
+estimator = KerasClassifier(build_fn=create_baseline, epochs=100, batch_size=5, verbose=0)
+kfold = StratifiedKFold(n_splits=10, shuffle=True)
+results = cross_val_score(estimator, data, target, cv=kfold)
+print("Baseline: %.2f%% (%.2f%%)" % (results.mean()*100, results.std()*100))
+
+
+# evaluate baseline model with standardized dataset
+estimators = []
+estimators.append(('standardize', StandardScaler()))
+estimators.append(('mlp', KerasClassifier(build_fn=create_baseline, epochs=100, batch_size=5, verbose=0)))
+pipeline = Pipeline(estimators)
+kfold = StratifiedKFold(n_splits=10, shuffle=True)
+results = cross_val_score(pipeline, data, target, cv=kfold)
+print("Standardized: %.2f%% (%.2f%%)" % (results.mean()*100, results.std()*100))
+
+
+# smaller model
+def create_smaller():
+	# create model
+	model = Sequential()
+	model.add(Dense(30, input_dim=6001, activation='relu'))  # same 6001 inputs as the baseline, half the hidden units
+	model.add(Dense(1, activation='sigmoid'))
+	# Compile model
+	model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
+	return model
+estimators = []
+estimators.append(('standardize', StandardScaler()))
+estimators.append(('mlp', KerasClassifier(build_fn=create_smaller, epochs=100, batch_size=5, verbose=0)))
+pipeline = Pipeline(estimators)
+kfold = StratifiedKFold(n_splits=10, shuffle=True)
+results = cross_val_score(pipeline, data, target, cv=kfold)
+print("Smaller: %.2f%% (%.2f%%)" % (results.mean()*100, results.std()*100))
+
+
+# larger model
+def create_larger():
+	# create model
+	model = Sequential()
+	model.add(Dense(60, input_dim=6001, activation='relu'))
+	model.add(Dense(30, activation='relu'))
+	model.add(Dense(1, activation='sigmoid'))
+	# Compile model
+	model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
+	return model
+estimators = []
+estimators.append(('standardize', StandardScaler()))
+estimators.append(('mlp', KerasClassifier(build_fn=create_larger, epochs=100, batch_size=5, verbose=0)))
+pipeline = Pipeline(estimators)
+kfold = StratifiedKFold(n_splits=10, shuffle=True)
+results = cross_val_score(pipeline, data, target, cv=kfold)
+print("Larger: %.2f%% (%.2f%%)" % (results.mean()*100, results.std()*100))
+
+
+# 1D CNN (the "attempted adding layers" experiment), restored as the
+# evaluate_model function it was drafted from
+def evaluate_model(trainX, trainy, testX, testy):
+	verbose, epochs, batch_size = 0, 10, 32
+	# binary sleep-apnea target, so the model ends in one sigmoid unit
+	n_timesteps, n_features = trainX.shape[1], trainX.shape[2]
+
+	model = Sequential()
+	model.add(Conv1D(filters=64, kernel_size=3, activation='relu', input_shape=(n_timesteps, n_features)))
+	model.add(Conv1D(filters=64, kernel_size=3, activation='relu'))
+	model.add(Dropout(0.5))
+	model.add(MaxPooling1D(pool_size=2))
+	model.add(Flatten())
+	model.add(Dense(100, activation='relu'))
+	model.add(Dense(1, activation='sigmoid'))
+	model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
+	# fit the network, then score it on the held-out set
+	model.fit(trainX, trainy, epochs=epochs, batch_size=batch_size, verbose=verbose)
+	_, accuracy = model.evaluate(testX, testy, batch_size=batch_size, verbose=0)
+	return accuracy
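+
+# hedged usage sketch: Conv1D needs 3D input; 'data' is assumed to be 2D
+# (samples, 6001) to match input_dim=6001 above, so a singleton channel
+# axis is added; the 80/20 stratified split is an illustrative assumption
+from sklearn.model_selection import train_test_split
+X = data.reshape((data.shape[0], data.shape[1], 1))
+trainX, testX, trainy, testy = train_test_split(X, target, test_size=0.2, stratify=target)
+print("CNN: %.2f%%" % (evaluate_model(trainX, trainy, testX, testy) * 100))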