diff --git a/cnn_comparison/exp_test_results_detected.pkl b/cnn_comparison/exp_test_results_detected.pkl
new file mode 100644
index 0000000000000000000000000000000000000000..22373df5c9ad672376f3f9451c1087df6ebd3bc6
Binary files /dev/null and b/cnn_comparison/exp_test_results_detected.pkl differ
diff --git a/cnn_comparison/exp_test_results_ground.pkl b/cnn_comparison/exp_test_results_ground.pkl
new file mode 100644
index 0000000000000000000000000000000000000000..6535e99eebbb2f7feb8738cc61d9daf2cefe45e8
Binary files /dev/null and b/cnn_comparison/exp_test_results_ground.pkl differ
diff --git a/rf_model.py b/rf_model.py
index d623dff6a69d5dc4cfd1bb867eb818797dfea789..13a35a04b755e49fe3114fd446bdb69c7be101ae 100644
--- a/rf_model.py
+++ b/rf_model.py
@@ -174,9 +174,29 @@ def probs_baseline():
     
     return baseline
 
-def predict_all():
+def probs_smthelse():
+    with open('./cnn_comparison/exp_test_results_ground.pkl', "rb") as f:
+        [logits_matrix, targets_list, class_to_idx, video_ids] = pickle.load(f)
+    
+    with open('./features_validation/ids.txt', 'r') as f:
+        ids = [int(line.rstrip()) for line in f.readlines()]
+        ids = np.asarray(ids)
+    
+    flat_list = [item for sublist in video_ids for item in sublist]
+    baseline_ids = list(int(item.strip()) for item in flat_list)
+    lookup = dict(zip(baseline_ids, logits_matrix))
+    
+    # align CNN logit rows with our validation video ids; ids with no match stay all-zero
+    baseline = np.zeros((len(ids),logits_matrix.shape[1]))
+    for i in range(len(ids)):
+        if ids[i] in lookup:
+            baseline[i,:] = lookup[ids[i]]
+    
+    return baseline
+
+def predict_all0():
     # combine external predictions for 20bn cnn baseline
-    baseline = probs_baseline()
+    baseline = probs_smthelse()
     #baseline = np.zeros((len(mtest),17))
     
     # default to null class prediction
@@ -211,6 +231,43 @@ def predict_all():
     
     return Y
 
+def predict_all():
+    # per-class CNN baseline probabilities, row-aligned to our test video ids
+    baseline = probs_smthelse()
+    #baseline = np.zeros((len(mtest),17))  # (debug alternative: zero-out the CNN baseline)
+    
+    # start every sample at the null class (0); overwritten below when a class wins
+    Y = np.zeros((len(mtest),), dtype=int)
+    
+    # per-model RF predictions and probabilities (rf presumably keyed by 1-based class index — confirm)
+    preds = {}
+    probs = {}
+    for idx in rf:
+        preds[idx] = rf[idx].predict(xtest[idx])
+        probs[idx] = rf[idx].predict_proba(xtest[idx])  # NOTE(review): probs is computed but never used below
+        
+    # choose a label for each test sample
+    for i in range(len(mtest)):
+        mlabel = 0
+        max_prob = 0.0
+        
+        # among RF models that fired, keep the one with the highest CNN baseline prob
+        for idx in rf:
+            if preds[idx][i] and baseline[i,idx-1] > max_prob:
+                mlabel = idx
+                max_prob = baseline[i,idx-1]
+        
+        # if no RF fired, fall back to the highest CNN baseline probability alone
+        if mlabel == 0:
+            for idx in rf:
+                if baseline[i,idx-1] > max_prob:
+                    mlabel = idx
+                    max_prob = baseline[i,idx-1]
+        
+        Y[i] = mlabel
+    
+    return Y
+
 if __name__ == "__main__":
     load_labels()