diff --git a/binarycpython/utils/grid.py b/binarycpython/utils/grid.py
index fe4258c62286154dac4353d32b0eda0ebf81dda0..cc2a929c617a4c8e1366cc4e7d2903e0de01ba79 100644
--- a/binarycpython/utils/grid.py
+++ b/binarycpython/utils/grid.py
@@ -225,8 +225,8 @@ class Population(
         # non-existant subdicts.
         self.grid_results = AutoVivificationDict()
 
-        # Create location where ensemble results are written to
-        self.grid_ensemble_results = {}
+        # Create grid ensemble data location
+        self.grid_ensemble_results = self._new_grid_ensemble_results()
 
         # add metadata
         self.add_system_metadata()
@@ -733,7 +733,7 @@ class Population(
 
         # empty results
         self.grid_results = AutoVivificationDict()
-        self.grid_ensemble_results = {}
+        self.grid_ensemble_results = self._new_grid_ensemble_results()
 
         # set number of processes/cores we want to use
         self._set_nprocesses()
@@ -2393,3 +2393,13 @@ class Population(
                 self.grid_options["verbosity"],
                 3,
             )
+
+    def _new_grid_ensemble_results(self):
+        """
+        Function to return a new grid_ensemble_results dict: this should
+        be pre-filled with sub-dicts to prevent later errors.
+        """
+        return {
+            'metadata': {},
+            'ensemble': {}
+        }
diff --git a/binarycpython/utils/population_extensions/HPC.py b/binarycpython/utils/population_extensions/HPC.py
index 18934e137969c4a4f6249a3e01058e0b82f444f3..dadfab449237e97ed06b2d77c3014665ba5c0244 100644
--- a/binarycpython/utils/population_extensions/HPC.py
+++ b/binarycpython/utils/population_extensions/HPC.py
@@ -91,7 +91,7 @@ class HPC(condor, slurm):
         # make the output before checking anything, we do
         # this to remove any asynchronicity
         lines = []
-        for i in range(0, n):
+        for i in self.HPC_job_id_range():
             lines += [
                 os.path.join(
                     prefix, "{hpc_jobid}.{i}.gz\n".format(hpc_jobid=hpc_jobid, i=i)
@@ -558,7 +558,7 @@ class HPC(condor, slurm):
             joinfiles = self.HPC_load_joinfiles_list()
             joiningfile = self.HPC_path("joining")
             print(
-                "Joinfile list n={n} (should be {m}".format(
+                "Joinfile list n={n} (should be {m})".format(
                     n=len(joinfiles), m=self.HPC_njobs()
                 )
             )
@@ -680,8 +680,9 @@ class HPC(condor, slurm):
                 d["status"][x] = 0
                 d["joblist"][x] = []
 
-            for i in range(0, n):
+            for i in self.HPC_job_id_range():
                 s = self.HPC_get_status(job_id=_id, job_index=i)
+                #print("HPC get job",_id,':',i," status=",s)
                 if s is None:
                     s = "unknown"
                 if not s in d["status"]:
@@ -713,8 +714,6 @@ class HPC(condor, slurm):
     def HPC_queue_stats(self): # pragma: no cover
         """
         Function that returns the queue stats for the HPC grid
-
-        TODO: the slurm_queue_stats doesntt actually return anything
         """
 
         if self.grid_options["slurm"] > 0:
@@ -725,3 +724,13 @@ class HPC(condor, slurm):
             x = None
 
         return x
+
+    def HPC_job_id_range(self):
+        n = self.HPC_njobs()
+        if self.grid_options["slurm"] > 0:
+            return range(1, n+1)
+        elif self.grid_options["condor"] > 0:
+            return range(0, n)
+        else:
+            print("Called HPC_job_id_range when not running an HPC grid : you cannot do this.")
+            raise RuntimeError("HPC_job_id_range() requires an active slurm or condor grid")
diff --git a/binarycpython/utils/population_extensions/analytics.py b/binarycpython/utils/population_extensions/analytics.py
index b6fba1e3df4061841d4d6c150a60da1060005c08..57bbb25d6b469d471c39a0c582a6a9684753944f 100644
--- a/binarycpython/utils/population_extensions/analytics.py
+++ b/binarycpython/utils/population_extensions/analytics.py
@@ -61,6 +61,7 @@ class analytics:
         if "metadata" in self.grid_ensemble_results:
             # Add analytics dict to the metadata too:
             self.grid_ensemble_results["metadata"].update(analytics_dict)
+            print("Added analytics to metadata")
             self.add_system_metadata()
         else:
             # use existing analytics dict
diff --git a/binarycpython/utils/population_extensions/dataIO.py b/binarycpython/utils/population_extensions/dataIO.py
index c7e4e94db9fccfb1bba21399278166db30eaacca..40d485d20004a2f746f08f43e18bce92076ca7fd 100644
--- a/binarycpython/utils/population_extensions/dataIO.py
+++ b/binarycpython/utils/population_extensions/dataIO.py
@@ -159,7 +159,6 @@ class dataIO:
             except Exception as e:
                 obj = None
                 print("Loading of the compressed object went wrong: {}".format(e))
-
         return obj
 
     def merge_populations(self, refpop, newpop):
@@ -176,10 +175,11 @@ class dataIO:
         Note:
             The file should be saved using save_population_object()
         """
-
+
         # combine data
-        refpop.grid_results = merge_dicts(refpop.grid_results, newpop.grid_results)
-
+        refpop.grid_results = merge_dicts(refpop.grid_results,
+                                          newpop.grid_results)
+
         # special cases
         maxmem = 0
         if "max_memory_use" in refpop.grid_ensemble_results.get(
@@ -210,7 +210,8 @@ class dataIO:
 
             # merge the ensemble dicts
             refpop.grid_ensemble_results = merge_dicts(
-                refpop.grid_ensemble_results, newpop.grid_ensemble_results
+                refpop.grid_ensemble_results,
+                newpop.grid_ensemble_results
             )
 
             # set special cases
@@ -261,8 +262,9 @@ class dataIO:
             n = newpop.grid_ensemble_results["metadata"]["_count"]
         else:
             n = -1
+
         print("Loaded data from {n} stars".format(n=n))
-
+
         # merge with refpop
         self.merge_populations(refpop, newpop)
 
diff --git a/binarycpython/utils/population_extensions/slurm.py b/binarycpython/utils/population_extensions/slurm.py
index 5dc8234b3d1931994fa7be1c43da41c826e47292..4f1f4d3c86b102409ee6487e258676ff97efe8cb 100644
--- a/binarycpython/utils/population_extensions/slurm.py
+++ b/binarycpython/utils/population_extensions/slurm.py
@@ -110,6 +110,12 @@ class slurm:
                 f.close()
                 self.NFS_flush_hack(status_file)
 
+        print("Have set status in", status_file, "to", string)
+        with self.open(status_file, "r", encoding="utf-8") as f:
+            print("Contents")
+            print(f.readlines())
+            # no explicit close needed: the with-block closes the file on exit
+
     def get_slurm_status(self, jobid=None, jobarrayindex=None, slurm_dir=None):
         """
         Get and return the slurm status string corresponing to the self object, or jobid.jobarrayindex if they are passed in. If no status is found, returns an empty string.
@@ -447,7 +453,7 @@ echo \"running\" > {slurm_dir}/status/$SLURM_ARRAY_JOB_ID.$SLURM_ARRAY_TASK_ID
         """
         Function to XXX
 
-        TODO: is this function finished?
+        TODO: implement this function or remove the placeholder
         """
 
         return None