Skip to content
Snippets Groups Projects
Commit 212453e9 authored by David Hendriks's avatar David Hendriks
Browse files

Updated manual and project description. Made scripts to read out ascii files....

Updated manual and project description. Made scripts to read out ascii files. Working on binaryfile readout.
parent 6f534877
No related branches found
No related tags found
No related merge requests found
File added
...@@ -34,6 +34,7 @@ Start of july - End of August (exact dates YTBD) ...@@ -34,6 +34,7 @@ Start of july - End of August (exact dates YTBD)
* http://adsabs.harvard.edu/abs/2013MNRAS.432.2779B * http://adsabs.harvard.edu/abs/2013MNRAS.432.2779B
* http://adsabs.harvard.edu/abs/2019arXiv190207718B * http://adsabs.harvard.edu/abs/2019arXiv190207718B
* [Website containing some lectures on N-body integration](http://silkroad.bao.ac.cn/web/index.php/seminars/lectures) * [Website containing some lectures on N-body integration](http://silkroad.bao.ac.cn/web/index.php/seminars/lectures)
* https://arxiv.org/abs/1711.09100
## Some watching material: ## Some watching material:
* https://www.cita.utoronto.ca/presentation-archive/?talk_id=745 * https://www.cita.utoronto.ca/presentation-archive/?talk_id=745
......
# ASCII readout script for nbody6++ 'sev.83' files. Loads the data into a
# pandas DataFrame that carries the stellar-evolution table.
# Will serve as an example for other, similar scripts.
import os
import pandas as pd

# TODO: put this into a function or a class.
# Point to the source directory.
source_dir = '/vol/ph/astro_code/dhendriks/work_dir/nbody6'

# Create the header line labels and data line labels.
header_line_labels = ['NS', 'TIME[Myr]']
# Column labels for the per-star data lines.
# NOTE: last label was missing its closing bracket ('LOG10(Teff[K]').
N_LABEL = ['TIME[NB]', 'I', 'NAME', 'K*', 'RI[RC]', 'M[M*]', 'Log10(L[L*])',
           'LOG10(RS[R*])', 'LOG10(Teff[K])']

# Go through the file: the first line is the header, every following
# whitespace-separated line is one star.
with open(os.path.join(source_dir, 'sev.83_0'), 'r') as f:
    header_1_data = f.readline().split()
    data = [line.split() for line in f]

df = pd.DataFrame(data, columns=N_LABEL)
File added
...@@ -381,5 +381,5 @@ def main(): ...@@ -381,5 +381,5 @@ def main():
f5.close() f5.close()
if __name__ == '__main__': # if __name__ == '__main__':
main() # main()
\ No newline at end of file \ No newline at end of file
# Initially written by Mark Gieles.
# Modified by David Hendriks
# Code to convert NBODY6 OUT3 file to HDF5 file format
import numpy as np
import struct
import h5py
import sys
import argparse
# TODO: Fix cuda
# import pycuda.autoinit
# import pycuda.driver as drv
# from pycuda.compiler import SourceModule
import matplotlib.pyplot as plt
import os
import argparse
# Script for nbody6++ binary output files to hdf5.
def read_nbody6pp(f5, input_filename, sev):
    """
    Read in the nbody6 output file(s).

    NOTE(review): work-in-progress readout for nbody6++ binary output. It
    currently only reads and prints the first header records and then calls
    quit() -- nothing is written to the hdf5 file yet.

    Keyword arguments:
    f5 -- The hdf5 object where the data will be written to
    input_filename -- The OUT3 file generated by nbody6
    sev -- flag to include output of stellar evolution
    """
    # open NBODY6 file (binary mode, parsed with struct.unpack)
    with open(input_filename, "rb") as f:
        # start looping over time and reading the nbody6 data.
        # itime tracks the number of timesteps outputted.
        itime = -1
        # runs on some computers seem to generate extra bytes after the record length.
        # 'extra' and its associated reads below take care of that.
        extra = 0
        # Do stuff with byte.
        byte = f.read(4) # begin header block
        model = 0
        # Read out first header:
        blocksize1 = struct.unpack('i', byte)[0] # header block size
        print(blocksize1)
        if extra == 1:
            f.read(4)
        itime = itime + 1
        # read the header for this time
        ntot = struct.unpack('i',f.read(4))[0] #ntot
        if ntot < 10 and itime == 0:
            # check for a nonsensical ntot value,
            # which is symptomatic of bizarre extra record padding that occurs on some
            # systems. if needed, include extra reads to deal with these.
            extra = 1
            ntot = struct.unpack('i',f.read(4))[0]
            print('extra! '+str(extra))
        model = struct.unpack('i',f.read(4))[0] #model
        nrun = struct.unpack('i',f.read(4))[0] #nrun
        nk = struct.unpack('i',f.read(4))[0] #nk
        print(f'ntot:{ntot}, model:{model}, nrun:{nrun}, nk:{nk}')
        #print(struct.unpack('i',f.read(4)))
        # Readout second header:
        #byte = f.read(4) # begin header block
        #blocksize2 = struct.unpack('i', byte)[0] # header block size
        #print(blocksize2)
        # NOTE(review): these four values are unpacked as 32-bit ints; 'time'
        # is presumably a float in the Fortran record -- TODO confirm against
        # the nbody6++ writer before trusting these numbers.
        time = struct.unpack('i', f.read(4))[0]
        npairs = struct.unpack('i', f.read(4))[0]
        ding2 = struct.unpack('i', f.read(4))[0]
        ding3 = struct.unpack('i', f.read(4))[0]
        print(f'time:{time} npairs {npairs} ding2{ding2} ding3{ding3}')
        # print(blocksize1)
        # if extra == 1:
        #     f.read(4)
        # itime = itime + 1
        # # read the header for this time
        # ntot = struct.unpack('i',f.read(4))[0] #ntot
        # if ntot < 10 and itime == 0:
        #     # check for a nonsensical ntot value,
        #     # which is symptomatic of bizarre extra record padding that occurs on some
        #     # systems. if needed, include extra reads to deal with these.
        #     extra = 1
        #     ntot = struct.unpack('i',f.read(4))[0]
        #     print('extra! '+str(extra))
        # model = struct.unpack('i',f.read(4))[0] #model
        # nrun = struct.unpack('i',f.read(4))[0] #nrun
        # nk = struct.unpack('i',f.read(4))[0] #nk
        # Development dead end: stop after dumping the first headers.
        quit()
# # while byte:
# blocksize1 = struct.unpack('i', byte)[0] # header block size
# print(blocksize1)
# if extra == 1:
# f.read(4)
# itime = itime + 1
# # read the header for this time
# ntot = struct.unpack('i',f.read(4))[0] #ntot
# if ntot < 10 and itime == 0:
# # check for a nonsensical ntot value,
# # which is symptomatic of bizarre extra record padding that occurs on some
# # systems. if needed, include extra reads to deal with these.
# extra = 1
# ntot = struct.unpack('i',f.read(4))[0]
# print('extra! '+str(extra))
# model = struct.unpack('i',f.read(4))[0] #model
# nrun = struct.unpack('i',f.read(4))[0] #nrun
# nk = struct.unpack('i',f.read(4))[0] #nk
def read_nbody6(f5, file, sev):
    """Read in the nbody6 OUT3 file and write every snapshot to HDF5.

    Keyword arguments:
    f5 -- open h5py File object the snapshots are written to
    file -- the OUT3 file generated by nbody6
    sev -- flag to include output of stellar evolution (kstar/lum/teff)
    """
    # Open the NBODY6 file in *binary* mode: struct.unpack needs bytes, and
    # EOF from f.read() is then b''. The original text-mode open ('r') broke
    # both the unpacking and the `byte != ""` loop test in Python 3; the
    # `with` block also guarantees the file is closed on any error.
    with open(file, 'rb') as f:
        # itime tracks the number of timesteps outputted.
        itime = -1
        # runs on some computers seem to generate extra bytes after the record
        # length. 'extra' and its associated reads below take care of that.
        extra = 0
        byte = f.read(4)  # begin header block
        model = 0
        while byte:
            blocksize1 = struct.unpack('i', byte)[0]  # header block size
            if extra == 1:
                f.read(4)
            itime = itime + 1
            # read the header for this time
            ntot = struct.unpack('i', f.read(4))[0]  # ntot
            if ntot < 10 and itime == 0:
                # check for a nonsensical ntot value,
                # which is symptomatic of bizarre extra record padding that
                # occurs on some systems. if needed, include extra reads to
                # deal with these.
                extra = 1
                ntot = struct.unpack('i', f.read(4))[0]
                print('extra! '+str(extra))
            model = struct.unpack('i', f.read(4))[0]  # model
            nrun = struct.unpack('i', f.read(4))[0]   # nrun
            nk = struct.unpack('i', f.read(4))[0]     # nk
            blocksize2 = struct.unpack('i', f.read(4))[0]  # end header block size
            if extra == 1:
                f.read(4)
            # check for consistency of the Fortran record markers
            if blocksize1 != blocksize2:
                print('header trouble! t = '+str(itime)+' '+str(blocksize1)+' '+str(blocksize2))
                sys.exit(1)
            # now read the star data
            blocksize1 = struct.unpack('i', f.read(4))[0]  # begin data block size
            if extra == 1:
                f.read(4)
            # Sverre's 'as' header floats, with ntot appended at the end.
            alist = [struct.unpack('f', f.read(4))[0] for _ in range(nk)]
            alist.append(ntot)
            # columns: mass, x, y, z, vx, vy, vz, name, kstar, lum, teff
            stardata = np.zeros((ntot, 11))
            # masses
            stardata[:, 0] = [struct.unpack('f', f.read(4))[0] for _ in range(ntot)]
            # positions: stored interleaved (x, y, z) per star
            for i in range(ntot):
                stardata[i, 1] = struct.unpack('f', f.read(4))[0]
                stardata[i, 2] = struct.unpack('f', f.read(4))[0]
                stardata[i, 3] = struct.unpack('f', f.read(4))[0]
            # velocities: stored interleaved (vx, vy, vz) per star
            for i in range(ntot):
                stardata[i, 4] = struct.unpack('f', f.read(4))[0]
                stardata[i, 5] = struct.unpack('f', f.read(4))[0]
                stardata[i, 6] = struct.unpack('f', f.read(4))[0]
            # names (integer particle ids)
            stardata[:, 7] = [struct.unpack('i', f.read(4))[0] for _ in range(ntot)]
            if sev:
                # kstar (stellar type)
                stardata[:, 8] = [struct.unpack('i', f.read(4))[0] for _ in range(ntot)]
                # lum
                stardata[:, 9] = [struct.unpack('f', f.read(4))[0] for _ in range(ntot)]
                # teff
                stardata[:, 10] = [struct.unpack('f', f.read(4))[0] for _ in range(ntot)]
            blocksize2 = struct.unpack('i', f.read(4))[0]  # end data block size
            if extra == 1:
                f.read(4)
            # check for consistency
            if blocksize1 != blocksize2:
                print('star data trouble! '+str(itime))
                sys.exit(3)
            # split columns into mass / pos / vel / name / kstar / lum / teff
            splitarray = np.hsplit(stardata, [1, 4, 7, 8, 9, 10])
            # Output snapshot in hdf5 file
            h5output(f5, alist, splitarray, model-1, sev)
            # next header blocksize for loop control
            byte = f.read(4)  # begin header block
def h5output(f5, alist, splitarray, model, sev):
    """Write one nbody6 snapshot into the hdf5 file.

    Keyword arguments:
    f5 -- open h5py File object to write into
    alist -- header data from the nbody6 data; this is sverre's AS list
    splitarray -- the parsed star data, split into
                  [mass, pos, vel, name, kstar, lum, teff] column groups
    model -- snapshot index (used as zero-padded group name)
    sev -- whether stellar-evolution columns are present
    """
    # One zero-padded top-level group per snapshot, with "stars" and
    # "cluster" subgroups underneath it.
    grp_name = f"{model:05d}"
    f5.create_group(grp_name)
    # number of models so far; refreshed each time a snapshot is written
    f5.attrs["nmod"] = model + 1
    grp_stars = f5.create_group(f"{grp_name}/stars")
    grp_cluster = f5.create_group(f"{grp_name}/cluster")

    # Bound-particle bookkeeping from masses, positions, velocities,
    # the density centre RDENS and the binary count NBIN.
    nbound, mbound, vcom, bound, kinbound, potbound = compute_energies(
        splitarray[0][:, 0], splitarray[1], splitarray[2],
        np.array(alist[6:9]), alist[1])

    # Assumed layout of the AS list:
    #   AS(1) = TTOT,  AS(2) = NBIN,  AS(3) = RBAR,  AS(4) = ZMBAR,
    #   AS(5) = TSTAR, AS(6) = VSTAR, AS(7:9) = RDENS, AS(10:12) = RG,
    #   AS(13:15) = VG, AS(16) = RC, AS(17) = NC

    # Per-star datasets (scaled from N-body to physical units); unit
    # information is attached as attributes right where each is created.
    ds_mass = grp_stars.create_dataset('mass', data=alist[3] * splitarray[0][:, 0], dtype='float32')
    ds_mass.attrs["unit"] = "[Msun]"
    ds_pos = grp_stars.create_dataset('pos', data=alist[2] * splitarray[1], dtype='float32')
    ds_pos.attrs["unit"] = "[pc] in guide centre reference frame"
    ds_vel = grp_stars.create_dataset('vel', data=alist[5] * splitarray[2], dtype='float32')
    ds_vel.attrs["unit"] = "[km/s] in guide centre reference frame"
    grp_stars.create_dataset('id', data=splitarray[3][:, 0], dtype='i')
    if sev:
        grp_stars.create_dataset('kstar', data=splitarray[4][:, 0], dtype='i')
        ds_lum = grp_stars.create_dataset('lum', data=splitarray[5][:, 0], dtype='float32')
        ds_lum.attrs["unit"] = "[log10 Lsun]"
        ds_teff = grp_stars.create_dataset('teff', data=splitarray[6][:, 0], dtype='float32')
        ds_teff.attrs["unit"] = "[log10 Teff]"
    grp_stars.create_dataset('bound', data=bound, dtype='bool')

    # Cluster-level datasets.
    ds_age = grp_cluster.create_dataset('age', data=alist[0] * alist[4])
    ds_age.attrs["unit"] = "[Myr]"
    grp_cluster.create_dataset('nbin', data=alist[1], dtype='i')
    ds_rdens = grp_cluster.create_dataset('rdens', data=np.array(alist[6:9]) * alist[2])
    ds_rdens.attrs["unit"] = "[pc] in guide centre reference frame"
    ds_vcom = grp_cluster.create_dataset('vcom', data=np.array(vcom) * alist[5])
    ds_vcom.attrs["unit"] = "[km/s] in guide centre reference frame"
    ds_rg = grp_cluster.create_dataset('rg', data=np.array(alist[9:12]))
    ds_rg.attrs["unit"] = "[kpc] in Galactic reference frame"
    ds_vg = grp_cluster.create_dataset('vg', data=np.array(alist[12:15]))
    ds_vg.attrs["unit"] = "[km/s] in Galactic reference frame"
    ds_rc = grp_cluster.create_dataset('rcore', data=alist[2] * alist[15])
    ds_rc.attrs["unit"] = "[pc]"

    # Half-mass radii (number-weighted, inner-300, mass-weighted).
    rh = compute_rh(alist[3] * splitarray[0][:, 0], alist[2] * splitarray[1],
                    np.array(alist[6:9]) * alist[2], splitarray[4][:, 0], bound, sev)
    grp_cluster.create_dataset('rhn', data=rh[0], dtype='float32')
    grp_cluster.create_dataset('rhn300', data=rh[1], dtype='float32')
    grp_cluster.create_dataset('rhm', data=rh[2], dtype='float32')
    grp_cluster.create_dataset('ncore', data=alist[16], dtype='i')
    grp_cluster.create_dataset('nbound', data=nbound, dtype='i')
    ds_mb = grp_cluster.create_dataset('mbound', data=mbound * alist[3], dtype='float32')
    ds_mb.attrs["unit"] = "[Msun]"
    grp_cluster.create_dataset('kinbound', data=kinbound, dtype='float32')
    grp_cluster.create_dataset('potbound', data=potbound, dtype='float32')

    print(" T = %10.3f [NBODY] %10.3f [MYR]; M(bound) = %10.3f, rhn = %10.3f; rhm = %10.3f"%(alist[0],alist[0]*alist[4],mbound, rh[0], rh[2]))
    return ()
def main():
    """Command-line entry point: convert NBODY6++ output files to one HDF5 file.

    Writes all snapshots from every input file into 'out.hdf5'.
    """
    parser = argparse.ArgumentParser(description='Converts NBODY6++ output file to HDF5 format')
    parser.add_argument('-f', dest='files', action="store", default='OUT3',
                        help='NBODY6 files to convert [default="OUT3"]. delimited list input.')
    parser.add_argument('-s', dest='sev', action="store_true", default=False,
                        help='activate reading stellar evolution data [default=False]')
    args = parser.parse_args()
    # Use a context manager so the hdf5 file is flushed and closed even if a
    # conversion raises part-way through (the old code leaked f5 on error).
    with h5py.File('out.hdf5', 'w') as f5:
        # Loop over all the given binary output files (whitespace-delimited).
        for file in args.files.split():
            print(f"Converting {file}")
            read_nbody6pp(f5, file, args.sev)


if __name__ == '__main__':
    main()
# def main():
# parser = argparse.ArgumentParser(description='Converts OUT3 data from NBODY6 to HDF5 format')
# parser.add_argument('-f',dest='files',action="store",default='OUT3',help='NBODY6 files to convert [default="OUT3"]')
# parser.add_argument('-s', dest='sev', action="store_true", default=False, help='activate reading stellar evolution data [default=False]')
# args = parser.parse_args()
# # open up the hdf5 file!
# f5 = h5py.File('out.hdf5','w')
# # Loop over all NBODY6 OUT3 files
# for file in args.files.split():
# print " File = ",file
# read_nbody6(f5, file, args.sev)
# f5.close()
\ No newline at end of file
from scipy.io import FortranFile
import struct

# Scratch script: probe the Fortran-unformatted conf.3 snapshot file by
# reading its first record of integers.
fname = '/vol/ph/astro_code/dhendriks/work_dir/nbody6/conf.3_0'
f = FortranFile(fname, 'r')
f.read_ints()

# Earlier experiments, kept for reference:
# a = f.read_reals(dtype=float)
# print(a)

# import numpy as np
# with open(fname, 'rb') as f:
#     for k in range(4):
#         data = np.fromfile(f, dtype=np.float32, count=2 * 3)
#         print(np.reshape(data, (2, 3)))

# with open(fname, 'rb') as f:
#     while True:
#         raw = f.read(4)
#         if len(raw) != 4:
#             break  # ignore the incomplete "record" if any
#         record = struct.unpack("I", raw)
#         print(record)

# with open(fname, 'rb') as f:
#     for line in f:
#         print(f.readline())
appdirs==1.4.3
atomicwrites==1.3.0
attrs==19.1.0
cycler==0.10.0
decorator==4.4.0
h5py==2.9.0
kiwisolver==1.1.0
Mako==1.0.10
MarkupSafe==1.1.1
matplotlib==3.1.0
more-itertools==7.0.0
numpy==1.16.3
pandas==0.24.2
pluggy==0.11.0
py==1.8.0
pyparsing==2.4.0
pytest==4.5.0
python-dateutil==2.8.0
pytools==2019.1.1
pytz==2019.1
six==1.12.0
wcwidth==0.1.7
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment