'''
Classes and functions for TraceWin
- Note for clean-up
* Old LATTICE and MATCH classes can be deleted once trajectory
correction is established with new classes.
'''
#---- Lib
from numpy import *
from numpy.linalg import det
from struct import pack,unpack
from itertools import chain
from subprocess import check_output
from os import system
from os.path import isdir,isfile
from sys import exit
from lib_tw_elem import *
#-------- Classes
#---- Lattice and project
class LATTICE:
def __init__(self,file_name_lat,file_name_fmap=[],freq=352.21,gamma=1.0):
'''
:param file_name_lat: name of lattice file
:type file_name_lat: str
:param file_name_fmap: list of file map file(-s)
:type file_name_fmap: list or str
:param freq: RF frequency
        :param gamma: relativistic gamma at the lattice entrance
        '''
if isinstance(file_name_fmap,basestring): file_name_fmap=[file_name_fmap]
dic_cls={'DRIFT' : DRIFT ,
'QUAD' : QUAD ,
'THIN_STEERING' : THIN_STEERING ,
'GAP' : GAP ,
'DTL_CEL' : DTL_CEL ,
#
'BEND' : BEND ,
'EDGE' : EDGE ,
'APERTURE' : APERTURE ,
'DIAG_POSITION' : DIAG_POSITION ,
#
'STEERER' : STEERER ,
'CHOPPER' : CHOPPER ,
#
'ADJUST' : ADJUST ,
'FREQ' : FREQ ,
'MARKER' : MARKER ,
#
'ERROR_BEAM_STAT' : ERROR_BEAM_STAT ,
'ERROR_BEAM_DYN' : ERROR_BEAM_DYN ,
'ERROR_QUAD_NCPL_STAT': ERROR_QUAD_NCPL_STAT,
'ERROR_QUAD_CPL_STAT' : ERROR_QUAD_CPL_STAT ,
'ERROR_CAV_NCPL_STAT' : ERROR_CAV_NCPL_STAT ,
'ERROR_CAV_NCPL_DYN' : ERROR_CAV_NCPL_DYN ,
'ERROR_CAV_CPL_STAT' : ERROR_CAV_CPL_STAT ,
'ERROR_CAV_CPL_DYN' : ERROR_CAV_CPL_DYN ,
'ERROR_STAT_FILE' : ERROR_STAT_FILE ,
'ERROR_DYN_FILE' : ERROR_DYN_FILE }
# Field map dict
dic_fmap={}
for file_name_fmap_i in file_name_fmap:
            name_fmap='.'.join(file_name_fmap_i.split('/')[-1].split('.')[:-1]) # Strip path and extension
dic_fmap[name_fmap]=FIELD_MAP_DATA(file_name_fmap_i)
# Go through the lat file
with open(file_name_lat) as file:
lst=[]
for lin in file:
lin=lin.partition(';')[0] # Remove comment
if lin.split()!=[]: # Remove empty line
# Split a line
if ':' in lin:
name=lin.partition(':')[0].split()[0 ]
typ =lin.partition(':')[2].split()[0 ].upper()
para=lin.partition(':')[2].split()[1:]
else:
name=''
typ =lin.split()[0 ].upper()
para=lin.split()[1:]
# Map to a class
if typ == 'FIELD_MAP' : lst.append( FIELD_MAP(name,typ,para,dic_fmap))
elif typ in dic_cls.keys() : lst.append(dic_cls[typ](name,typ,para) )
elif 'DIAG' in typ : lst.append( DIAG(name,typ,para) )
else : lst.append( COMM(name,typ,para) )
# in case of field map path, update dictionary with new path
if typ == 'FIELD_MAP_PATH' : _update_field_map_dict(dic_fmap,para[0])
# Break the loop
if typ=='END': break
# Instances
self.gamma=gamma
self.freq =freq
self.lst =lst
self.fmap =dic_fmap # Maybe not needed ...
# Assign idx, idx_elem, s, freq, apt
self.update_idx()
def get_correctors_idx(self,i):
'''
        Get the correctors associated with corrector family i
'''
import logging
found=False
correctors=[]
for element in self.lst:
if found:
                # WARNING: an inactive comment/element could sit between the ADJUST command and the actual corrector
logging.debug("Found element {} for corrector family {}".format(element.typ,i))
correctors.append(element)
found=False
if isinstance(element,COMM):
if element.typ in ['ADJUST','ADJUST_STEERER']:
if int(element.para[0])==i: # next element is the corrector
found=True
return correctors
def get_elem_idx(self,i):
'''
        Get the element with TraceWin element number i
        Note: idx_elem counts from 0 while TraceWin counts from 1
'''
for element in self.lst:
if element.idx_elem==i-1:
return element
def get_steerer_for(self,idx_elem):
'''
Returns the steerer object for an element (e.g. quad)
'''
previous=None
for element in self.lst:
if element.idx_elem+1==idx_elem:
                if previous is not None and previous.typ=='STEERER':
return previous
return None
previous=element
def update_idx(self):
# Assign/update idx, idx_elem, s, freq
for i in range(len(self.lst)):
if i==0:
self.lst[0].idx =-1
self.lst[0].idx_elem=-1
self.lst[0].s = 0.0
self.lst[0].freq=self.freq
if i!=0:
self.lst[i].idx =self.lst[i-1].idx
self.lst[i].idx_elem=self.lst[i-1].idx_elem
self.lst[i].s =self.lst[i-1].s
self.lst[i].freq =self.lst[i-1].freq
self.lst[i].update_idx()
# Assign apt (using apt of the previous elem for diag elem)
for i in range(len(self.lst)):
try:
if self.lst[i].apt==None:
if self.lst[i].idx_elem==0:
for k in range(1,len(self.lst)):
try:
if self.lst[k].apt!=None:
self.lst[i].apt=self.lst[k].apt; break
except: pass
else:
for k in range(i)[::-1]:
try : self.lst[i].apt=self.lst[k].apt; break
                            except: pass
            except: pass
def update_gamma(self):
for i in range(len(self.lst)):
if i==0: self.lst[0].gamma=self.gamma
if i!=0: self.lst[i].gamma=self.lst[i-1].gamma
try : self.lst[i].update_gamma()
except: pass
def update_st(self,file_name):
'''
Assign/update steerer values from "Steerer_Values.txt".
'''
# Extract BLx and BLy from the file
with open(file_name,'r') as file:
BLx={}; BLy={}
for lin in file:
lin=lin.split()[3:]
for i in range(len(lin)):
if lin[i]==':' : idx_elem =int(lin[i-1])-1 # "-1" since idx=0,1,2,...
if lin[i]=='BLx=': BLx[idx_elem]=float(lin[i+1])
if lin[i]=='BLy=': BLy[idx_elem]=float(lin[i+1])
# Assign/update steerers
for i in range(len(self.lst)):
if self.lst[i].idx_elem in BLx:
idx_elem=self.lst[i].idx_elem
if self.lst[i].typ=='THIN_STEERING':
self.lst[i].BLx=BLx[idx_elem]
self.lst[i].BLy=BLy[idx_elem]
if self.lst[i].typ!='THIN_STEERING':
for k in range(i)[::-1]:
if self.lst[k].typ=='STEERER':
self.lst[k].Bx=BLx[idx_elem]/self.lst[i].L
self.lst[k].By=BLy[idx_elem]/self.lst[i].L
break
def update_adj(self,file_name="Adjusted_Values.txt"):
'''
Assign/update correction values from "Adjusted_Values.txt".
        WARNING: many corner cases remain, e.g. what happens if multiple
        ADJUST commands correct the same element?
        Use with care!
'''
with open(file_name,'r') as file:
# First we read in all corrections into dictionaries
values={}
counts={}
for lin in file:
lin=lin.split()
                if lin[0]=='BEAM': # Skip "BEAM ERROR" lines, which carry no scheme index
                    continue
                i=int(lin[1][1:-1])
settings={}
for j in xrange(len(lin[2:])/3):
k=int(lin[2+3*j])
val=float(lin[4+3*j])
if k in settings:
settings[k].append(val)
else:
settings[k]=[val]
values[i]=settings
for j in settings:
counts[j]=0
# now we will do all the ADJUST_STEERER ones
corr_next=False
for el in self.lst:
if isinstance(el,COMM) and el.typ=='ADJUST_STEERER':
i=int(el.para[0])
if i in values:
corr_next=values[i]
elif corr_next:
if isinstance(el,STEERER):
# Bx and By are corrected..
vals=corr_next.values()[0]
el.Bx=vals[0]
el.By=vals[1]
corr_next=False
# now we will do all the ADJUST ones
corr_next=False
# The TraceWin index number of the current element:
current=-1
for el in self.lst:
if el.idx_elem!=-1:
current=el.idx_elem+1
if isinstance(el,COMM) and el.typ=='ADJUST':
# The index of the corrector scheme
i = int(el.para[0])
# the parameter column to vary:
j = int(el.para[1])-1
# The TraceWin element index of the element we will vary:
k = current+1
# This corrector might not be used:
if i in values:
# We will correct the next active element in lattice:
value = values[i][k][counts[k]]
# We have now used one value of the total in the current corrector:
counts[k]+=1
# the element to vary:
vary=self.get_elem_idx(current+1)
vary.para[j]=value
vary.update()
def get_tw(self,file_name):
with open(file_name,'w') as file:
for lat_i in self.lst:
file.write(lat_i.get_tw()+'\n')
def get_madx(self,file_name_elem='elem.madx',file_name_seq='seq.madx'):
if self.lst[-1].gamma==1.0: self.update_gamma() # Assign gamma, if not done yet
with open(file_name_elem,'w') as file:
for lat_i in self.lst:
try : print >>file,lat_i.get_madx()
except: pass
with open(file_name_elem,'r') as file: lst_name=[lin.split(':')[0] for lin in file]
with open(file_name_seq ,'w') as file: print >>file,'linac:line=('+','.join(lst_name)+');'
def get_fluka(self,file_name='elem.dat'):
if self.lst[-1].gamma==1.0: self.update_gamma() # Assign gamma, if not done yet
with open(file_name,'w') as file:
for lat_i in self.lst:
try : print >>file,lat_i.get_fluka()
except: pass
    def get_mars(self,file_name='elem.dat'):
        if self.lst[-1].gamma==1.0: self.update_gamma() # Assign gamma, if not done yet
with open(file_name,'w') as file:
for lat_i in self.lst:
try : print >>file,lat_i.get_mars()
except: pass
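# Usage sketch (commented out so nothing runs on import; file names are placeholders):
# read a TraceWin lattice, propagate gamma through it, then write it back out and
# export it to MAD-X.
## lat=LATTICE('lattice.dat',file_name_fmap=['fmap.edz'])   # freq=352.21, gamma=1.0 by default
## lat.update_gamma()
## lat.get_tw('lattice_check.dat')
## lat.get_madx('elem.madx','seq.madx')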
class PROJECT:
'''
- This is for running multiple simulations 1-by-1 under 1 project.
- Maybe not very useful...
2015.10.15
'''
def __init__(self,file_name_proj='proj.ini'):
# Instances (Add more options as needed.)
self.file_name_proj=file_name_proj
self.file_name_lat =None
self.path_cal =None
self.seed =None
self.flag_hide =None
def exe(self):
opt_exe ='TraceWin64_noX11 '+self.file_name_proj
if self.file_name_lat!=None: opt_exe+=' dat_file=' +self.file_name_lat
if self.path_cal !=None: opt_exe+=' path_cal=' +self.path_cal
if self.seed !=None: opt_exe+=' random_seed='+self.seed
if self.flag_hide !=None: opt_exe+=' hide'
## if self.path_cal!=None:
## if isdir(self.path_cal)==False: system('mkdir '+self.path_cal)
system(opt_exe)
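# Usage sketch (commented out; assumes the TraceWin64_noX11 binary is on the PATH
# and that the .ini/.dat files exist):
## proj=PROJECT('proj.ini')
## proj.file_name_lat='lattice.dat'
## proj.path_cal     ='calc/run_001'
## proj.seed         ='1'        # Passed as a string since it is concatenated into the command
## proj.exe()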
#---- Data related
class PARTRAN:
'''
Note:
    - The list is not complete; add parameters as needed.
History:
- 2016.02.17: Changed how to identify the line of indices.
    - 2016.02.17: Added logic to avoid division by zero for a LEBT.
'''
def __init__(self,file_name):
        # Consts to convert phs to z.
        c   =2.99792458 # Speed of light in 1e8 m/s (same convention as the local c in DST)
        mass=938.272029
        freq=352.21
# Extract indices.
with open(file_name) as file:
for lin in file.readlines():
lin=lin.split()
if '##' in lin[0]:
idx_s =lin.index("z(m)" ); idx_gamma=lin.index("gama-1" )
idx_x =lin.index("x0" ); idx_y =lin.index("y0" ); idx_phs =lin.index("p0" )
idx_sigx =lin.index("SizeX" ); idx_sigy =lin.index("SizeY" ); idx_sigz=lin.index("SizeZ"); idx_sigp=lin.index("SizeP")
idx_alfx =lin.index("sxx'" ); idx_alfy =lin.index("syy'" ); idx_alfz=lin.index("szdp" )
idx_epsx =lin.index("ex" ); idx_epsy =lin.index("ey" ); idx_epsz=lin.index("ezdp" ); idx_epsp=lin.index("ep" )
idx_halx =lin.index("hx" ); idx_haly =lin.index("hy" ); idx_halp=lin.index("hp" )
idx_Nptcl=lin.index("npart" ); idx_loss =lin.index("Powlost")
# Extract data.
with open(file_name) as file:
data=[]; flag=0
for lin in file.readlines():
lin=lin.split()
if flag ==1 : data.append(map(float,lin))
if '##' in lin[0]: flag=1
data=array(data).transpose()
# Instances
self.s =data[idx_s ]
self.x =data[idx_x ]; self.y =data[idx_y ]; self.phs =data[idx_phs ]
self.sigx =data[idx_sigx ]; self.sigy=data[idx_sigy]; self.sigz=data[idx_sigz]; self.sigp=data[idx_sigp]
self.epsx =data[idx_epsx ]; self.epsy=data[idx_epsy]; self.epsz=data[idx_epsz]; self.epsp=data[idx_epsp]
self.halx =data[idx_halx ]; self.haly=data[idx_haly]; self.halz=data[idx_halp]; self.halp=data[idx_halp]
self.Nptcl=data[idx_Nptcl]; self.loss=data[idx_loss]
        # Workaround for 0 emittance (avoids division by zero below)
for i in range(len(self.s)):
if self.epsx[i]==0.0: self.epsx[i]=inf
if self.epsy[i]==0.0: self.epsy[i]=inf
if self.epsz[i]==0.0: self.epsz[i]=inf
if self.epsp[i]==0.0: self.epsp[i]=inf
# Additional instances
self.gamma= data[idx_gamma]+1.0
self.beta = sqrt(1.0-1.0/self.gamma**2)
self.z =-self.phs*self.beta*(c/freq*1e5)/360.0
self.betx = self.sigx**2/self.epsx*self.beta*self.gamma
self.bety = self.sigy**2/self.epsy*self.beta*self.gamma
self.betz = self.sigz**2/self.epsz*self.beta*self.gamma**3
self.betp = self.sigp**2/self.epsp
self.alfx =-data[idx_alfx]/self.epsx*self.beta*self.gamma
self.alfy =-data[idx_alfy]/self.epsy*self.beta*self.gamma
self.alfz =-data[idx_alfz]/self.epsz*self.beta*self.gamma**3
self.alfp =-self.alfz
# Set inf emitt back to 0
for i in range(len(self.s)):
if self.epsx[i]==inf: self.epsx[i]=0.0
if self.epsy[i]==inf: self.epsy[i]=0.0
if self.epsz[i]==inf: self.epsz[i]=0.0
if self.epsp[i]==inf: self.epsp[i]=0.0
self.s =self.s.tolist() ; self.gamma=self.gamma.tolist(); self.beta=self.beta.tolist()
self.x =self.x.tolist() ; self.y =self.y.tolist() ; self.z =self.z.tolist() ; self.phs =self.phs.tolist()
self.sigx =self.sigx.tolist() ; self.sigy =self.sigy.tolist() ; self.sigz=self.sigz.tolist(); self.sigp=self.sigp.tolist()
self.betx =self.betx.tolist() ; self.bety =self.bety.tolist() ; self.betz=self.betz.tolist(); self.betp=self.betp.tolist()
self.alfx =self.alfx.tolist() ; self.alfy =self.alfy.tolist() ; self.alfz=self.alfz.tolist(); self.alfp=self.alfp.tolist()
self.epsx =self.epsx.tolist() ; self.epsy =self.epsy.tolist() ; self.epsz=self.epsz.tolist(); self.epsp=self.epsp.tolist()
self.halx =self.halx.tolist() ; self.haly =self.haly.tolist() ; self.halz=self.halz.tolist(); self.halp=self.halp.tolist()
self.Nptcl=self.Nptcl.tolist(); self.loss =self.loss.tolist()
def loss_den(self,file_name_dt='',dlt_dt=5e-6):
return loss_elem2den(self.s,self.loss,file_name_dt,dlt_dt)
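# Usage sketch (commented out; file name is a placeholder): read a partran1.out
# and print the rms size, Twiss beta, and total power loss.
## pt=PARTRAN('partran1.out')
## print pt.s[-1],pt.sigx[-1],pt.betx[-1],sum(pt.loss)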
class DST:
'''
    Class for a TraceWin .dst file.
    - TraceWin seems to use beta and gamma of each particle,
      so the conversion to (z,z') is based on this assumption.
2015.10.06
'''
def __init__(self,file_name,unit_x='cm',unit_px='rad',unit_z='rad',unit_pz='MeV'):
# Const
c=2.99792458
# Read the file
with open(file_name) as file:
dummy=fromfile(file,dtype= uint8 ,count=2 )
Nptcl=fromfile(file,dtype= uint32,count=1 )[0]
Ibeam=fromfile(file,dtype=float64,count=1 )[0]
freq =fromfile(file,dtype=float64,count=1 )[0]
dummy=fromfile(file,dtype= uint8 ,count=1 )
x =fromfile(file,dtype=float64,count=Nptcl*6).reshape(Nptcl,6).transpose()
mass =fromfile(file,dtype=float64,count=1 )[0]
# Adjust units
gamma=1.0+x[5]/mass; beta=sqrt(1-1/gamma**2)
if unit_x =='mm' : x[0]= x[0]*1e1; x[2]=x[2]*1e1
if unit_px=='mrad': x[1]= x[1]*1e3; x[3]=x[3]*1e3
if unit_z =='deg' : x[4]= x[4]*180/pi
if unit_z =='mm' : x[4]=-x[4]*c*beta/(2*pi*freq)*1e5
if unit_pz=='mrad': x[5]=(x[5]-mean(x[5]))/(mass*beta**2*gamma**3)*1e3
        # Instances
        self.x    =x
        self.mass =mass
self.freq =freq
self.Ibeam=Ibeam
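# Usage sketch (commented out; file name is a placeholder): read a .dst in
# mm/mrad units and get the Twiss parameters of the distribution with x2twiss
# (defined below); note the stored x has the shape (6,Nptcl).
## dst=DST('part_dtl1.dst',unit_x='mm',unit_px='mrad',unit_z='mm',unit_pz='mrad')
## eps,bet,alf,gam=x2twiss(dst.x.transpose())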
class DENSITY:
'''
- Note instances are not identical for Nrun=1 and Nrun>1.
- Be careful with # of steps for an envelope file.
- When Nrun>1, ave and rms in a file are sum and squared sum.
    - Double check before a production run !!!!
- Dim of arrays:
Nelem(Nidx): idx_elem, s, Nptcl, Ibeam
4 x Nelem(Nidx): apt # x, y, dx, dy
3 x Nelem(Nidx): accpt # phs+, phs-, ken
7 x Nelem(Nidx): cent_(..), sig, xmax, xmin # x, y, phs, ken, r, z, dpp
3 x Nelem(Nidx): eps # x, y, phs
Nrun x Nelem: loss
Nelem : loss_num_(..), loss_pow_(..)
- History:
* 2015.11.12: Baseline ver
* 2016.03.29: Adapted to ver 9 (apt includes shifts)
'''
def __init__(self,file_name):
#-- Empty arrays
idx_elem =[]; s =[]
apt =[]; accpt =[]
Nptcl =[]; Ibeam =[]
cent_ave =[]; cent_rms =[]
cent_max =[]; cent_min =[]
sig_ave =[]; sig_rms =[]
eps_ave =[]; eps_rms =[]
xmax =[]; xmin =[]
loss_num =[]; loss_pow =[]
loss_num_ave=[]; loss_pow_ave=[]
loss_num_rms=[]; loss_pow_rms=[]
loss_num_max=[]; loss_pow_max=[]
loss_num_min=[]; loss_pow_min=[]
den =[]; den_pow =[]
with open(file_name) as file:
while True:
try:
# Partran and envelope
ver,year,flag_long=fromfile(file,dtype=uint16,count=3)
Nrun =fromfile(file,dtype=uint32,count=1)[0]
idx_elem.append(fromfile(file,dtype= uint32,count=1)[0])
Ibeam.append(fromfile(file,dtype=float32,count=1)[0])
s.append( fromfile(file,dtype=float32,count=1)[0])
if ver>=9: apt.append(fromfile(file,dtype=float32,count=4))
else : apt.append(fromfile(file,dtype=float32,count=2))
Nstep=fromfile(file,dtype=uint32,count=1)[0]
cent_ave.append(fromfile(file,dtype=float32,count=7))
cent_rms.append(fromfile(file,dtype=float32,count=7))
xmax.append( fromfile(file,dtype=float32,count=7))
xmin.append( fromfile(file,dtype=float32,count=7))
if ver> 5:
sig_ave.append(fromfile(file,dtype=float32,count=7))
sig_rms.append(fromfile(file,dtype=float32,count=7))
if ver>=6:
cent_min.append(fromfile(file,dtype=float32,count=7))
cent_max.append(fromfile(file,dtype=float32,count=7))
if ver>=7:
eps_ave.append(fromfile(file,dtype=float32,count=3))
eps_rms.append(fromfile(file,dtype=float32,count=3))
if ver>=8:
accpt.append(fromfile(file,dtype=float32,count=3))
Nptcl.append(fromfile(file,dtype=uint64,count=1)[0])
# Partran only
if Nptcl[-1]>0:
loss_num.append([]); loss_pow.append([]); den.append([]); den_pow.append([])
for n in range(Nrun):
loss_num[-1].append(fromfile(file,dtype= uint64,count=1)[0])
loss_pow[-1].append(fromfile(file,dtype=float32,count=1)[0])
loss_num_ave.append(sum(loss_num[-1]))
loss_num_rms.append(fromfile(file,dtype= uint64,count=1)[0])
loss_num_min.append(fromfile(file,dtype= uint64,count=1)[0])
loss_num_max.append(fromfile(file,dtype= uint64,count=1)[0])
loss_pow_rms.append(fromfile(file,dtype=float64,count=1)[0])
loss_pow_min.append(fromfile(file,dtype=float32,count=1)[0])
loss_pow_max.append(fromfile(file,dtype=float32,count=1)[0])
for k in range(7):
if flag_long==1: den[-1].append(fromfile(file,dtype=uint64,count=Nstep))
else : den[-1].append(fromfile(file,dtype=uint32,count=Nstep))
if Ibeam[-1]>0:
for k in range(3):
den_pow[-1].append(fromfile(file,dtype=float32,count=Nstep))
#print Nrun,Nptcl[-1],idx_elem[-1] # Diag
except:
break
#-- Reshape arrays
apt =swapaxes( apt,1,0); accpt =swapaxes( accpt,1,0)
cent_ave=swapaxes(cent_ave,1,0); cent_rms=swapaxes(cent_rms,1,0)
cent_max=swapaxes(cent_max,1,0); cent_min=swapaxes(cent_min,1,0)
sig_ave =swapaxes( sig_ave,1,0); sig_rms =swapaxes( sig_rms,1,0)
eps_ave =swapaxes( eps_ave,1,0); eps_rms =swapaxes( eps_rms,1,0)
xmax =swapaxes( xmax,1,0); xmin =swapaxes( xmin,1,0)
if Nptcl[0]>0:
loss_num=swapaxes(loss_num,1,0); loss_pow=swapaxes(loss_pow,1,0)
den =swapaxes( den,1,0); den_pow =swapaxes( den_pow,1,0)
        #-- Convert the stored sums / squared sums into ave and rms
cent_ave=cent_ave/Nrun; cent_rms=sqrt(cent_rms/Nrun)
sig_ave = sig_ave/Nrun; sig_rms =sqrt( sig_rms/Nrun)
eps_ave = eps_ave/Nrun; eps_rms =sqrt( eps_rms/Nrun)
if Nptcl[0]>0:
loss_num_ave=1.0*array(loss_num_ave)/Nrun; loss_num_rms=sqrt(1.0*array(loss_num_rms)/Nrun)
loss_pow_ave= array(loss_pow_ave)/Nrun; loss_pow_rms=sqrt( array(loss_pow_rms)/Nrun)
#-- Change units, m => mm, pi-m-rad => pi-mm-mrad
apt *=1e3
eps_ave*=1e6; eps_rms*=1e6
for k in (0,1,4,5):
cent_ave[k]*=1e3; cent_rms[k]*=1e3
cent_max[k]*=1e3; cent_min[k]*=1e3
sig_ave[k] *=1e3; sig_rms[k] *=1e3
xmax[k] *=1e3; xmin[k] *=1e3
        #-- Define std (around() avoids sqrt of small negative round-off)
if Nrun>1:
cent_std=sqrt(around(cent_rms**2-cent_ave**2,12))
sig_std =sqrt(around( sig_rms**2- sig_ave**2,12))
eps_std =sqrt(around( eps_rms**2- eps_ave**2,12))
cent_std=nan_to_num(cent_std) # Replace nan with 0
sig_std =nan_to_num( sig_std) # Replace nan with 0
eps_std =nan_to_num( eps_std) # Replace nan with 0
if Nptcl[0]>0:
loss_num_std=sqrt(around(loss_num_rms**2-loss_num_ave**2,16))
loss_pow_std=sqrt(around(loss_pow_rms**2-loss_pow_ave**2,16))
loss_num_std=nan_to_num(loss_num_std) # Replace nan with 0
loss_pow_std=nan_to_num(loss_pow_std) # Replace nan with 0
#-- Convert to list (just in case...)
apt = apt.tolist(); accpt = accpt.tolist(); accpt.append(accpt[0]); del accpt[0]
cent_ave=cent_ave.tolist(); cent_rms=cent_rms.tolist()
cent_max=cent_max.tolist(); cent_min=cent_min.tolist()
sig_ave = sig_ave.tolist(); sig_rms = sig_rms.tolist()
eps_ave = eps_ave.tolist(); eps_rms = eps_rms.tolist()
xmax = xmax.tolist(); xmin=xmin.tolist()
if Nptcl[0]>0:
loss_num = loss_num.tolist(); loss_pow = loss_pow.tolist()
loss_num_ave=loss_num_ave.tolist(); loss_pow_ave=loss_pow_ave.tolist()
loss_num_rms=loss_num_rms.tolist(); loss_pow_rms=loss_pow_rms.tolist()
den = den.tolist(); den_pow = den_pow.tolist()
if Nrun>1:
cent_std=cent_std.tolist()
sig_std = sig_std.tolist()
eps_std = eps_std.tolist()
if Nptcl[0]>0:
loss_num_std=loss_num_std.tolist()
self.idx_elem=idx_elem
self.s =s
self.apt =apt
self.accpt =accpt
self.Nptcl =Nptcl
self.Ibeam =Ibeam
if Nrun==1:
self.cent=cent_ave
self.sig = sig_ave
self.eps = eps_ave
self.xmax= xmax
self.xmin= xmin
if Nptcl[0]>0:
self.loss_num=loss_num[0]; self.loss_pow=loss_pow[0]
self.den = den; self.den_pow = den_pow
else:
self.cent_ave=cent_ave; self.cent_rms=cent_rms; self.cent_std=cent_std
self.cent_max=cent_max; self.cent_min=cent_min
self.sig_ave = sig_ave; self.sig_rms = sig_rms; self.sig_std = sig_std
self.eps_ave = eps_ave; self.eps_rms = eps_rms; self.eps_std = eps_std
self.xmax = xmax; self.xmin = xmin
if Nptcl[0]>0:
self.loss_num =loss_num ; self.loss_pow =loss_pow
self.loss_num_ave=loss_num_ave; self.loss_pow_ave=loss_pow_ave
self.loss_num_rms=loss_num_rms; self.loss_pow_rms=loss_pow_rms
self.loss_num_std=loss_num_std; self.loss_pow_std=loss_pow_std
self.loss_num_max=loss_num_max; self.loss_pow_max=loss_pow_max
self.loss_num_min=loss_num_min; self.loss_pow_min=loss_pow_min
self.den = den ; self.den_pow = den_pow
        #-- Optional outputs
self.idx_4_elem_end=[len(idx_elem)-1-idx_elem[::-1].index(i) for i in list(set(idx_elem))]
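# Usage sketch (commented out; file name is a placeholder): read a Density file
# and print the last position and the horizontal aperture there (in mm).
## den=DENSITY('Density_PAR.dat')
## print den.s[-1],den.apt[0][-1]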
#-------- Functions
#---- Dist related
def x2dst(x,mass,freq,Ibeam,path_file='part_dtl1_new.dst'):
'''
    Output a TraceWin .dst file from x and the beam parameters.
Input: x (Nptcl,6)
For binary characters see https://docs.python.org/2/library/struct.html
2014.10.03
'''
file=open(path_file,'w')
out =pack('b',125)
out+=pack('b',100)
out+=pack('i',len(x))
out+=pack('d',Ibeam)
out+=pack('d',freq)
out+=pack('b',125) # Typo in the manual !!!!
x=list(chain(*x)) # Flatten x
for x_i in x: out+=pack('d',x_i)
    out+=pack('d',mass)
    file.write(out) # Write the packed binary record
    file.close()
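# Usage sketch (commented out): write a small Gaussian test distribution to a
# .dst file; columns follow the TraceWin units (cm,rad,cm,rad,rad,MeV).
## x_test=1e-3*random.randn(1000,6)
## x_test[:,5]+=3.62   # Shift the energy column to ~3.62 MeV
## x2dst(x_test,mass=938.272029,freq=352.21,Ibeam=62.5e-3,path_file='test.dst')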
def plt2x(path_file):
'''
    Extract x etc. from a TraceWin binary .plt file.
The units are (cm,rad,cm,rad,rad,MeV,loss).
Output: x (Nelem,Nptcl,7)
For binary characters see https://docs.python.org/2/library/struct.html
2014.10.06
'''
    file=open(path_file,'rb')
    # Hetero info
    file.read(1); file.read(1) # 125,100
Nelem=unpack('i',file.read(4))[0]
Nptcl=unpack('i',file.read(4))[0]
Ibeam=unpack('d',file.read(8))[0]
freq =unpack('d',file.read(8))[0]
mass =unpack('d',file.read(8))[0]
    i_unk=[]; i_elem=[]; s=[]; phs=[]; ken=[]; x=[]
    for i in range(Nelem):
try:
i_unk.append(unpack('b',file.read(1))[0])
i_elem.append(unpack('i',file.read(4))[0])
s.append(unpack('d',file.read(8))[0])
phs.append(unpack('d',file.read(8))[0])
ken.append(unpack('d',file.read(8))[0])
x.append([[unpack('f',file.read(4))[0] for l in range(7)] for k in range(Nptcl)])
except:
break
    file.close()
    return x,mass,freq,Ibeam,i_unk,i_elem,s,phs,ken
def x2plt(x,mass,freq,Ibeam,i_unk,i_elem,s,phs,ken,path_file='dtl1_new.plt'):
'''
    Output a TraceWin .plt file from x and the associated data.
    Input: x (Nelem,Nptcl,7)
    For binary characters see https://docs.python.org/2/library/struct.html
    '''
    file=open(path_file,'w')
out =pack('b',125)
out+=pack('b',100)
out+=pack('i',len(x))
out+=pack('i',len(x[0]))
out+=pack('d',Ibeam)
out+=pack('d',freq)
out+=pack('d',mass)
for i in range(len(x)):
out+=pack('b',i_unk[i])
#out+=pack('i',i_elem[i])
out+=pack('i',i) # To truncate the elem number
out+=pack('d',s[i])
out+=pack('d',phs[i])
out+=pack('d',ken[i])
x_i=list(chain(*x[i]))
for x_ik in x_i: out+=pack('f',x_ik)
print >>file,out
file.close()
#---- Data related
def x2twiss(x):
'''
Calculate twiss from x. Note sig = sqrt(bet*eps).
Input : x (Nptcl,6)
Output: eps,bet,alf,gam
2015.07.30
'''
x =[x_i-mean(x_i) for x_i in array(x).transpose()]
sig=[[mean(x_i*x_k) for x_k in x] for x_i in x]
eps=[ sqrt(det(array(sig)[i:i+2][:,i:i+2])) for i in (0,2,4)]
bet=[ sig[i ][i ]/eps[i/2] for i in (0,2,4)]
alf=[-sig[i ][i+1]/eps[i/2] for i in (0,2,4)]
gam=[ sig[i+1][i+1]/eps[i/2] for i in (0,2,4)]
return eps,bet,alf,gam
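# Usage sketch (commented out): Twiss parameters of an uncorrelated Gaussian beam;
# alf should come out close to 0 for such a distribution.
## x_test=random.randn(10000,6)*array([1e-3,1e-4,1e-3,1e-4,1e-2,1e-3])
## eps,bet,alf,gam=x2twiss(x_test)
## print eps[0],bet[0],alf[0]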
def x2twiss_halo(x,sig_cut=(None,None,None)):
'''
Calculate twiss of halo particles (r > sig_cut).
Note halo particles are different for each plane.
Input : x (Nptcl,6), sig_cut
Output: eps,bet,alf,gam
2015.07.30
'''
    x=array(x) # Ensure an array so that transpose() below works
    eps,bet,alf,gam=x2twiss(x)
for k in (0,2,4):
if sig_cut[k/2]!=None:
x=x.transpose();
r_nom_sq=(x[k]**2+(alf[k/2]*x[k]+bet[k/2]*x[k+1])**2)/(bet[k/2]*eps[k/2])
x=x.transpose();
x_halo=[x[i] for i in range(len(x)) if r_nom_sq[i]>sig_cut[k/2]**2]
eps_tmp,bet_tmp,alf_tmp,gam_tmp=x2twiss(x_halo)
eps[k/2]=eps_tmp[k/2]
bet[k/2]=bet_tmp[k/2]
alf[k/2]=alf_tmp[k/2]
gam[k/2]=gam_tmp[k/2]
return eps,bet,alf,gam
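# Usage sketch (commented out): emittance of the halo beyond 3 sigma in the
# horizontal plane only, reusing x_test from the sketch above.
## eps_h,bet_h,alf_h,gam_h=x2twiss_halo(x_test,sig_cut=(3.0,None,None))
## print eps_h[0]/eps[0]   # Halo emittance relative to the full-beam emittance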
def loss_elem2den(s,loss,file_name_dt='',dlt_dt=5e-6):
    '''
    This function calculates loss density [W/m] from s and loss,
    together with a file of the DTL's cell and drift-tube lengths (optional).
    '''
# Define L
L=[s[i]-s[i-1] for i in range(1,len(s))]; L.insert(0,0.0)
# Take care DTL part if "file_name_dt" is given
try:
# DTL cell and DT lengths
with open(file_name_dt) as file:
L_cell,L_dt=array([map(float,lin.split()) for lin in file.readlines()]).transpose()[:2]
# Replace cell lengths with DT lengths
Ndt=0
for i in range(len(L)):
dL=list(abs(L_cell-L[i])); dL_min=min(dL)
if dL_min<dlt_dt:
L[i]=L_dt[dL.index(dL_min)]; Ndt+=1
# Check
if Ndt!=len(L_dt):
print 'drift-tube # unmatched, in file: '+str(len(L_dt))+', matched: '+str(Ndt)
except:
pass
# Take care loss in 0 length elem
loss_den=loss[::]
for i in range(len(loss_den)):
if loss_den[i]!=0.0 and L[i]==0.0:
print 'Caution: inf loss density at elem # ',i,'!!!!'
for k in range(i)[::-1]:
if L[k]!=0.0: loss_den[k]+=loss_den[i]; loss_den[i]=0.0; break
# Calculate loss den
for i in range(len(loss_den)):
if loss_den[i]!=0.0: loss_den[i]/=L[i]
return loss_den
exit()
# Define L
l=[s[i+1]-s[i] for i in range(len(s)-1)]; l.insert(0,0.0)
#L=[s[i+1]-s[i] for i in range(len(s)-1)]; L.insert(0,0.0)
# Take care DTL part if "file_name_dt" is given
try:
# Read DTL cell and DT lengths
with open(file_name_dt) as file:
l_cell,l_dt=array([map(float,lin.split()) for lin in file.readlines()]).transpose()[:2]
# Replace cell lengths with DT lengths
Ndt=0
for i in range(len(l)):
dl=list(abs(l_cell-l[i])); dl_min=min(dl)
if dl_min<dlt_dt:
l[i]=l_dt[dl.index(dl_min)]; Ndt+=1
# Check
if Ndt!=len(l_cell):
print 'drift-tube # unmatched, in file: '+str(len(l_cell))+', matched: '+str(Ndt)
for i in range(len(l)):
if l[i]==0.0 and loss[i]!=0.0:
if i==1:
print 'The 1st elem has 0 length and yet has a loss, exiting...'; exit()
else:
k=1
while l[i-k]==0.0: k+=1
loss[i-k]+=loss[i]; loss[i]=0.0
print 'Caution: Inf loss density at elem #',i,'!!!!'
loss_den=[]
for i in range(len(l)):
if loss[i]==0.0: loss_den.append(0.0)
else : loss_den.append(loss[i]/l[i])
## def loss_elem2den(s,loss,path_file_dt,dlt_dt=5e-6):
## '''
## This function calculates loss density [W/m] from s, loss,
## and the file of the DTL's cell and DT lengths.
## 2015.01.30
## '''
## # Define l
## l=[s[i+1]-s[i] for i in range(len(s)-1)]; l.insert(0,0.0)
## # DTL cell and DT lengths.
## file=open(path_file_dt)
## l_cell,l_dt=array([map(float,lin.split()) for lin in file.readlines()]).transpose()[:2]
## file.close()
## # Replace l's in DTL with DT lengths.
## Ndt=0
## for i in range(len(l)):
## dl=list(abs(l_cell-l[i])); dl_min=min(dl)
## if dl_min<dlt_dt:
## l[i]=l_dt[dl.index(dl_min)]; Ndt+=1
## # Check the replacement.
## if Ndt!=len(l_cell):
## print 'drift-tube # unmatched, in file: '+str(len(l_cell))+', from matching: '+str(Ndt)
## print 'review the threshold, exiting...'; exit()
## # Treat inf loss den.
## for i in range(len(l)):
## if l[i]==0.0 and loss[i]!=0.0:
## if i==1:
## print 'The 1st elem has 0 length and yet has a loss, exiting...'; exit()
## else:
## k=1
## while l[i-k]==0.0: k+=1
## loss[i-k]+=loss[i]; loss[i]=0.0
## print 'Caution: Inf loss density at elem #',i,'!!!!'
## # Finally, convert to loss density.
## loss_den=[]
## for i in range(len(l)):
## if loss[i]==0.0: loss_den.append(0.0)
## else : loss_den.append(loss[i]/l[i])
## return loss_den
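# Usage sketch (commented out; file name is a placeholder): peak loss density in
# W/m from a PARTRAN output, using the wrapper method of the PARTRAN class.
## pt=PARTRAN('partran1.out')
## print max(pt.loss_den())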
def partran_end_all(file_name_in_all,file_name_out):
'''
- Reads multiple partran1.out files and extracts the data at the end.
    - The output keeps the partran1.out format, so it can be read by the PARTRAN class.
- The elem # is replaced with the file #.
2015.11.25
'''
# Extract header from 1 file.
with open(file_name_in_all[0],'r') as file:
header=''
for lin in file.readlines():
header+=lin
if lin.split()[0]=='####': break
# Extract output data for each file and write.
with open(file_name_out,'w') as file_out:
file_out.write(header)
for i in range(len(file_name_in_all)):
lin=check_output(['tail','-1',file_name_in_all[i]]).split()
# Replace elem # with file #.
file_out.write('%5s'%str(i))