diff --git a/bin/a2x b/bin/a2x.sh similarity index 100% rename from bin/a2x rename to bin/a2x.sh diff --git a/bin/event_peak.py b/bin/event_peak.py index a295edd41..18e97e63c 100755 --- a/bin/event_peak.py +++ b/bin/event_peak.py @@ -1,14 +1,21 @@ #!/usr/bin/env python from __future__ import print_function import numpy as num +import sys import presto.events as evts from presto import kuiper from presto.Pgplot import * +if len(sys.argv) != 2: + print("\nusage: {} file\n".format(sys.argv[0])) + sys.exit(1) + + def calc_phases(events, f, fd): return num.fmod(events*(f+(0.5*fd*events)), 1.0) + events = num.loadtxt(sys.argv[1]) events.sort() print("Read %d events from '%s'." % (events.size, sys.argv[1])) diff --git a/bin/fb_truncate.py b/bin/fb_truncate.py index b91fb28c4..019761f59 100755 --- a/bin/fb_truncate.py +++ b/bin/fb_truncate.py @@ -9,13 +9,15 @@ import sys import copy -import optparse +from argparse import ArgumentParser import numpy as np from presto import filterbank BLOCKSIZE = 1e5 # Number of spectra to manipulate at once -def main(): + + +def main(args): infn = args[0] print("Reading filterbank file (%s)" % infn) fil = filterbank.FilterbankFile(infn) @@ -106,36 +108,34 @@ def main(): if __name__ == '__main__': - parser = optparse.OptionParser(prog='fb_truncate.py', \ - version="v0.1 Patrick Lazarus (Aug. 28, 2012)") - parser.add_option("-L", "--lo-freq", dest="lo_freq", type='float', \ - help="Desired low frequency for output file. Note: " \ - "actual low frequency will be rounded to the nearest" \ - "channel (Default: Don't truncate low-freq channels)", \ + parser = ArgumentParser(description="v0.1 Patrick Lazarus (Aug. 28, 2012)") + parser.add_argument("-L", "--lo-freq", dest="lo_freq", type=float, + help="Desired low frequency for output file. Note: " + "actual low frequency will be rounded to the nearest" + "channel (Default: Don't truncate low-freq channels)", default=None) - parser.add_option("-H", "--hi-freq", dest="hi_freq", type='float', \ - help="Desired high frequency for output file. Note: " \ - "actual high frequency will be rounded to the nearest" \ - "channel (Default: Don't truncate high-freq channels)", \ + parser.add_argument("-H", "--hi-freq", dest="hi_freq", type=float, + help="Desired high frequency for output file. Note: " + "actual high frequency will be rounded to the nearest" + "channel (Default: Don't truncate high-freq channels)", default=None) - parser.add_option("-s", "--start-time", dest="start_time", type='float', \ - help="Start of desired range of input file to write " \ - "to output file. Note: The actual start time will " \ - "be rounded to the nearest sample.(Default: Don't " \ + parser.add_argument("-s", "--start-time", dest="start_time", type=float, + help="Start of desired range of input file to write " + "to output file. Note: The actual start time will " + "be rounded to the nearest sample.(Default: Don't " "truncate from start of file.)", default=None) - parser.add_option("-e", "--end-time", dest="end_time", type='float', \ - help="End of desired range of input file to write " \ - "to output file. Note: The actual end time will " \ - "be rounded to the nearest sample. (Default: " \ + parser.add_argument("-e", "--end-time", dest="end_time", type=float, + help="End of desired range of input file to write " + "to output file. Note: The actual end time will " + "be rounded to the nearest sample. 
(Default: " "Don't truncate from end of file.)", default=None) - parser.add_option("--block-size", dest='block_size', default=BLOCKSIZE, \ - type='float', \ - help="Number of spectra per block. This is the amount " \ - "of data manipulated/written at a time. (Default: " \ + parser.add_argument("--block-size", dest='block_size', default=BLOCKSIZE, + type=float, + help="Number of spectra per block. This is the amount " + "of data manipulated/written at a time. (Default: " " %d spectra)" % BLOCKSIZE) - parser.add_option("-o", "--outname", dest='outname', action='store', \ + parser.add_argument("-o", "--outname", dest='outname', action='store', required=True, help="The name of the output file.") (options, args) = parser.parse_args() - if not hasattr(options, 'outname'): - raise ValueError("An output file name _must_ be provided!") - main() + + main(args) diff --git a/bin/filter_zerolags.py b/bin/filter_zerolags.py index aafd5c018..e2a454c40 100755 --- a/bin/filter_zerolags.py +++ b/bin/filter_zerolags.py @@ -4,6 +4,12 @@ import numpy as N import sys, scipy.io, scipy.signal + +if len(sys.argv) != 2: + print("\nusage: {} file\n".format(sys.argv[0])) + sys.exit(1) + + plot=0 infilenm = sys.argv[1] diff --git a/bin/fit_circular_orbit.py b/bin/fit_circular_orbit.py index 78620ed08..9e304f56d 100755 --- a/bin/fit_circular_orbit.py +++ b/bin/fit_circular_orbit.py @@ -12,6 +12,7 @@ period = num.asarray([]) time = num.asarray([]) + def parse_eph(filenm): global period, time suffix = filenm.split(".")[-1] @@ -41,14 +42,17 @@ def parse_eph(filenm): period = num.concatenate((period, newps)) print("%13.7f (%0.1f sec): " % (epoch, T), fs) + def orbeqn(Ppxt, times): # P = Ppsr, p = Porb, x = a*sin(i)/s, t = T_o phi = pu.TWOPI*(times - Ppxt[3])*86400.0/Ppxt[1] return Ppxt[0]*(1.0+pu.TWOPI*Ppxt[2]/Ppxt[1]*num.cos(phi)) + def funct(Ppxt, times, measured): return orbeqn(Ppxt, times) - measured + if __name__ == '__main__': if len(sys.argv)==1: print("\nusage: fit_circular_orbit.py P_psr P_orb X_orb parfiles or bestprofs") diff --git a/bin/guppidrift2fil.py b/bin/guppidrift2fil.py index 2350bd41d..23944a62c 100755 --- a/bin/guppidrift2fil.py +++ b/bin/guppidrift2fil.py @@ -268,12 +268,12 @@ def main(fits_fn, outfn, nbits, \ input_nbits=input_nbits) if flip_band: subint = np.fliplr(subint) - subint /= scale_fac - outfil.append_spectra(subint) - pcnt = "%d" % (i*100.0/output_subints) - if pcnt != oldpcnt: - sys.stdout.write("% 4s%% complete\r" % pcnt) - sys.stdout.flush() + subint /= scale_fac + outfil.append_spectra(subint) + pcnt = "%d" % (i*100.0/output_subints) + if pcnt != oldpcnt: + sys.stdout.write("% 4s%% complete\r" % pcnt) + sys.stdout.flush() print("Done ") outfil.close() diff --git a/bin/pfd2png b/bin/pfd2png.sh similarity index 100% rename from bin/pfd2png rename to bin/pfd2png.sh diff --git a/bin/plot_spd.py b/bin/plot_spd.py index 06fc1d83f..c5305a432 100755 --- a/bin/plot_spd.py +++ b/bin/plot_spd.py @@ -1,531 +1,5 @@ #! /usr/bin/env python -""" -plot_spd.py - -Generate spd plots either using information from the .spd files that are generated by make_spd.py. -Usage: plot_spd.py [OPTIONS] <.spd file> <.singlepulse files (optional: - if not provided, will leave DM vs Time window blank).> - -Chitrang Patel - June 10, 2016. 
-""" -from __future__ import print_function -from builtins import map -import numpy as np -import optparse -import tarfile -from subprocess import Popen, PIPE -import presto.singlepulse.sp_pgplot as sp_pgplot -import presto.singlepulse.read_spd as read_spd -import presto.singlepulse.spio as spio - - -def plot(spdfile, singlepulsefiles=None, spec_width=1.5, loc_pulse=0.5, xwin=False, outfile="spdplot", just_waterfall=True, \ - integrate_spec=True, integrate_ts=True, disp_pulse=True, tar=None): - """ - Generates spd plots which include the following subplots: - De-dispersed Zero-DM filtered Waterfall plot - De-dispersed Waterfall plot - optional subplots: - Dispersed Zero-DM filtered Waterfall plot (Inset of the corresponding dedispersed plot). - Dispersed Waterfall plot ((Inset of the corresponding dedispersed plot).). - Dedispersed zero-DM filtered time series for the corresponding waterfall plot. - Dedispersed time series for the corresponding waterfall plot. - Spectra of the de-dispersed pulse for each of the above waterfalled plots. - SNR vs DM - DM vs. Time - - Inputs: - spdfile: A .spd file. - Optional Inputs: - spec_width: Twice this number times the pulse_width around the pulse to consider for the spectrum - loc_pulse: Fraction of the window length where the pulse is located.(eg. 0.25 = 1/4th of the way in. - 0.5 = middle of the plot) - singlepulsefiles: list of .singlepulse files - xwin: plot in an xwin window? - outfile: name of the output file you want. - just_waterfall: Do you only want to display the waterfall plots? - integrate_spec: Do you want to show the pulse spectrum? - integrate_ts: Do you want to show the time series? - disp_pulse: Do you want to show the inset dispersed pulse? - tar: Supply the tarball of the singlepulse files instead of individual files. - """ - if not spdfile.endswith(".spd"): - raise ValueError("The first file must be a .spd file") - #npzfile = np.load(spdfile) - spdobj = read_spd.spd(spdfile) - ##### Read in the header information and other required variables for the plots. 
###### - #text_array = npzfile['text_array'] - man_params = spdobj.man_params - fn = spdobj.filename - telescope = spdobj.telescope - RA = spdobj.ra - dec = spdobj.dec - MJD = spdobj.mjd - mjd = Popen(["mjd2cal", "%f"%MJD], stdout=PIPE, stderr=PIPE) - date, err = mjd.communicate() - date = date.split()[2:5] - rank = spdobj.rank - nsub = spdobj.waterfall_nsubs - nbins = spdobj.nsamp - subdm = dm = sweep_dm = spdobj.best_dm - sigma = spdobj.sigma - sample_number = spdobj.pulse_peak_sample - duration = spdobj.waterfall_duration - width_bins = spdobj.pulsewidth_bins - pulse_width = spdobj.pulsewidth_seconds - tsamp = spdobj.tsamp - Total_observed_time = spdobj.total_obs_time - topo_start = spdobj.pulse_peak_time - start = topo_start - loc_pulse*duration - datastart = spdobj.waterfall_start_time - datasamp = spdobj.waterfall_tsamp - datanumspectra = spdobj.waterfall_prededisp_nbins - min_freq = spdobj.min_freq - max_freq = spdobj.max_freq - sweep_duration = spdobj.sweep_duration - sweeped_start = spdobj.sweep_start_time - bary_start = spdobj.bary_pulse_peak_time - downsamp = datasamp/tsamp - if xwin: - pgplot_device = "/XWIN" - else: - pgplot_device = "" - if pgplot_device: - sp_pgplot.ppgplot.pgopen(pgplot_device) - else: - if (outfile == "spdplot"): # default filename - if rank: - sp_pgplot.ppgplot.pgopen(fn[:-5]+'_DM%.1f_%.1fs_rank_%i.spd.ps/VPS'%(subdm, (start+loc_pulse*duration), rank)) - else: - sp_pgplot.ppgplot.pgopen(fn[:-5]+'_DM%.1f_%.1fs.spd.ps/VPS'%(subdm, (start+loc_pulse*duration))) - else: - if rank: - sp_pgplot.ppgplot.pgopen(outfile+'_DM%.1f_%.1fs_rank_%i.spd.ps/VPS'%(subdm, (start+loc_pulse*duration), rank)) - else: - sp_pgplot.ppgplot.pgopen(outfile+'_DM%.1f_%.1fs.spd.ps/VPS'%(subdm, (start+loc_pulse*duration))) - if (just_waterfall == False): - sp_pgplot.ppgplot.pgpap(10.25, 8.5/11.0) - # Dedispersed waterfall plot - zerodm - OFF - array = spdobj.data_nozerodm_dedisp.astype(np.float64) - sp_pgplot.ppgplot.pgsvp(0.07, 0.40, 0.50, 0.80) - sp_pgplot.ppgplot.pgswin(datastart-start, datastart-start+datanumspectra*datasamp, min_freq, max_freq) - sp_pgplot.ppgplot.pgsch(0.8) - sp_pgplot.ppgplot.pgslw(3) - sp_pgplot.ppgplot.pgbox("BCST", 0, 0, "BCNST", 0, 0) - sp_pgplot.ppgplot.pgslw(3) - sp_pgplot.ppgplot.pgmtxt('L', 1.8, 0.5, 0.5, "Observing Frequency (MHz)") - if not integrate_spec: - sp_pgplot.ppgplot.pgmtxt('R', 1.8, 0.5, 0.5, "Zero-dm filtering - Off") - sp_pgplot.plot_waterfall(array,rangex = [datastart-start, datastart-start+datanumspectra*datasamp], rangey = [min_freq, max_freq], image = 'apjgrey') - - #### Plot Dedispersed Time series - Zerodm filter - Off - Dedisp_ts = array[::-1].sum(axis = 0) - times = np.arange(datanumspectra)*datasamp - if integrate_ts: - sp_pgplot.ppgplot.pgsvp(0.07, 0.40, 0.80, 0.90) - sp_pgplot.ppgplot.pgswin(datastart - start, datastart-start+duration, np.min(Dedisp_ts), 1.05*np.max(Dedisp_ts)) - sp_pgplot.ppgplot.pgsch(0.8) - sp_pgplot.ppgplot.pgslw(3) - sp_pgplot.ppgplot.pgbox("BC", 0, 0, "BC", 0, 0) - sp_pgplot.ppgplot.pgsci(1) - sp_pgplot.ppgplot.pgline(times,Dedisp_ts) - sp_pgplot.ppgplot.pgslw(3) - sp_pgplot.ppgplot.pgsci(1) - - errx1 = np.array([0.60 * (datastart-start+duration)]) - erry1 = np.array([0.60 * np.max(Dedisp_ts)]) - erry2 = np.array([np.std(Dedisp_ts)]) - errx2 = np.array([pulse_width]) - sp_pgplot.ppgplot.pgerrb(5, errx1, erry1, errx2, 1.0) - sp_pgplot.ppgplot.pgpt(errx1, erry1, -1) - - #### Plot Spectrum - Zerodm filter - Off - if integrate_spec: - spectrum_window = spec_width*pulse_width - window_width = 
int(spectrum_window/datasamp) - #burst_bin = int(datanumspectra*loc_pulse/downsamp) - burst_bin = int(nbins*loc_pulse/downsamp) - on_spec = array[..., burst_bin-window_width:burst_bin+window_width] - Dedisp_spec = on_spec.sum(axis=1) - freqs = np.linspace(min_freq, max_freq, len(Dedisp_spec)) - sp_pgplot.ppgplot.pgsvp(0.4, 0.47, 0.5, 0.8) - sp_pgplot.ppgplot.pgswin(np.min(Dedisp_spec), 1.05*np.max(Dedisp_spec), min_freq, max_freq) - sp_pgplot.ppgplot.pgsch(0.8) - sp_pgplot.ppgplot.pgslw(3) - sp_pgplot.ppgplot.pgbox("BC", 0, 0, "BC", 0, 0) - sp_pgplot.ppgplot.pgsci(1) - sp_pgplot.ppgplot.pgline(Dedisp_spec,freqs) - sp_pgplot.ppgplot.pgmtxt('R', 1.8, 0.5, 0.5, "Zero-dm filtering - Off") - sp_pgplot.ppgplot.pgsch(0.7) - sp_pgplot.ppgplot.pgmtxt('T', 1.8, 0.5, 0.5, "Spectrum") - sp_pgplot.ppgplot.pgsch(0.8) - - #Dedispersed waterfall plot - Zerodm ON - sp_pgplot.ppgplot.pgsvp(0.07, 0.40, 0.1, 0.40) - sp_pgplot.ppgplot.pgswin(datastart-start , datastart-start+datanumspectra*datasamp, min_freq, max_freq) - sp_pgplot.ppgplot.pgsch(0.8) - sp_pgplot.ppgplot.pgslw(3) - sp_pgplot.ppgplot.pgbox("BCNST", 0, 0, "BCNST", 0, 0) - sp_pgplot.ppgplot.pgmtxt('B', 2.5, 0.5, 0.5, "Time - %.2f s"%datastart) - sp_pgplot.ppgplot.pgmtxt('L', 1.8, 0.5, 0.5, "Observing Frequency (MHz)") - if not integrate_spec: - sp_pgplot.ppgplot.pgmtxt('R', 1.8, 0.5, 0.5, "Zero-dm filtering - On") - array = spdobj.data_zerodm_dedisp.astype(np.float64) - sp_pgplot.plot_waterfall(array,rangex = [datastart-start, datastart-start+datanumspectra*datasamp],rangey = [min_freq, max_freq],image = 'apjgrey') - #### Plot Dedispersed Time series - Zerodm filter - On - dedisp_ts = array[::-1].sum(axis = 0) - times = np.arange(datanumspectra)*datasamp - if integrate_ts: - sp_pgplot.ppgplot.pgsvp(0.07, 0.40, 0.40, 0.50) - sp_pgplot.ppgplot.pgswin(datastart - start, datastart-start+duration, np.min(dedisp_ts), 1.05*np.max(dedisp_ts)) - sp_pgplot.ppgplot.pgsch(0.8) - sp_pgplot.ppgplot.pgslw(3) - sp_pgplot.ppgplot.pgbox("BC", 0, 0, "BC", 0, 0) - sp_pgplot.ppgplot.pgsci(1) - sp_pgplot.ppgplot.pgline(times,dedisp_ts) - errx1 = np.array([0.60 * (datastart-start+duration)]) - erry1 = np.array([0.60 * np.max(dedisp_ts)]) - erry2 = np.array([np.std(dedisp_ts)]) - errx2 = np.array([pulse_width]) - sp_pgplot.ppgplot.pgerrb(5, errx1, erry1, errx2, 1.0) - sp_pgplot.ppgplot.pgpt(errx1, erry1, -1) - - #### Plot Spectrum - Zerodm filter - On - if integrate_spec: - spectrum_window = spec_width*pulse_width - window_width = int(spectrum_window/datasamp) - #burst_bin = int(datanumspectra*loc_pulse/downsamp) - burst_bin = int(nbins*loc_pulse/downsamp) - on_spec = array[..., burst_bin-window_width:burst_bin+window_width] - Dedisp_spec = on_spec.sum(axis=1) - freqs = np.linspace(min_freq, max_freq, len(Dedisp_spec)) - sp_pgplot.ppgplot.pgsvp(0.4, 0.47, 0.1, 0.4) - sp_pgplot.ppgplot.pgswin(np.min(Dedisp_spec), 1.05*np.max(Dedisp_spec), min_freq, max_freq) - sp_pgplot.ppgplot.pgsch(0.8) - sp_pgplot.ppgplot.pgslw(3) - sp_pgplot.ppgplot.pgbox("BC", 0, 0, "BC", 0, 0) - sp_pgplot.ppgplot.pgsci(1) - sp_pgplot.ppgplot.pgline(Dedisp_spec,freqs) - sp_pgplot.ppgplot.pgmtxt('R', 1.8, 0.5, 0.5, "Zero-dm filtering - On") - sp_pgplot.ppgplot.pgsch(0.7) - sp_pgplot.ppgplot.pgmtxt('T', 1.8, 0.5, 0.5, "Spectrum") - sp_pgplot.ppgplot.pgsch(0.8) - - if disp_pulse: - # Sweeped waterfall plot Zerodm - OFF - array = spdobj.data_nozerodm.astype(np.float64) - sp_pgplot.ppgplot.pgsvp(0.20, 0.40, 0.50, 0.70) - sp_pgplot.ppgplot.pgswin(sweeped_start, sweeped_start+sweep_duration, min_freq, 
max_freq) - sp_pgplot.ppgplot.pgsch(0.8) - sp_pgplot.ppgplot.pgslw(4) - sp_pgplot.ppgplot.pgbox("BCST", 0, 0, "BCST", 0, 0) - sp_pgplot.ppgplot.pgsch(3) - sp_pgplot.plot_waterfall(array,rangex = [sweeped_start, sweeped_start+sweep_duration],rangey = [min_freq, max_freq],image = 'apjgrey') - delays = spdobj.dmsweep_delays - freqs = spdobj.dmsweep_freqs - sp_pgplot.ppgplot.pgslw(5) - sweepstart = sweeped_start- 0.2*sweep_duration - sp_pgplot.ppgplot.pgsci(0) - sp_pgplot.ppgplot.pgline(delays+sweepstart, freqs) - sp_pgplot.ppgplot.pgsci(1) - sp_pgplot.ppgplot.pgslw(3) - - # Sweeped waterfall plot Zerodm - ON - array = spdobj.data_zerodm.astype(np.float64) - sp_pgplot.ppgplot.pgsvp(0.20, 0.40, 0.1, 0.3) - sp_pgplot.ppgplot.pgswin(sweeped_start, sweeped_start+sweep_duration, min_freq, max_freq) - sp_pgplot.ppgplot.pgsch(0.8) - sp_pgplot.ppgplot.pgslw(4) - sp_pgplot.ppgplot.pgbox("BCST", 0, 0, "BCST", 0, 0) - sp_pgplot.ppgplot.pgsch(3) - sp_pgplot.plot_waterfall(array,rangex = [sweeped_start, sweeped_start+sweep_duration],rangey = [min_freq, max_freq],image = 'apjgrey') - sp_pgplot.ppgplot.pgslw(5) - sweepstart = sweeped_start- 0.2*sweep_duration - sp_pgplot.ppgplot.pgsci(0) - sp_pgplot.ppgplot.pgline(delays+sweepstart, freqs) - sp_pgplot.ppgplot.pgsci(1) - - #### Figure texts - if integrate_spec: - sp_pgplot.ppgplot.pgsvp(0.81, 0.97, 0.64, 0.909) - sp_pgplot.ppgplot.pgsch(0.62) - else: - sp_pgplot.ppgplot.pgsvp(0.745, 0.97, 0.64, 0.909) - sp_pgplot.ppgplot.pgsch(0.7) - sp_pgplot.ppgplot.pgslw(3) - sp_pgplot.ppgplot.pgmtxt('T', -1.1, 0.01, 0.0, "RA: %s" %RA) - sp_pgplot.ppgplot.pgmtxt('T', -2.6, 0.01, 0.0, "DEC: %s" %dec) - sp_pgplot.ppgplot.pgmtxt('T', -4.1, 0.01, 0.0, "MJD: %f" %MJD) - sp_pgplot.ppgplot.pgmtxt('T', -5.6, 0.01, 0.0, "Obs date: %s %s %s" %(date[0], date[1], date[2])) - sp_pgplot.ppgplot.pgmtxt('T', -7.1, 0.01, 0.0, "Telescope: %s" %telescope) - sp_pgplot.ppgplot.pgmtxt('T', -8.6, 0.01, 0.0, "DM: %.2f pc cm\u-3\d" %dm) - if sigma: - sp_pgplot.ppgplot.pgmtxt('T', -10.1, 0.01, 0.0, "S/N\dMAX\u: %.2f" %sigma) - else: - sp_pgplot.ppgplot.pgmtxt('T', -10.1, 0.01, 0.0, "S/N\dMAX\u: N/A") - sp_pgplot.ppgplot.pgmtxt('T', -11.6, 0.01, 0.0, "Number of samples: %i" %nbins) - sp_pgplot.ppgplot.pgmtxt('T', -13.1, 0.01, 0.0, "Number of subbands: %i" %nsub) - sp_pgplot.ppgplot.pgmtxt('T', -14.6, 0.01, 0.0, "Pulse width: %.2f ms" %(pulse_width*1e3)) - sp_pgplot.ppgplot.pgmtxt('T', -16.1, 0.01, 0.0, "Sampling time: %.3f \gms" %(tsamp*1e6)) - sp_pgplot.ppgplot.pgmtxt('T', -17.6, 0.0, 0.0, "Bary pulse peak time: %.2f s" %(bary_start)) - sp_pgplot.ppgplot.pgsvp(0.07, 0.7, 0.01, 0.05) - sp_pgplot.ppgplot.pgmtxt('T', -2.1, 0.01, 0.0, "%s" %fn) - - #DM vs SNR - if not man_params: - dm_arr = np.float32(spdobj.dmVt_this_dms) - sigma_arr = np.float32 (spdobj.dmVt_this_sigmas) - time_arr = np.float32 (spdobj.dmVt_this_times) - if integrate_spec: - sp_pgplot.ppgplot.pgsvp(0.55, 0.80, 0.65, 0.90) - else: - sp_pgplot.ppgplot.pgsvp(0.48, 0.73, 0.65, 0.90) - sp_pgplot.ppgplot.pgswin(np.min(dm_arr), np.max(dm_arr), 0.95*np.min(sigma_arr), 1.05*np.max(sigma_arr)) - sp_pgplot.ppgplot.pgsch(0.8) - sp_pgplot.ppgplot.pgslw(3) - sp_pgplot.ppgplot.pgbox("BCNST", 0, 0, "BCNST", 0, 0) - sp_pgplot.ppgplot.pgslw(3) - sp_pgplot.ppgplot.pgmtxt('B', 2.5, 0.5, 0.5, "DM (pc cm\u-3\d)") - sp_pgplot.ppgplot.pgmtxt('L', 1.8, 0.5, 0.5, "Signal-to-noise") - sp_pgplot.ppgplot.pgpt(dm_arr, sigma_arr, 20) - else: - dm_arr = np.array([]) - sigma_arr = np.array([]) - time_arr = np.array([]) - if integrate_spec: - 
sp_pgplot.ppgplot.pgsvp(0.55, 0.80, 0.65, 0.90) - else: - sp_pgplot.ppgplot.pgsvp(0.48, 0.73, 0.65, 0.90) - sp_pgplot.ppgplot.pgsch(0.8) - sp_pgplot.ppgplot.pgslw(3) - sp_pgplot.ppgplot.pgbox("BCNST", 0, 0, "BCNST", 0, 0) - sp_pgplot.ppgplot.pgslw(3) - sp_pgplot.ppgplot.pgmtxt('B', 2.5, 0.5, 0.5, "DM (pc cm\u-3\d)") - sp_pgplot.ppgplot.pgmtxt('L', 1.8, 0.5, 0.5, "Signal-to-noise") - - # DM vs Time - print("Making arrays for DM vs time plot") - spfiles = singlepulsefiles - threshold = 5.0 - if len(spfiles) > 2: - dm_list = list(map(np.float32, list(dm_arr))) - time_list = list(map(np.float32, list(time_arr))) - if integrate_spec: - sp_pgplot.ppgplot.pgsvp(0.55, 0.97, 0.1, 0.54) - else: - sp_pgplot.ppgplot.pgsvp(0.48, 0.97, 0.1, 0.54) - dms, times, sigmas, widths, filelist = spio.gen_arrays(dm_arr, spfiles, tar, threshold) - sp_pgplot.dm_time_plot(dms, times, sigmas, dm_list, sigma_arr, time_list, Total_observed_time, xwin) - else: - print("You need a .singlepulse.tgz file to plot DM vs Time plot.") - if integrate_spec: - sp_pgplot.ppgplot.pgsvp(0.55, 0.97, 0.1, 0.54) - else: - sp_pgplot.ppgplot.pgsvp(0.48, 0.97, 0.1, 0.54) - sp_pgplot.ppgplot.pgsch(0.8) - sp_pgplot.ppgplot.pgslw(3) - sp_pgplot.ppgplot.pgbox("BCNST", 0, 0, "BCNST", 0, 0) - sp_pgplot.ppgplot.pgslw(3) - sp_pgplot.ppgplot.pgmtxt('B', 2.5, 0.5, 0.5, "Time (s)") - sp_pgplot.ppgplot.pgmtxt('L', 1.8, 0.5, 0.5, "DM (pc cm\u-3\d)") - else: - #sp_pgplot.ppgplot.pgpap(10.25, 10.0/5.0) - sp_pgplot.ppgplot.pgpap(8.0, 1.5) - # Dedispersed waterfall plot - zerodm - OFF - array = spdobj.data_nozerodm_dedisp.astype(np.float64) - sp_pgplot.ppgplot.pgsvp(0.1, 0.70, 0.44, 0.75) - sp_pgplot.ppgplot.pgswin(datastart - start, datastart -start+datanumspectra*datasamp, min_freq, max_freq) - sp_pgplot.ppgplot.pgsch(0.8) - sp_pgplot.ppgplot.pgslw(3) - sp_pgplot.ppgplot.pgbox("BCST", 0, 0, "BCNST", 0, 0) - sp_pgplot.ppgplot.pgslw(3) - sp_pgplot.ppgplot.pgmtxt('L', 1.8, 0.5, 0.5, "Observing Frequency (MHz)") - sp_pgplot.plot_waterfall(array,rangex = [datastart-start, datastart-start+datanumspectra*datasamp], rangey = [min_freq, max_freq], image = 'apjgrey') - - #### Plot Dedispersed Time series - Zerodm filter - Off - Dedisp_ts = array[::-1].sum(axis = 0) - times = np.arange(datanumspectra)*datasamp - if integrate_ts: - sp_pgplot.ppgplot.pgsvp(0.1, 0.70, 0.75, 0.83) - sp_pgplot.ppgplot.pgswin(datastart - start, datastart-start+duration, np.min(Dedisp_ts), 1.05*np.max(Dedisp_ts)) - sp_pgplot.ppgplot.pgsch(0.8) - sp_pgplot.ppgplot.pgslw(3) - sp_pgplot.ppgplot.pgbox("BC", 0, 0, "BC", 0, 0) - sp_pgplot.ppgplot.pgsci(1) - sp_pgplot.ppgplot.pgline(times,Dedisp_ts) - sp_pgplot.ppgplot.pgslw(3) - sp_pgplot.ppgplot.pgsci(1) - errx1 = np.array([0.60 * (datastart-start+duration)]) - erry1 = np.array([0.60 * np.max(Dedisp_ts)]) - erry2 = np.array([np.std(Dedisp_ts)]) - errx2 = np.array([pulse_width]) - sp_pgplot.ppgplot.pgerrb(5, errx1, erry1, errx2, 1.0) - sp_pgplot.ppgplot.pgpt(errx1, erry1, -1) - - #### Plot Spectrum - Zerodm filter - Off - if integrate_spec: - spectrum_window = spec_width*pulse_width - window_width = int(spectrum_window/datasamp) - #burst_bin = int(datanumspectra*loc_pulse/downsamp) - burst_bin = int(nbins*loc_pulse/downsamp) - on_spec = array[..., burst_bin-window_width:burst_bin+window_width] - Dedisp_spec = on_spec.sum(axis=1) - freqs = np.linspace(min_freq, max_freq, len(Dedisp_spec)) - sp_pgplot.ppgplot.pgsvp(0.7, 0.9, 0.44, 0.75) - sp_pgplot.ppgplot.pgswin(np.min(Dedisp_spec), 1.05*np.max(Dedisp_spec), min_freq, max_freq) - 
sp_pgplot.ppgplot.pgsch(0.8) - sp_pgplot.ppgplot.pgslw(3) - sp_pgplot.ppgplot.pgbox("BC", 0, 0, "BC", 0, 0) - sp_pgplot.ppgplot.pgsci(1) - sp_pgplot.ppgplot.pgline(Dedisp_spec,freqs) - sp_pgplot.ppgplot.pgmtxt('R', 1.8, 0.5, 0.5, "Zero-dm filtering - Off") - sp_pgplot.ppgplot.pgsch(0.7) - sp_pgplot.ppgplot.pgmtxt('T', 1.8, 0.5, 0.5, "Spectrum") - sp_pgplot.ppgplot.pgsch(0.8) - - #Dedispersed waterfall plot - Zerodm ON - array = spdobj.data_zerodm_dedisp.astype(np.float64) - sp_pgplot.ppgplot.pgsvp(0.1, 0.70, 0.05, 0.36) - sp_pgplot.ppgplot.pgswin(datastart-start , datastart-start+datanumspectra*datasamp, min_freq, max_freq) - sp_pgplot.ppgplot.pgsch(0.8) - sp_pgplot.ppgplot.pgslw(3) - sp_pgplot.ppgplot.pgbox("BCNST", 0, 0, "BCNST", 0, 0) - sp_pgplot.ppgplot.pgmtxt('B', 2.5, 0.5, 0.5, "Time - %.2f s"%datastart) - sp_pgplot.ppgplot.pgmtxt('L', 1.8, 0.5, 0.5, "Observing Frequency (MHz)") - sp_pgplot.plot_waterfall(array,rangex = [datastart-start, datastart-start+datanumspectra*datasamp],rangey = [min_freq, max_freq],image = 'apjgrey') - - - #### Plot Dedispersed Time series - Zerodm filter - On - dedisp_ts = array[::-1].sum(axis = 0) - times = np.arange(datanumspectra)*datasamp - if integrate_ts: - sp_pgplot.ppgplot.pgsvp(0.1, 0.7, 0.36, 0.44) - sp_pgplot.ppgplot.pgswin(datastart - start, datastart-start+duration, np.min(dedisp_ts), 1.05*np.max(dedisp_ts)) - sp_pgplot.ppgplot.pgsch(0.8) - sp_pgplot.ppgplot.pgslw(3) - sp_pgplot.ppgplot.pgbox("BC", 0, 0, "BC", 0, 0) - sp_pgplot.ppgplot.pgsci(1) - sp_pgplot.ppgplot.pgline(times,dedisp_ts) - errx1 = np.array([0.60 * (datastart-start+duration)]) - erry1 = np.array([0.60 * np.max(dedisp_ts)]) - erry2 = np.array([np.std(dedisp_ts)]) - errx2 = np.array([pulse_width]) - sp_pgplot.ppgplot.pgerrb(5, errx1, erry1, errx2, 1.0) - sp_pgplot.ppgplot.pgpt(errx1, erry1, -1) - - #### Plot Spectrum - Zerodm filter - On - if integrate_spec: - spectrum_window = spec_width*pulse_width - window_width = int(spectrum_window/datasamp) - #burst_bin = int(datanumspectra*loc_pulse/downsamp) - burst_bin = int(nbins*loc_pulse/downsamp) - on_spec = array[..., burst_bin-window_width:burst_bin+window_width] - Dedisp_spec = on_spec.sum(axis=1) - freqs = np.linspace(min_freq, max_freq, len(Dedisp_spec)) - sp_pgplot.ppgplot.pgsvp(0.70, 0.90, 0.05, 0.36) - sp_pgplot.ppgplot.pgswin(np.min(Dedisp_spec), 1.05*np.max(Dedisp_spec), min_freq, max_freq) - sp_pgplot.ppgplot.pgsch(0.8) - sp_pgplot.ppgplot.pgslw(3) - sp_pgplot.ppgplot.pgbox("BC", 0, 0, "BC", 0, 0) - sp_pgplot.ppgplot.pgsci(1) - sp_pgplot.ppgplot.pgline(Dedisp_spec,freqs) - sp_pgplot.ppgplot.pgmtxt('R', 1.8, 0.5, 0.5, "Zero-dm filtering - On") - sp_pgplot.ppgplot.pgsch(0.7) - sp_pgplot.ppgplot.pgmtxt('T', 1.8, 0.5, 0.5, "Spectrum") - sp_pgplot.ppgplot.pgsch(0.8) - if disp_pulse: - # Sweeped waterfall plot Zerodm - OFF - array = spdobj.data_nozerodm.astype(np.float64) - sp_pgplot.ppgplot.pgsvp(0.3, 0.70, 0.44, 0.65) - sp_pgplot.ppgplot.pgswin(sweeped_start, sweeped_start+sweep_duration, min_freq, max_freq) - sp_pgplot.ppgplot.pgsch(0.8) - sp_pgplot.ppgplot.pgslw(4) - sp_pgplot.ppgplot.pgbox("BCST", 0, 0, "BCST", 0, 0) - sp_pgplot.ppgplot.pgsch(3) - sp_pgplot.plot_waterfall(array,rangex = [sweeped_start, sweeped_start+sweep_duration],rangey = [min_freq, max_freq],image = 'apjgrey') - delays = spdobj.dmsweep_delays - freqs = spdobj.dmsweep_freqs - sp_pgplot.ppgplot.pgslw(5) - sweepstart = sweeped_start- 0.2*sweep_duration - sp_pgplot.ppgplot.pgsci(0) - sp_pgplot.ppgplot.pgline(delays+sweepstart, freqs) - 
sp_pgplot.ppgplot.pgsci(1) - sp_pgplot.ppgplot.pgslw(3) - - # Sweeped waterfall plot Zerodm - ON - array = spdobj.data_zerodm.astype(np.float64) - sp_pgplot.ppgplot.pgsvp(0.3, 0.70, 0.05, 0.25) - sp_pgplot.ppgplot.pgswin(sweeped_start, sweeped_start+sweep_duration, min_freq, max_freq) - sp_pgplot.ppgplot.pgsch(0.8) - sp_pgplot.ppgplot.pgslw(4) - sp_pgplot.ppgplot.pgbox("BCST", 0, 0, "BCST", 0, 0) - sp_pgplot.ppgplot.pgsch(3) - sp_pgplot.plot_waterfall(array,rangex = [sweeped_start, sweeped_start+sweep_duration],rangey = [min_freq, max_freq],image = 'apjgrey') - sp_pgplot.ppgplot.pgslw(5) - sweepstart = sweeped_start- 0.2*sweep_duration - sp_pgplot.ppgplot.pgsci(0) - sp_pgplot.ppgplot.pgline(delays+sweepstart, freqs) - sp_pgplot.ppgplot.pgsci(1) - - #### Figure texts - sp_pgplot.ppgplot.pgsvp(0.05, 0.95, 0.8, 0.9) - sp_pgplot.ppgplot.pgsch(0.65) - sp_pgplot.ppgplot.pgslw(3) - sp_pgplot.ppgplot.pgmtxt('T', -1.1, 0.01, 0.0, "RA: %s" %RA) - sp_pgplot.ppgplot.pgmtxt('T', -2.5, 0.01, 0.0, "DEC: %s" %dec) - sp_pgplot.ppgplot.pgmtxt('T', -3.9, 0.01, 0.0, "MJD: %f" %MJD) - sp_pgplot.ppgplot.pgmtxt('T', -5.3, 0.01, 0.0, "Obs date: %s %s %s" %(date[0], date[1], date[2])) - sp_pgplot.ppgplot.pgmtxt('T', -1.1, 0.35, 0.0, "Telescope: %s" %telescope) - sp_pgplot.ppgplot.pgmtxt('T', -2.5, 0.35, 0.0, "DM: %.2f pc cm\u-3\d" %dm) - if sigma: - sp_pgplot.ppgplot.pgmtxt('T', -3.9, 0.35, 0.0, "S/N\dMAX\u: %.2f" %sigma) - else: - sp_pgplot.ppgplot.pgmtxt('T', -3.9, 0.35, 0.0, "S/N\dMAX\u: N/A") - sp_pgplot.ppgplot.pgmtxt('T', -5.3, 0.35, 0.0, "Number of samples: %i" %nbins) - sp_pgplot.ppgplot.pgmtxt('T', -1.1, 0.65, 0.0, "Number of subbands: %i" %nsub) - sp_pgplot.ppgplot.pgmtxt('T', -2.5, 0.65, 0.0, "Pulse width: %.2f ms" %(pulse_width*1e3)) - sp_pgplot.ppgplot.pgmtxt('T', -3.9, 0.65, 0.0, "Sampling time: %.3f \gms" %(tsamp*1e6)) - sp_pgplot.ppgplot.pgmtxt('T', -5.3, 0.65, 0.0, "Bary pulse peak time: %.2f s" %(bary_start)) - sp_pgplot.ppgplot.pgiden() - sp_pgplot.ppgplot.pgclos() - -def main(): - parser = optparse.OptionParser(prog="plot_spd.py", \ - usage = "%prog [OPTIONS] INFILE (.spd file) INFILES (.singlepulse files)") - parser.add_option("-x", "--xwin", action="store_true", dest="xwin", - default=False, help="Don't make a postscript plot, just use an X-window") - parser.add_option("-o", dest= "outfile", type = "string", default = "spdplot", \ - help= "give a base name to the saved plot. DM, time and" \ - "rank values will be added automatically" ) - parser.add_option("--spec-width", dest="spec_width", type="float", help="Twice this number times the pulse width" \ - "is the window around the pulse considered for the spectrum. (Default: 1.5)", \ - default=1.5) - parser.add_option("--loc", dest="loc_pulse", type="float", help="Fraction of the window length where the pulse is located." 
\ - "(Default: 0.5 half way in.)", \ - default=0.5) - parser.add_option("--just-waterfall", action="store_true", dest="just_waterfall", - default=False, help="Just produce the waterfall plots.") - parser.add_option("--show-spec", action="store_true", dest="integrate_spec", - default=False, help="Show spectrum.(Default: Show spectrum)") - parser.add_option("--show-ts", action="store_true", dest="integrate_ts", - default=False, help="Show time series.(Default: Don't show time series)") - parser.add_option("--show-sweep", action="store_true", dest="disp_pulse", - default=False, help="Show dispersed pulse.(Default: Don't show dispersed pulse)") - (options, args) = parser.parse_args() - - if len(args) == 0: - raise ValueError("need a .spd file and .singlepulse files in that order.") - if not args[0].endswith(".spd"): - raise ValueError("the first file must be a .spd file") - if len(args) == 2: - tar = tarfile.open(args[1], "r:gz")# read in the tarball - filenames = tar.getnames()# get the filenames - plot(args[0], filenames, options.spec_width, options.loc_pulse, options.xwin, options.outfile, options.just_waterfall, \ - options.integrate_spec, options.integrate_ts, options.disp_pulse, tar)# make the sp plots - tar.close() - else: - plot(args[0], args[1:], options.spec_width, options.loc_pulse, options.xwin, options.outfile, options.just_waterfall, \ - options.integrate_spec, options.integrate_ts, options.disp_pulse, tar = None)# make the sp plots +from presto.singlepulse.plot_spd import main if __name__ == '__main__': - main() + main() diff --git a/bin/psrfits2fil.py b/bin/psrfits2fil.py index 482a1ccff..92ec10195 100755 --- a/bin/psrfits2fil.py +++ b/bin/psrfits2fil.py @@ -30,7 +30,7 @@ def translate_header(psrfits_file): fil_header["data_type"] = 1 # filterbank fn = psrfits_file.filename - fil_header["rawdatafile"] = os.path.basename(fn) + fil_header["rawdatafile"] = os.path.basename(fn) fil_header["source_name"] = fits_hdr['SRC_NAME'] fil_header["barycentric"] = 0 # always not barycentered? fil_header["pulsarcentric"] = 0 # whats pulsarcentric? 
@@ -54,13 +54,13 @@ def translate_header(psrfits_file): fil_header["nifs"] = subint_hdr['NPOL'] return fil_header - + def main(fits_fn, outfn, nbits, \ apply_weights, apply_scales, apply_offsets): start = time.time() psrfits_file = psrfits.PsrfitsFile(fits_fn) - fil_header = translate_header(psrfits_file) + fil_header = translate_header(psrfits_file) fil_header['nbits'] = nbits outfil = filterbank.create_filterbank_file(outfn, fil_header, \ nbits=nbits) @@ -92,7 +92,7 @@ def main(fits_fn, outfn, nbits, \ print("\tScaling data by",1/scale_fac) print("\tValues larger than",new_max,"(pre-scaling) "\ "will be set to",2.0**nbits - 1,"\n") - + else: scale = False scale_fac = 1 @@ -107,14 +107,13 @@ def main(fits_fn, outfn, nbits, \ sys.stdout.flush() oldpcnt = "" for isub in range(int(psrfits_file.nsubints)): - subint = psrfits_file.read_subint(isub, \ - apply_weights, apply_scales, apply_offsets) + subint = psrfits_file.read_subint(isub, apply_weights, apply_scales, apply_offsets) if flip_band: subint = np.fliplr(subint) - subint /= scale_fac - outfil.append_spectra(subint) - pcnt = "%d" % (isub*100.0/psrfits_file.nsubints) - if pcnt != oldpcnt: + subint /= scale_fac + outfil.append_spectra(subint) + pcnt = "%d" % (isub*100.0/psrfits_file.nsubints) + if pcnt != oldpcnt: sys.stdout.write("% 4s%% complete\r" % pcnt) sys.stdout.flush() @@ -123,29 +122,30 @@ def main(fits_fn, outfn, nbits, \ print("Runtime:",time.time() - start) + if __name__=='__main__': - parser = optparse.OptionParser(prog='psrfits2fil.py', \ - version="v0.2 Paul Scholz, Patrick Lazarus (Sept 2012)", \ + parser = optparse.OptionParser(prog='psrfits2fil.py', + version="v0.2 Paul Scholz, Patrick Lazarus (Sept 2012)", usage = "usage: %prog [options] input_fits") parser.add_option("-n",dest='nbits', action='store', default=8, type='int', - help="The number of bits in the output .fil file. " +\ + help="The number of bits in the output .fil file. " + "Default=8") parser.add_option("-o",dest='outfn',action='store', - default=None, type='string', - help="The filename of the output filterbank file. " +\ + default=None, type='string', + help="The filename of the output filterbank file. 
" + "Default: same as .fits input but with .fil extn") - parser.add_option("--noweights", dest='apply_weights', \ - default=True, action="store_false", \ + parser.add_option("--noweights", dest='apply_weights', + default=True, action="store_false", help="Do not apply weights when converting data.") - parser.add_option("--noscales", dest='apply_scales', \ - default=True, action="store_false", \ + parser.add_option("--noscales", dest='apply_scales', + default=True, action="store_false", help="Do not apply scales when converting data.") - parser.add_option("--nooffsets", dest='apply_offsets', \ - default=True, action="store_false", \ + parser.add_option("--nooffsets", dest='apply_offsets', + default=True, action="store_false", help="Do not apply offsets when converting data.") (options, args) = parser.parse_args() - + fits_fn = args[0] if options.outfn: @@ -153,5 +153,5 @@ def main(fits_fn, outfn, nbits, \ else: outfn = '.'.join(fits_fn.split('.')[:-1]) + '.fil' - main(fits_fn, outfn, options.nbits, options.apply_weights, \ + main(fits_fn, outfn, options.nbits, options.apply_weights, options.apply_scales, options.apply_offsets) diff --git a/bin/pulsestack.py b/bin/pulsestack.py index 67a4d6af6..f1dbe601a 100755 --- a/bin/pulsestack.py +++ b/bin/pulsestack.py @@ -31,7 +31,7 @@ import optparse as opt import presto.infodata as inf import presto.polycos as poly - +from presto import sigproc # importing VariableColormap from kapteyn module of it exists try: @@ -46,87 +46,88 @@ # get the period from the polyco-file with the fileid=pid # using the best record in the polyco-file for the mjd=mjd def get_period (id, mjd): - return 1. / id.get_phs_and_freq(float(str(mjd).split(".")[0]), float("0."+str(mjd).split(".")[1]))[1] + return 1. / id.get_phs_and_freq(float(str(mjd).split(".")[0]), float("0."+str(mjd).split(".")[1]))[1] + # I took this part from the Scott's single_pulse_search.py def detrending (data, is_fast): - # Compute the file length in detrendlens - roundN = np.size(data)/detrendlen * detrendlen - data = data[:roundN] # here we redefining the arrat and loosing some samples - # Split the data into chunks for detrending - numblocks = roundN/detrendlen - data.shape = (numblocks, detrendlen) - stds = np.zeros(numblocks, dtype=np.float64) - # de-trend the data one chunk at a time and compute statistics - for ii, chunk in enumerate(data): - if is_fast: # use median removal instead of detrending (2x speedup) - tmpchunk = chunk.copy() - tmpchunk.sort() - med = tmpchunk[detrendlen/2] - chunk -= med - tmpchunk -= med - else: - # The detrend calls are the most expensive in the program - data[ii] = scipy.signal.detrend(chunk, type='linear') - tmpchunk = data[ii].copy() - tmpchunk.sort() - # The following gets rid of (hopefully) most of the - # outlying values (i.e. power dropouts and single pulses) - # If you throw out 5% (2.5% at bottom and 2.5% at top) - # of random gaussian deviates, the measured stdev is ~0.871 - # of the true stdev. Thus the 1.0/0.871=1.148 correction below. 
- # The following is roughly .std() since we already removed the median - stds[ii] = np.sqrt((tmpchunk[detrendlen/40:-detrendlen/40]**2.0).sum() / (0.95*detrendlen)) - - stds *= 1.148 - # sort the standard deviations and separate those with - # very low or very high values - sort_stds = stds.copy() - sort_stds.sort() - # identify the differences with the larges values (this - # will split off the chunks with very low and very high stds - locut = (sort_stds[1:numblocks/2+1] - sort_stds[:numblocks/2]).argmax() + 1 - hicut = (sort_stds[numblocks/2+1:] - sort_stds[numblocks/2:-1]).argmax() + numblocks/2 - 2 - std_stds = scipy.std(sort_stds[locut:hicut]) - median_stds = sort_stds[(locut+hicut)/2] - lo_std = median_stds - 4.0 * std_stds - hi_std = median_stds + 4.0 * std_stds - # Determine a list of "bad" chunks. We will not search these. - bad_blocks = np.nonzero((stds < lo_std) | (stds > hi_std))[0] - stds[bad_blocks] = median_stds - - # Now normalize all of the data and reshape it to 1-D - data /= stds[:,np.newaxis] - data.shape = (roundN,) - - return data + # Compute the file length in detrendlens + roundN = np.size(data)/detrendlen * detrendlen + data = data[:roundN] # here we redefining the arrat and loosing some samples + # Split the data into chunks for detrending + numblocks = roundN/detrendlen + data.shape = (numblocks, detrendlen) + stds = np.zeros(numblocks, dtype=np.float64) + # de-trend the data one chunk at a time and compute statistics + for ii, chunk in enumerate(data): + if is_fast: # use median removal instead of detrending (2x speedup) + tmpchunk = chunk.copy() + tmpchunk.sort() + med = tmpchunk[detrendlen/2] + chunk -= med + tmpchunk -= med + else: + # The detrend calls are the most expensive in the program + data[ii] = scipy.signal.detrend(chunk, type='linear') + tmpchunk = data[ii].copy() + tmpchunk.sort() + # The following gets rid of (hopefully) most of the + # outlying values (i.e. power dropouts and single pulses) + # If you throw out 5% (2.5% at bottom and 2.5% at top) + # of random gaussian deviates, the measured stdev is ~0.871 + # of the true stdev. Thus the 1.0/0.871=1.148 correction below. + # The following is roughly .std() since we already removed the median + stds[ii] = np.sqrt((tmpchunk[detrendlen/40:-detrendlen/40]**2.0).sum() / (0.95*detrendlen)) + + stds *= 1.148 + # sort the standard deviations and separate those with + # very low or very high values + sort_stds = stds.copy() + sort_stds.sort() + # identify the differences with the larges values (this + # will split off the chunks with very low and very high stds + locut = (sort_stds[1:numblocks/2+1] - sort_stds[:numblocks/2]).argmax() + 1 + hicut = (sort_stds[numblocks/2+1:] - sort_stds[numblocks/2:-1]).argmax() + numblocks/2 - 2 + std_stds = scipy.std(sort_stds[locut:hicut]) + median_stds = sort_stds[(locut+hicut)/2] + lo_std = median_stds - 4.0 * std_stds + hi_std = median_stds + 4.0 * std_stds + # Determine a list of "bad" chunks. We will not search these. 
+ bad_blocks = np.nonzero((stds < lo_std) | (stds > hi_std))[0] + stds[bad_blocks] = median_stds + + # Now normalize all of the data and reshape it to 1-D + data /= stds[:,np.newaxis] + data.shape = (roundN,) + + return data # gives the short list of options without explanations def list_options(prg): - print("Usage: %s [options] <.dat OR .tim (use --tim option)>" % (prg)) - print() - print("Options:") - print(" [-h, --help] [-n, --nbins #BINS] [-p, --pulsar NAME]") - print(" [--polyco FILE] [--period PERIOD] [-b, --block SAMPLES]") - print(" [-s, --startphase PHASE] [-e, --endphase PHASE] [--start TIME]") - print(" [-w, --window TIME] [-f, --fast-detrend] [--no-detrend]") - print(" [-t, --timeseries] [-a, --rebin FACTOR] [-y, --profileonly]") - print(" [-k, --stacking] [--offset OFFSET] [-d, --dump #PULSES | TIME]") - print(" [--saveprof FILE] [--saveonly] [-i, --image FILEEXT]") - print(" [--tim] [--events] [-l, --list]") - print(" [-2, --double] [-m, --mjd MJD] [--tsamp TIME]") - print(" [--chandra]") - print() - print("Graphics Options:") - print(" [--fontsize SIZE] [--color COLOR] [--linestyle STYLE]") - print(" [--linewidth WIDTH] [--marker TYPE] [--markercolor COLOR]") - print(" [--markerwidth WIDTH] [--markersize SIZE] [--facecolor COLOR]") - print(" [--cmap COLORMAP] [-c, --colorbar] [--title STR]") - print(" [--legend STR] [--loc STR] [-g, --grid]") - print(" [--titlepos STR] [--label STR] [--labelpos STR]") - print(" [--no-top-axis] [--no-right-axis]") - if kapteyn_loaded: - print(" [--cmap-scale SCALE]") + print("Usage: %s [options] <.dat OR .tim (use --tim option)>" % (prg)) + print() + print("Options:") + print(" [-h, --help] [-n, --nbins #BINS] [-p, --pulsar NAME]") + print(" [--polyco FILE] [--period PERIOD] [-b, --block SAMPLES]") + print(" [-s, --startphase PHASE] [-e, --endphase PHASE] [--start TIME]") + print(" [-w, --window TIME] [-f, --fast-detrend] [--no-detrend]") + print(" [-t, --timeseries] [-a, --rebin FACTOR] [-y, --profileonly]") + print(" [-k, --stacking] [--offset OFFSET] [-d, --dump #PULSES | TIME]") + print(" [--saveprof FILE] [--saveonly] [-i, --image FILEEXT]") + print(" [--tim] [--events] [-l, --list]") + print(" [-2, --double] [-m, --mjd MJD] [--tsamp TIME]") + print(" [--chandra]") + print() + print("Graphics Options:") + print(" [--fontsize SIZE] [--color COLOR] [--linestyle STYLE]") + print(" [--linewidth WIDTH] [--marker TYPE] [--markercolor COLOR]") + print(" [--markerwidth WIDTH] [--markersize SIZE] [--facecolor COLOR]") + print(" [--cmap COLORMAP] [-c, --colorbar] [--title STR]") + print(" [--legend STR] [--loc STR] [-g, --grid]") + print(" [--titlepos STR] [--label STR] [--labelpos STR]") + print(" [--no-top-axis] [--no-right-axis]") + if kapteyn_loaded: + print(" [--cmap-scale SCALE]") ################################################################################################################################### @@ -137,101 +138,101 @@ def list_options(prg): # # Parsing the command line options # - usage = "Usage: %prog [options] <.dat OR .tim (use --tim option)>" - cmdline = opt.OptionParser(usage) - cmdline.add_option('-n', '--nbins', dest='nbins', metavar='#BINS', - help="number of phase bins per pulsar period (default: number of samples)", default=-1, type='int') - cmdline.add_option('-p', '--pulsar', dest='psrname', metavar='NAME', - help="pulsar name to be used for polyco instead of inf-file", type='str') - cmdline.add_option('--polyco', dest='polycofile', metavar='FILE', - help="polyco file to be used for folding (default: %default)", 
default="polyco.dat", type='str') - cmdline.add_option('--period', dest='period', metavar='PERIOD', - help="period in ms for folding. If not given then will use 'polyco.dat'", default=-1, type='float') - cmdline.add_option('-s', '--startphase', dest='phase_start', metavar='PHASE', - help="start phase to plot", default=0.0, type='float') - cmdline.add_option('-e', '--endphase', dest='phase_end', metavar='PHASE', - help="end phase to plot", default=1.0, type='float') - cmdline.add_option('--start', dest='start_time', metavar='TIME', - help="time offset from the start in seconds (default: %default)", default=0, type='float') - cmdline.add_option('-w', '--window', dest='window_time', metavar='TIME', - help="duration of the window in seconds (default: whole file)", default=-1, type='float') - cmdline.add_option('-b', '--block', dest='blocksize', metavar='SAMPLES', - help="size of the block for reading the dat-file when folding profile. When using polyco file " - "period is updated for every block. So, for very close binaries this block size " - "should be probably very small. The smallest is 3000. Default: %default", default=1000000, type='int') - cmdline.add_option('-f', '--fast-detrend', action="store_true", dest="fast_detrend", + usage = "Usage: %prog [options] <.dat OR .tim (use --tim option)>" + cmdline = opt.OptionParser(usage) + cmdline.add_option('-n', '--nbins', dest='nbins', metavar='#BINS', + help="number of phase bins per pulsar period (default: number of samples)", default=-1, type='int') + cmdline.add_option('-p', '--pulsar', dest='psrname', metavar='NAME', + help="pulsar name to be used for polyco instead of inf-file", type='str') + cmdline.add_option('--polyco', dest='polycofile', metavar='FILE', + help="polyco file to be used for folding (default: %default)", default="polyco.dat", type='str') + cmdline.add_option('--period', dest='period', metavar='PERIOD', + help="period in ms for folding. If not given then will use 'polyco.dat'", default=-1, type='float') + cmdline.add_option('-s', '--startphase', dest='phase_start', metavar='PHASE', + help="start phase to plot", default=0.0, type='float') + cmdline.add_option('-e', '--endphase', dest='phase_end', metavar='PHASE', + help="end phase to plot", default=1.0, type='float') + cmdline.add_option('--start', dest='start_time', metavar='TIME', + help="time offset from the start in seconds (default: %default)", default=0, type='float') + cmdline.add_option('-w', '--window', dest='window_time', metavar='TIME', + help="duration of the window in seconds (default: whole file)", default=-1, type='float') + cmdline.add_option('-b', '--block', dest='blocksize', metavar='SAMPLES', + help="size of the block for reading the dat-file when folding profile. When using polyco file " + "period is updated for every block. So, for very close binaries this block size " + "should be probably very small. The smallest is 3000. Default: %default", default=1000000, type='int') + cmdline.add_option('-f', '--fast-detrend', action="store_true", dest="fast_detrend", help="Use a faster method of de-trending the time-series (2x speedup). 
" "Sometimes (for strong pulsars (?), many strong pulses of which could effect calculation " "of the linear trend) this method produces even better results when only median is subtracted", default=False) - cmdline.add_option('--no-detrend', action="store_true", dest="is_no_detrend", - help="do not detrend the data", default=False) - cmdline.add_option('-a', '--rebin', dest='rebin', metavar='FACTOR', - help="averaging time series by FACTOR (default: %default)", default=1, type='int') - cmdline.add_option('-t', '--timeseries', action="store_true", dest="is_timeseries", - help="no folding. Time series will be plotted", default=False) - cmdline.add_option('-y', '--profileonly', action="store_true", dest="is_profileonly", - help="only plot the average profile. No stack of pulses or subints", default=False) - cmdline.add_option('-2', '--double', action="store_true", dest="is_period_doubled", - help="plot doubled-period profile (only when options -s and -e are not given)", default=False) - cmdline.add_option('-k', '--stacking', action="store_true", dest="is_stacking", - help="Plot series of pulses or subints in stacking mode. Default is grey-scale mode", default=False) - cmdline.add_option('-d', '--dump', dest='dump', metavar='#PULSES | TIME', - help="number of pulses or time (if . given) to dump for subintegrations", default='', type='str') - cmdline.add_option('--offset', dest='offset', metavar='OFFSET', - help="Offset between individual profiles in stacking mode. Default = %default. " - "Offset is in the same units as for profiles' flux density. " - "Only positive (>=0) offsets are allowed", default=1.0, type='float') - cmdline.add_option('--saveprof', dest='proffile', metavar='FILE', - help="save profile to binary file FILE", default='', type='str') - cmdline.add_option('--saveonly', action="store_true", dest="is_saveonly", - help="only saves png-file and exits", default=False) - cmdline.add_option('-i', '--image', dest='imageext', metavar='FILEEXT', - help="image file extension when used with --saveonly (default: %default)", default='png', type='str') - cmdline.add_option('--tim', action="store_true", dest="is_timfile", - help="input file is Sigproc-style tim-file. None inf-file is necessary in this case", default=False) - cmdline.add_option('-l', '--list', action="store_true", dest="is_printlist", - help="print short list of all options", default=False) - cmdline.add_option('--events', action="store_true", dest="is_events", - help="input file is ascii file with event time in secs. Must use --mjd option to provide the start MJD. " - "Number of bins _must_ be also given with --nbins option unless --timeseries is set", default=False) - cmdline.add_option('-m', '--mjd', dest='mjd', metavar='MJD', - help="start MJD of the data. By default, is read from .inf file or tim-file header. " - "If events file is used, it _must_ be given or --chandra is used", default='', type='str') - cmdline.add_option('--chandra', action="store_true", dest="is_chandra", - help="events file is Chandra file, so start MJD is set to 50814.0 (Chandra reference MJD)", default=False) - cmdline.add_option('--tsamp', dest='tsamp', metavar='TIME', - help="sampling time in sec of the data. 
By default, is read from .inf file or tim-file header", default='', type='str') - group = opt.OptionGroup(cmdline, "Graphics Options") - group.add_option('--fontsize', dest='fs', metavar='SIZE', - help="font size for labels (default: %default)", default=10, type='int') - group.add_option('--color', dest='color', metavar='COLOR', - help="line and marker color (default: %default)", default='green', type='str') - group.add_option('--linestyle', dest='linestyle', metavar='STYLE', - help="set linestyle. Default is '%default' (solid). " - "Other possible values: '--', '-.', ':', 'None', ' ', '' with possible combination " - "with 'steps', 'steps-pre', 'steps-mid', or 'steps-post'. In stacking mode only " - "'-', '--', '-.', ':' are possible, or their aliases: 'solid', 'dashed', 'dashdot', " - "and 'dotted'", default='-', type='str') - group.add_option('--linewidth', dest='linewidth', metavar='WIDTH', - help="set linewidth. Default is '%default'", default='1.0', type='float') - group.add_option('--marker', dest='marker', metavar='TYPE', - help="set line and marker color (default: %default). Use 'None' to not use the marker symbol", default=',', type='str') - group.add_option('--markercolor', dest='markercolor', metavar='COLOR', - help="set only marker color (default: %default)", default='green', type='str') - group.add_option('--markerwidth', dest='markerwidth', metavar='WIDTH', - help="set marker width (default: %default)", default='1.0', type='float') - group.add_option('--markersize', dest='markersize', metavar='SIZE', - help="set marker size (default: %default)", default='6.0', type='float') - group.add_option('--facecolor', dest='facecolor', metavar='COLOR', - help="marker facecolor or fill color in stacking mode (default: %default)", default='white', type='str') - group.add_option('--cmap', dest='colormap', metavar='COLORMAP', - help="set colormap for plotting pulse stack. Default: %default. " - "Other good colormaps: gray, gist_yarg (reversed gray), " - "gist_stern, hot, jet, pink, gist_heat, gist_gray, copper, ...", default='gist_earth', type='str') - if kapteyn_loaded: - group.add_option('--cmap-scale', dest='cmap_scaling', metavar='SCALE', - help="set colormap scaling. Default: %default. " - "Other possible values: SQRT, LOG, EXP, SQUARE", default='LINEAR', type='str') + cmdline.add_option('--no-detrend', action="store_true", dest="is_no_detrend", + help="do not detrend the data", default=False) + cmdline.add_option('-a', '--rebin', dest='rebin', metavar='FACTOR', + help="averaging time series by FACTOR (default: %default)", default=1, type='int') + cmdline.add_option('-t', '--timeseries', action="store_true", dest="is_timeseries", + help="no folding. Time series will be plotted", default=False) + cmdline.add_option('-y', '--profileonly', action="store_true", dest="is_profileonly", + help="only plot the average profile. No stack of pulses or subints", default=False) + cmdline.add_option('-2', '--double', action="store_true", dest="is_period_doubled", + help="plot doubled-period profile (only when options -s and -e are not given)", default=False) + cmdline.add_option('-k', '--stacking', action="store_true", dest="is_stacking", + help="Plot series of pulses or subints in stacking mode. Default is grey-scale mode", default=False) + cmdline.add_option('-d', '--dump', dest='dump', metavar='#PULSES | TIME', + help="number of pulses or time (if . 
given) to dump for subintegrations", default='', type='str') + cmdline.add_option('--offset', dest='offset', metavar='OFFSET', + help="Offset between individual profiles in stacking mode. Default = %default. " + "Offset is in the same units as for profiles' flux density. " + "Only positive (>=0) offsets are allowed", default=1.0, type='float') + cmdline.add_option('--saveprof', dest='proffile', metavar='FILE', + help="save profile to binary file FILE", default='', type='str') + cmdline.add_option('--saveonly', action="store_true", dest="is_saveonly", + help="only saves png-file and exits", default=False) + cmdline.add_option('-i', '--image', dest='imageext', metavar='FILEEXT', + help="image file extension when used with --saveonly (default: %default)", default='png', type='str') + cmdline.add_option('--tim', action="store_true", dest="is_timfile", + help="input file is Sigproc-style tim-file. None inf-file is necessary in this case", default=False) + cmdline.add_option('-l', '--list', action="store_true", dest="is_printlist", + help="print short list of all options", default=False) + cmdline.add_option('--events', action="store_true", dest="is_events", + help="input file is ascii file with event time in secs. Must use --mjd option to provide the start MJD. " + "Number of bins _must_ be also given with --nbins option unless --timeseries is set", default=False) + cmdline.add_option('-m', '--mjd', dest='mjd', metavar='MJD', + help="start MJD of the data. By default, is read from .inf file or tim-file header. " + "If events file is used, it _must_ be given or --chandra is used", default='', type='str') + cmdline.add_option('--chandra', action="store_true", dest="is_chandra", + help="events file is Chandra file, so start MJD is set to 50814.0 (Chandra reference MJD)", default=False) + cmdline.add_option('--tsamp', dest='tsamp', metavar='TIME', + help="sampling time in sec of the data. By default, is read from .inf file or tim-file header", default='', type='str') + group = opt.OptionGroup(cmdline, "Graphics Options") + group.add_option('--fontsize', dest='fs', metavar='SIZE', + help="font size for labels (default: %default)", default=10, type='int') + group.add_option('--color', dest='color', metavar='COLOR', + help="line and marker color (default: %default)", default='green', type='str') + group.add_option('--linestyle', dest='linestyle', metavar='STYLE', + help="set linestyle. Default is '%default' (solid). " + "Other possible values: '--', '-.', ':', 'None', ' ', '' with possible combination " + "with 'steps', 'steps-pre', 'steps-mid', or 'steps-post'. In stacking mode only " + "'-', '--', '-.', ':' are possible, or their aliases: 'solid', 'dashed', 'dashdot', " + "and 'dotted'", default='-', type='str') + group.add_option('--linewidth', dest='linewidth', metavar='WIDTH', + help="set linewidth. Default is '%default'", default='1.0', type='float') + group.add_option('--marker', dest='marker', metavar='TYPE', + help="set line and marker color (default: %default). 
Use 'None' to not use the marker symbol", default=',', type='str') + group.add_option('--markercolor', dest='markercolor', metavar='COLOR', + help="set only marker color (default: %default)", default='green', type='str') + group.add_option('--markerwidth', dest='markerwidth', metavar='WIDTH', + help="set marker width (default: %default)", default='1.0', type='float') + group.add_option('--markersize', dest='markersize', metavar='SIZE', + help="set marker size (default: %default)", default='6.0', type='float') + group.add_option('--facecolor', dest='facecolor', metavar='COLOR', + help="marker facecolor or fill color in stacking mode (default: %default)", default='white', type='str') + group.add_option('--cmap', dest='colormap', metavar='COLORMAP', + help="set colormap for plotting pulse stack. Default: %default. " + "Other good colormaps: gray, gist_yarg (reversed gray), " + "gist_stern, hot, jet, pink, gist_heat, gist_gray, copper, ...", default='gist_earth', type='str') + if kapteyn_loaded: + group.add_option('--cmap-scale', dest='cmap_scaling', metavar='SCALE', + help="set colormap scaling. Default: %default. " + "Other possible values: SQRT, LOG, EXP, SQUARE", default='LINEAR', type='str') group.add_option('-c', '--colorbar', action="store_true", dest="is_colorbar", help="plot colorbar when plotting pulse stack", default=False) group.add_option('--title', dest='title', metavar='STR', @@ -260,736 +261,738 @@ def list_options(prg): help="turn off the different labeling on the top axis", default=False) group.add_option('--no-right-axis', action="store_true", dest="is_no_right_axis", help="turn off the different labeling on the right axis", default=False) - cmdline.add_option_group(group) - - # reading cmd options - (opts,args) = cmdline.parse_args() - - # print short list of options - if opts.is_printlist: - list_options(sys.argv[0].split("/")[-1]) - sys.exit(0) - - # check if input file is given - if len(args) != 0: - datfile = args[0] - else: - #cmdline.print_help() - cmdline.print_usage() - sys.exit(0) - - # importing matplotlib - if opts.is_saveonly: - import matplotlib - matplotlib.use("Agg") - pngname = datfile.split(".dat")[0] + "." + opts.imageext - else: - import matplotlib - - import matplotlib.pyplot as plt - import matplotlib.ticker as ticker - import matplotlib.cm as cm - import matplotlib.collections as collections - import matplotlib.patches as patches - import matplotlib.font_manager as fm - - headersize = 0 # size in bytes of the header - # if input file is Sigproc-style tim-file - if opts.is_timfile: - try: - filhdr, headersize = sigproc.read_header(datfile) - startmjd = filhdr['tstart'] - tsamp = filhdr['tsamp'] - source = filhdr['source_name'] - except: - print("Error: Can't open the tim-file '%s'!" 
% (datfile,)) - sys.exit(1) - - elif opts.is_events: - opts.is_no_detrend = True # we don't do detrending for events - if opts.mjd == '' and not opts.is_chandra: - print("Error: for events' file start MJD _must_ be given with --mjd option or --chandra option!") - sys.exit(1) - if opts.nbins == -1: - print("Error: number of bins _must_ be given with --nbins option!") - sys.exit(1) - if opts.rebin != 1: - print("Event data can not be re-binned") - opts.rebin = 1 - else: - if (opts.mjd == '' and not opts.is_chandra) or opts.tsamp == '' or opts.psrname == '': - # reading inf-file to get corresponding info - inffile = datfile.split(".dat")[0] + ".inf" - try: - id = inf.infodata(inffile) - tsamp = id.dt # sampling time - startmjd = id.epoch # start MJD - source = id.object # pulsar name - except: - print("Error: Can't read the inf-file '%s'!" % (inffile,)) - sys.exit(1) - - # overwriting MJD, tsamp, pulsarname from the command line if given - if opts.is_chandra: - opts.mjd = "50814.0" - print("Chandra event file. Reference start MJD is %s" % (opts.mjd)) - if opts.mjd != '': - startmjd = float(opts.mjd) - if opts.tsamp != '': - tsamp = float(opts.tsamp) - if opts.psrname != '': - source = opts.psrname - - # checking start, end times and adjusting them if necessary - if opts.start_time < 0: - print("Error: start time %.3f is < 0!" % (opts.start_time)) - sys.exit(1) - - if not opts.is_events: - # getting the size of the file - try: - size=(os.stat(datfile)[stat.ST_SIZE] - headersize) / 4 # 4 bytes in float - except: - print("Error: Can't open the input file '%s'!" % (datfile,)) - sys.exit(1) - - # checking start, end times and adjusting them if necessary - start_sample = int(opts.start_time / tsamp) - if start_sample >= size: - print("Start time %.3f is out of range!" % (opts.start_time)) - sys.exit(1) - else: - size = size - start_sample - - # adjusting start MJD - opts.start_time = start_sample * tsamp - - if opts.window_time != -1: - window = int(opts.window_time / tsamp) - opts.window_time = window * tsamp - else: - window = size - opts.window_time = window * tsamp - - if start_sample + window > size: - print("End time %.3f is out of range. The window duration will be adjusted" % (opts.start_time + opts.window_time)) - window = size - start_sample - opts.window_time = window * tsamp - print("New window is %.3f s" % (opts.window_time)) - - else: # reading the file, and so, we can get the start and end events' time - events = np.loadtxt(datfile, comments='#', usecols=(0,0), dtype=float, unpack=True)[0] - try: - energy = np.loadtxt(datfile, comments='#', usecols=(1,1), dtype=float, unpack=True)[0] - except: - energy = [] - if np.size(energy) == 0 and opts.is_timeseries: - print("No energy column is given in the events'file '%s'. It _must_ be given to plot the timeseries!" % (datfile)) - sys.exit(1) - duration = events[-1]-events[0] - if opts.start_time >= duration: - print("Start time %.3f sec is more than duration of observation of %.3f sec!" % (opts.start_time, duration)) - sys.exit(1) - else: - events.compress(events >= opts.start_time + events[0]) - if opts.is_timeseries: - energy.compress(events >= opts.start_time + events[0]) - if opts.window_time == -1: - opts.window_time = duration - if opts.start_time + opts.window_time > duration: - print("End time %.3f is out of range. 
The window duration will be adjusted" % (opts.start_time + opts.window_time)) - opts.window_time = duration - opts.start_time - print("New window is %.3f s" % (opts.window_time)) - - # checking dump settings - if opts.dump != '': - if "." in opts.dump: # dump time is given - dump_time = float(opts.dump) - dump_pulses = 0 - if dump_time > 0: - print("Dump time is %.3f s" % (dump_time)) - else: - print("Dump time %.3f should be > 0!" % (dump_time)) - sys.exit(1) - else: # number of pulses to dump is given - dump_pulses = int(opts.dump) - dump_time = 0 - if dump_pulses > 0: - print("Number of pulses in subintegration is %d" % (dump_pulses)) - else: - print("Number of pulses in subintegration %d should be > 0!" % (dump_pulses)) - sys.exit(1) - else: - dump_pulses = 0 - dump_time = 0 - - # Checking the phases and correct if necessary - if not opts.is_timeseries: - if opts.phase_start < 0 or opts.phase_start >= 1.0: - opts.phase_start = 0.0 - print("Start phase is out of range. Adjusted value is %.3f" % (opts.phase_start,)) - - if opts.phase_end <= 0.0 or opts.phase_end > 1.0: - opts.phase_end = 1.0 - print("End phase is out of range. Adjusted value is %.3f" % (opts.phase_end,)) - - if opts.phase_end <= opts.phase_start: - print("End phase %.3f is <= than start phase %.3f! Please adjust." % (opts.phase_end, opts.phase_start)) - sys.exit(1) - - if not opts.is_no_detrend: - # checking the blocksize - if opts.blocksize < 3 * detrendlen: - print("Block size is too small: %d. Will be increased to %d." % (opts.blocksize, 3 * detrendlen)) - opts.blocksize = 3 * detrendlen - # making blocksize to be divisible by detrendlen (in order not to loose samples between blocks) - if int (opts.blocksize / detrendlen) * detrendlen != opts.blocksize: - opts.blocksize = detrendlen * (1 + int (opts.blocksize / detrendlen)) - print("Adjusting block size to %d to be divisible by detrendlen=%d" % (opts.blocksize, detrendlen)) - - - # forming the array of time samples - if not opts.is_events: - try: - infile = open(datfile, "rb") - except: - print("Error: Can't read the dat-file '%s'!" % (datfile,)) - sys.exit(1) - dataptr = ar.array('f') # 'f' - for float - infile.seek(headersize + 4 * start_sample) # position to the first byte to read; '4' - is the size of float - else: - data = events / 86400. # converting events to days - data += startmjd # converting to MJD - events -= events[0] # converting events' time relative to the start of observation - - - # Folding the profile - if not opts.is_timeseries: - if opts.period == -1: # Period is not given in the cmdline, so will use polyco file - pid=poly.polycos(source, opts.polycofile) - try: - if not opts.is_events: - fold_period = get_period (pid, startmjd) - else: - fold_period = get_period (pid, data[0]) - except: - print("Check the name of the pulsar in polyco file '%s' and inf-file '%s'!" % (opts.polycofile, inffile)) - print("If different, try --pulsar option to set the name of pulsar the same as in polyco file.") - sys.exit(1) - is_update_period = True - if fold_period <= 0: - print("Computed fold period is bad: %f. Check your polyco and/or MJD!" % (float(fold_period))) - sys.exit(1) - else: # period is given - fold_period = opts.period / 1000. 
- is_update_period = False - # if number of bins is not specified (should always be specified for events) - if opts.nbins == -1: - opts.nbins = int(fold_period / tsamp) - # if dump_time is given - checking that it is >= fold_period - if dump_time > 0: - if dump_time < fold_period: - print("Dump time %.3f s is less than folding period of %f s. Adjusting to match." % (dump_time, fold_period)) - dump_time = fold_period - print("Dump time is now %.3f s" % (dump_time)) - if dump_time > opts.window_time: - print("Dump time %.3f is more than window duration of %f s. Adjusting..." % (dump_time, opts.window_time)) - # we make it a one period less than duration, because otherwise plt.imshow - # fails to plot - dump_time = opts.window_time - fold_period - print("Dump time is now %.3f s" % (dump_time)) - else: # we need this for plotting purposes - dump_time = dump_pulses * fold_period - if dump_time > opts.window_time: - print("Number of pulses per subintegration %d is more than within window duration of %f s." % (dump_pulses, opts.window_time)) - print("Adjusting to match to the closest maximum possible number.") - dump_pulses = int((opts.window_time - fold_period) / fold_period) - dump_time = dump_pulses * fold_period - print("Number of pulses per subintegration is now %d" % (dump_pulses)) - - bin_start = int (opts.nbins * opts.phase_start) - bin_end = int(math.ceil((opts.nbins - 1) * opts.phase_end)) - bin_range = bin_end - bin_start + 1 - if "%f" % (opts.phase_start) != "%f" % (float(bin_start)/opts.nbins): - opts.phase_start = float(bin_start)/opts.nbins - print("Adjusting the start phase to %.3f to account for integer number of bins" % (opts.phase_start)) - if "%f" % (opts.phase_end) != "%f" % (float(bin_end)/(opts.nbins - 1)): - opts.phase_end = float(bin_end)/(opts.nbins - 1) - print("Adjusting the end phase to %.3f to account for integer number of bins" % (opts.phase_end)) - - # initializing some variables and arrays - elapsed_secs = opts.start_time - elapsed_turns = 0. 
- ibin = 0 - lbin = -1 - pulsecount = 0 - profile = np.zeros (bin_range, dtype=float) # profile array - counts = np.zeros (bin_range, dtype=float) # array that keeps number of counts in each bin of the profile - - if not opts.is_profileonly: # initialize the 2D array to keep the stack of pulses - if dump_pulses > 0: - npulses_expect = int(math.ceil(int((opts.window_time) / fold_period) / float(dump_pulses))) - elif dump_time > 0: - npulses_expect = int(math.ceil((opts.window_time) / dump_time)) - else: - npulses_expect = int(math.ceil((opts.window_time) / fold_period)) - # increase that value by 25% (overkill probably) in order to be safe if period is changing significantly - # over the course of observations - npulses_expect += int(0.25 * npulses_expect) - if npulses_expect == 1: - npulses_expect += 1 - pulsestack = np.zeros((npulses_expect, bin_range), dtype=float) - if dump_pulses > 0 or dump_time > 0: - dump = np.zeros (bin_range, dtype=float) # current subintegration - dumpcount = 0 # number of pulses in current subintegration - dump_next = elapsed_secs + dump_time # time of the next dump - dumpcounts = np.zeros (bin_range, dtype=float) # array that keeps number of counts in each bin of the subintegration - - # in case of events' file we always use dump_time - if opts.is_events: - if dump_time == 0: # always use dump_time - dump_time = fold_period - dumpcount = 0 # number of pulses in current subintegration - dump_next = elapsed_secs + dump_time # time of the next dump - dumpcounts = np.zeros (bin_range, dtype=float) # array that keeps number of counts in each bin of the subintegration - - - - if not opts.is_events: - # Loop over the chunks of data to read - while 1: - samplesleft = (headersize + (start_sample + window) * 4 - infile.tell()) / 4 - if samplesleft <= 0: # leaving from this infinite while loop - break - if samplesleft > opts.blocksize: - dataptr.fromfile(infile, opts.blocksize) - else: - dataptr.fromfile(infile, samplesleft) - data = np.array(dataptr) - dataptr = ar.array('f') # clear the array. This is important! - readsamples = np.size(data) # how many samples we have read - - # detrending the data - if not opts.is_no_detrend: - # this check is necessary when reading the very last block and if its size is _very_ small - # then we just have to skip it when detrending - if readsamples < 3 * detrendlen: - break - data = detrending (data, opts.fast_detrend) - size = np.size(data) - - # updating the period if necessary - if is_update_period: - fold_period = get_period (pid, startmjd + elapsed_secs / 86400.) - - # main folding loop over the read samples - turns = elapsed_turns - secs = elapsed_secs - for s in range(0, size): - # phase of each sample - phase=turns - math.floor(turns) - if phase >= opts.phase_start and phase <= opts.phase_end: - ibin = int (opts.nbins * phase) - bin_start - if ibin == opts.nbins: ibin -= 1 - if ibin - lbin < 0: pulsecount += 1 - counts[ibin] += 1. 
- profile[ibin] += data[s] - if not opts.is_profileonly: - # making the subintegration - if dump_pulses > 0 or dump_time > 0: - # if number of pulses to dump is given - if dump_pulses > 0 and ibin - lbin < 0 and pulsecount != 0 and pulsecount%dump_pulses == 0: - pulsestack[dumpcount] = np.array([dump[i]/(dumpcounts[i] == 0.0 and 1.0 or dumpcounts[i]) for i in range(0, bin_range)], dtype=float) - dumpcount += 1 - dumpcounts[:] = 0.0 - dump[:] = 0.0 - # if dump time is given - if dump_time > 0 and dump_pulses <= 0 and dump_next - secs <= tsamp/2.: - pulsestack[dumpcount] = np.array([dump[i]/(dumpcounts[i] == 0.0 and 1.0 or dumpcounts[i]) for i in range(0, bin_range)], dtype=float) - dumpcount += 1 - dumpcounts[:] = 0.0 - dump[:] = 0.0 - dump_next = secs + dump_time - - # after the dumps (if happened) we still need to continue with the current sample - # that belongs already to the next dump - dumpcounts[ibin] += 1. - dump[ibin] += data[s] - else: # no subintegrations - pulsestack[pulsecount][ibin] = data[s] - - turns += (tsamp / fold_period) - secs += tsamp - lbin = ibin - elapsed_secs += readsamples * tsamp - elapsed_turns += (readsamples * tsamp / fold_period) - - else: # if events are given - for tt in range(np.size(data)): - # updating the period if necessary - if is_update_period: - fold_period = get_period (pid, data[tt]) - turns = events[tt] / fold_period - # phase of each sample - phase=turns - math.floor(turns) - if phase >= opts.phase_start and phase <= opts.phase_end: - ibin = int (opts.nbins * phase) - bin_start - if ibin == opts.nbins: ibin -= 1 - counts[ibin] += 1. - if not opts.is_profileonly: - # if dump time is given (always use in case of events file) - if ((tt < np.size(data)-1 and dump_next < events[tt+1]) or tt == np.size(data)-1): - pulsestack[dumpcount] = dumpcounts - dumpcount += 1 - dumpcounts[:] = 0.0 - dump_next += dump_time - # after the dumps (if happened) we still need to continue with the current sample - # that belongs already to the next dump - dumpcounts[ibin] += 1. - - - # normalizing the profile - if not opts.is_events: - profile = np.array([profile[i]/(counts[i] == 0.0 and 1.0 or counts[i]) for i in range(0, bin_range)], dtype=float) - else: - profile = counts - # saving the profile to binary file - if opts.proffile != '': - outp = open(opts.proffile, 'wb') - outbins = ar.array('f') - outbins.fromlist(profile.tolist()) - outbins.tofile(outp) - outp.close() - - - else: - if not opts.is_events: - # if one wants just to plot time series, just read the whole selected block and then plot it - # If the size of window is too large, then probably Python won't be able to read it all at once - dataptr.fromfile(infile, window) - data = np.array(dataptr) - dataptr = ar.array('f') # clear the array. Though, here it's not important or necessary - # detrending the data - if not opts.is_no_detrend: - data = detrending (data, opts.fast_detrend) - window = np.size(data) - opts.window_time = window * tsamp - - - - - - - # - # Plotting - # + cmdline.add_option_group(group) + + # reading cmd options + (opts,args) = cmdline.parse_args() + + # print short list of options + if opts.is_printlist: + list_options(sys.argv[0].split("/")[-1]) + sys.exit(0) + + # check if input file is given + if len(args) != 0: + datfile = args[0] + else: + #cmdline.print_help() + cmdline.print_usage() + sys.exit(0) + + # importing matplotlib + if opts.is_saveonly: + import matplotlib + matplotlib.use("Agg") + pngname = datfile.split(".dat")[0] + "." 
+ opts.imageext + else: + import matplotlib + + import matplotlib.pyplot as plt + import matplotlib.ticker as ticker + import matplotlib.cm as cm + import matplotlib.collections as collections + import matplotlib.font_manager as fm + + headersize = 0 # size in bytes of the header + # if input file is Sigproc-style tim-file + if opts.is_timfile: + try: + filhdr, headersize = sigproc.read_header(datfile) + startmjd = filhdr['tstart'] + tsamp = filhdr['tsamp'] + source = filhdr['source_name'] + except: + print("Error: Can't open the tim-file '%s'!" % (datfile,)) + sys.exit(1) + + elif opts.is_events: + opts.is_no_detrend = True # we don't do detrending for events + if opts.mjd == '' and not opts.is_chandra: + print("Error: for events' file start MJD _must_ be given with --mjd option or --chandra option!") + sys.exit(1) + if opts.nbins == -1: + print("Error: number of bins _must_ be given with --nbins option!") + sys.exit(1) + if opts.rebin != 1: + print("Event data can not be re-binned") + opts.rebin = 1 + else: + if (opts.mjd == '' and not opts.is_chandra) or opts.tsamp == '' or opts.psrname == '': + # reading inf-file to get corresponding info + inffile = datfile.split(".dat")[0] + ".inf" + try: + id = inf.infodata(inffile) + tsamp = id.dt # sampling time + startmjd = id.epoch # start MJD + source = id.object # pulsar name + except: + print("Error: Can't read the inf-file '%s'!" % (inffile,)) + sys.exit(1) + + # overwriting MJD, tsamp, pulsarname from the command line if given + if opts.is_chandra: + opts.mjd = "50814.0" + print("Chandra event file. Reference start MJD is %s" % (opts.mjd)) + if opts.mjd != '': + startmjd = float(opts.mjd) + if opts.tsamp != '': + tsamp = float(opts.tsamp) + if opts.psrname != '': + source = opts.psrname + + # checking start, end times and adjusting them if necessary + if opts.start_time < 0: + print("Error: start time %.3f is < 0!" % (opts.start_time)) + sys.exit(1) + + if not opts.is_events: + # getting the size of the file + try: + size=(os.stat(datfile)[stat.ST_SIZE] - headersize) / 4 # 4 bytes in float + except: + print("Error: Can't open the input file '%s'!" % (datfile,)) + sys.exit(1) + + # checking start, end times and adjusting them if necessary + start_sample = int(opts.start_time / tsamp) + if start_sample >= size: + print("Start time %.3f is out of range!" % (opts.start_time)) + sys.exit(1) + else: + size = size - start_sample + + # adjusting start MJD + opts.start_time = start_sample * tsamp + + if opts.window_time != -1: + window = int(opts.window_time / tsamp) + opts.window_time = window * tsamp + else: + window = size + opts.window_time = window * tsamp + + if start_sample + window > size: + print("End time %.3f is out of range. The window duration will be adjusted" % (opts.start_time + opts.window_time)) + window = size - start_sample + opts.window_time = window * tsamp + print("New window is %.3f s" % (opts.window_time)) + + else: # reading the file, and so, we can get the start and end events' time + events = np.loadtxt(datfile, comments='#', usecols=(0,0), dtype=float, unpack=True)[0] + try: + energy = np.loadtxt(datfile, comments='#', usecols=(1,1), dtype=float, unpack=True)[0] + except: + energy = [] + if np.size(energy) == 0 and opts.is_timeseries: + print("No energy column is given in the events'file '%s'. It _must_ be given to plot the timeseries!" % (datfile)) + sys.exit(1) + duration = events[-1]-events[0] + if opts.start_time >= duration: + print("Start time %.3f sec is more than duration of observation of %.3f sec!" 
% (opts.start_time, duration)) + sys.exit(1) + else: + events.compress(events >= opts.start_time + events[0]) + if opts.is_timeseries: + energy.compress(events >= opts.start_time + events[0]) + if opts.window_time == -1: + opts.window_time = duration + if opts.start_time + opts.window_time > duration: + print("End time %.3f is out of range. The window duration will be adjusted" % (opts.start_time + opts.window_time)) + opts.window_time = duration - opts.start_time + print("New window is %.3f s" % (opts.window_time)) + + # checking dump settings + if opts.dump != '': + if "." in opts.dump: # dump time is given + dump_time = float(opts.dump) + dump_pulses = 0 + if dump_time > 0: + print("Dump time is %.3f s" % (dump_time)) + else: + print("Dump time %.3f should be > 0!" % (dump_time)) + sys.exit(1) + else: # number of pulses to dump is given + dump_pulses = int(opts.dump) + dump_time = 0 + if dump_pulses > 0: + print("Number of pulses in subintegration is %d" % (dump_pulses)) + else: + print("Number of pulses in subintegration %d should be > 0!" % (dump_pulses)) + sys.exit(1) + else: + dump_pulses = 0 + dump_time = 0 + + # Checking the phases and correct if necessary + if not opts.is_timeseries: + if opts.phase_start < 0 or opts.phase_start >= 1.0: + opts.phase_start = 0.0 + print("Start phase is out of range. Adjusted value is %.3f" % (opts.phase_start,)) + + if opts.phase_end <= 0.0 or opts.phase_end > 1.0: + opts.phase_end = 1.0 + print("End phase is out of range. Adjusted value is %.3f" % (opts.phase_end,)) + + if opts.phase_end <= opts.phase_start: + print("End phase %.3f is <= than start phase %.3f! Please adjust." % (opts.phase_end, opts.phase_start)) + sys.exit(1) + + if not opts.is_no_detrend: + # checking the blocksize + if opts.blocksize < 3 * detrendlen: + print("Block size is too small: %d. Will be increased to %d." % (opts.blocksize, 3 * detrendlen)) + opts.blocksize = 3 * detrendlen + # making blocksize to be divisible by detrendlen (in order not to loose samples between blocks) + if int (opts.blocksize / detrendlen) * detrendlen != opts.blocksize: + opts.blocksize = detrendlen * (1 + int (opts.blocksize / detrendlen)) + print("Adjusting block size to %d to be divisible by detrendlen=%d" % (opts.blocksize, detrendlen)) + + + # forming the array of time samples + if not opts.is_events: + try: + infile = open(datfile, "rb") + except: + print("Error: Can't read the dat-file '%s'!" % (datfile,)) + sys.exit(1) + dataptr = ar.array('f') # 'f' - for float + infile.seek(headersize + 4 * start_sample) # position to the first byte to read; '4' - is the size of float + else: + data = events / 86400. # converting events to days + data += startmjd # converting to MJD + events -= events[0] # converting events' time relative to the start of observation + + + # Folding the profile + if not opts.is_timeseries: + if opts.period == -1: # Period is not given in the cmdline, so will use polyco file + pid=poly.polycos(source, opts.polycofile) + try: + if not opts.is_events: + fold_period = get_period (pid, startmjd) + else: + fold_period = get_period (pid, data[0]) + except: + print("Check the name of the pulsar in polyco file '%s' and inf-file '%s'!" % (opts.polycofile, inffile)) + print("If different, try --pulsar option to set the name of pulsar the same as in polyco file.") + sys.exit(1) + is_update_period = True + if fold_period <= 0: + print("Computed fold period is bad: %f. Check your polyco and/or MJD!" 
% (float(fold_period))) + sys.exit(1) + else: # period is given + fold_period = opts.period / 1000. + is_update_period = False + # if number of bins is not specified (should always be specified for events) + if opts.nbins == -1: + opts.nbins = int(fold_period / tsamp) + # if dump_time is given - checking that it is >= fold_period + if dump_time > 0: + if dump_time < fold_period: + print("Dump time %.3f s is less than folding period of %f s. Adjusting to match." % (dump_time, fold_period)) + dump_time = fold_period + print("Dump time is now %.3f s" % (dump_time)) + if dump_time > opts.window_time: + print("Dump time %.3f is more than window duration of %f s. Adjusting..." % (dump_time, opts.window_time)) + # we make it a one period less than duration, because otherwise plt.imshow + # fails to plot + dump_time = opts.window_time - fold_period + print("Dump time is now %.3f s" % (dump_time)) + else: # we need this for plotting purposes + dump_time = dump_pulses * fold_period + if dump_time > opts.window_time: + print("Number of pulses per subintegration %d is more than within window duration of %f s." % (dump_pulses, opts.window_time)) + print("Adjusting to match to the closest maximum possible number.") + dump_pulses = int((opts.window_time - fold_period) / fold_period) + dump_time = dump_pulses * fold_period + print("Number of pulses per subintegration is now %d" % (dump_pulses)) + + bin_start = int (opts.nbins * opts.phase_start) + bin_end = int(math.ceil((opts.nbins - 1) * opts.phase_end)) + bin_range = bin_end - bin_start + 1 + if "%f" % (opts.phase_start) != "%f" % (float(bin_start)/opts.nbins): + opts.phase_start = float(bin_start)/opts.nbins + print("Adjusting the start phase to %.3f to account for integer number of bins" % (opts.phase_start)) + if "%f" % (opts.phase_end) != "%f" % (float(bin_end)/(opts.nbins - 1)): + opts.phase_end = float(bin_end)/(opts.nbins - 1) + print("Adjusting the end phase to %.3f to account for integer number of bins" % (opts.phase_end)) + + # initializing some variables and arrays + elapsed_secs = opts.start_time + elapsed_turns = 0. 
+ ibin = 0 + lbin = -1 + pulsecount = 0 + profile = np.zeros (bin_range, dtype=float) # profile array + counts = np.zeros (bin_range, dtype=float) # array that keeps number of counts in each bin of the profile + + if not opts.is_profileonly: # initialize the 2D array to keep the stack of pulses + if dump_pulses > 0: + npulses_expect = int(math.ceil(int((opts.window_time) / fold_period) / float(dump_pulses))) + elif dump_time > 0: + npulses_expect = int(math.ceil((opts.window_time) / dump_time)) + else: + npulses_expect = int(math.ceil((opts.window_time) / fold_period)) + # increase that value by 25% (overkill probably) in order to be safe if period is changing significantly + # over the course of observations + npulses_expect += int(0.25 * npulses_expect) + if npulses_expect == 1: + npulses_expect += 1 + pulsestack = np.zeros((npulses_expect, bin_range), dtype=float) + if dump_pulses > 0 or dump_time > 0: + dump = np.zeros (bin_range, dtype=float) # current subintegration + dumpcount = 0 # number of pulses in current subintegration + dump_next = elapsed_secs + dump_time # time of the next dump + dumpcounts = np.zeros (bin_range, dtype=float) # array that keeps number of counts in each bin of the subintegration + + # in case of events' file we always use dump_time + if opts.is_events: + if dump_time == 0: # always use dump_time + dump_time = fold_period + dumpcount = 0 # number of pulses in current subintegration + dump_next = elapsed_secs + dump_time # time of the next dump + dumpcounts = np.zeros (bin_range, dtype=float) # array that keeps number of counts in each bin of the subintegration + + + + if not opts.is_events: + # Loop over the chunks of data to read + while 1: + samplesleft = (headersize + (start_sample + window) * 4 - infile.tell()) / 4 + if samplesleft <= 0: # leaving from this infinite while loop + break + if samplesleft > opts.blocksize: + dataptr.fromfile(infile, opts.blocksize) + else: + dataptr.fromfile(infile, samplesleft) + data = np.array(dataptr) + dataptr = ar.array('f') # clear the array. This is important! + readsamples = np.size(data) # how many samples we have read + + # detrending the data + if not opts.is_no_detrend: + # this check is necessary when reading the very last block and if its size is _very_ small + # then we just have to skip it when detrending + if readsamples < 3 * detrendlen: + break + data = detrending (data, opts.fast_detrend) + size = np.size(data) + + # updating the period if necessary + if is_update_period: + fold_period = get_period (pid, startmjd + elapsed_secs / 86400.) + + # main folding loop over the read samples + turns = elapsed_turns + secs = elapsed_secs + for s in range(0, size): + # phase of each sample + phase=turns - math.floor(turns) + if phase >= opts.phase_start and phase <= opts.phase_end: + ibin = int (opts.nbins * phase) - bin_start + if ibin == opts.nbins: ibin -= 1 + if ibin - lbin < 0: pulsecount += 1 + counts[ibin] += 1. 
+ profile[ibin] += data[s] + if not opts.is_profileonly: + # making the subintegration + if dump_pulses > 0 or dump_time > 0: + # if number of pulses to dump is given + if dump_pulses > 0 and ibin - lbin < 0 and pulsecount != 0 and pulsecount%dump_pulses == 0: + pulsestack[dumpcount] = np.array([dump[i]/(dumpcounts[i] == 0.0 and 1.0 or dumpcounts[i]) for i in range(0, bin_range)], dtype=float) + dumpcount += 1 + dumpcounts[:] = 0.0 + dump[:] = 0.0 + # if dump time is given + if dump_time > 0 and dump_pulses <= 0 and dump_next - secs <= tsamp/2.: + pulsestack[dumpcount] = np.array([dump[i]/(dumpcounts[i] == 0.0 and 1.0 or dumpcounts[i]) for i in range(0, bin_range)], dtype=float) + dumpcount += 1 + dumpcounts[:] = 0.0 + dump[:] = 0.0 + dump_next = secs + dump_time + + # after the dumps (if happened) we still need to continue with the current sample + # that belongs already to the next dump + dumpcounts[ibin] += 1. + dump[ibin] += data[s] + else: # no subintegrations + pulsestack[pulsecount][ibin] = data[s] + + turns += (tsamp / fold_period) + secs += tsamp + lbin = ibin + elapsed_secs += readsamples * tsamp + elapsed_turns += (readsamples * tsamp / fold_period) + + else: # if events are given + for tt in range(np.size(data)): + # updating the period if necessary + if is_update_period: + fold_period = get_period (pid, data[tt]) + turns = events[tt] / fold_period + # phase of each sample + phase=turns - math.floor(turns) + if phase >= opts.phase_start and phase <= opts.phase_end: + ibin = int (opts.nbins * phase) - bin_start + if ibin == opts.nbins: ibin -= 1 + counts[ibin] += 1. + if not opts.is_profileonly: + # if dump time is given (always use in case of events file) + if ((tt < np.size(data)-1 and dump_next < events[tt+1]) or tt == np.size(data)-1): + pulsestack[dumpcount] = dumpcounts + dumpcount += 1 + dumpcounts[:] = 0.0 + dump_next += dump_time + # after the dumps (if happened) we still need to continue with the current sample + # that belongs already to the next dump + dumpcounts[ibin] += 1. + + + # normalizing the profile + if not opts.is_events: + profile = np.array([profile[i]/(counts[i] == 0.0 and 1.0 or counts[i]) for i in range(0, bin_range)], dtype=float) + else: + profile = counts + # saving the profile to binary file + if opts.proffile != '': + outp = open(opts.proffile, 'wb') + outbins = ar.array('f') + outbins.fromlist(profile.tolist()) + outbins.tofile(outp) + outp.close() + + + else: + if not opts.is_events: + # if one wants just to plot time series, just read the whole selected block and then plot it + # If the size of window is too large, then probably Python won't be able to read it all at once + dataptr.fromfile(infile, window) + data = np.array(dataptr) + dataptr = ar.array('f') # clear the array. 
Though, here it's not important or necessary + # detrending the data + if not opts.is_no_detrend: + data = detrending (data, opts.fast_detrend) + window = np.size(data) + opts.window_time = window * tsamp + + # + # Plotting + # fig = plt.figure() - if opts.is_timeseries: # plotting the time series - if opts.rebin == 1: - if not opts.is_events: - flux = data - else: - flux = energy - else: - window = int(window/opts.rebin) - tsamp *= opts.rebin - flux = [np.average(data[k*opts.rebin:(k+1)*opts.rebin]) for k in range(0, window)] - - if not opts.is_events: - time = [opts.start_time + tsamp * t for t in range(0, window)] - else: - time = events - - ax = fig.add_subplot(111) - plt.xlabel("Time (s)", fontsize=opts.fs) - if opts.is_events: - plt.ylabel("Energy", fontsize=opts.fs) - elif opts.is_no_detrend: - plt.ylabel("Flux density (arb. units)", fontsize=opts.fs) - else: - plt.ylabel("Flux density ($\sigma$)", fontsize=opts.fs) - - ax.plot (time, flux, color="%s" % (opts.color), marker="%s" % (opts.marker), markeredgecolor="%s" % (opts.markercolor), markerfacecolor="%s" % (opts.facecolor), linestyle="%s" % (opts.linestyle), linewidth=opts.linewidth, markeredgewidth=opts.markerwidth, markersize=opts.markersize, label="%s" % ("\n".join(opts.legend.split("\\n")))) - # turn on grid - if opts.is_grid == True: - plt.grid(True) - - if not opts.is_no_top_axis: - if not opts.is_events: - axtop = plt.twiny() - axtop.xaxis.tick_top() - axtop.xaxis.set_label_position("top") - axtop.set_xlim(xmin=start_sample, xmax=start_sample+window-1) - for label in axtop.get_xticklabels(): label.set_fontsize(opts.fs) - plt.xlabel("Samples", fontsize=opts.fs) - plt.gca().minorticks_on() - - ax.set_xlim(xmin=time[0], xmax=time[-1]) - ax.yaxis.set_major_formatter(ticker.FormatStrFormatter("%g")) - ax.xaxis.set_major_formatter(ticker.FormatStrFormatter("%g")) - ax.xaxis.set_minor_locator(ticker.AutoMinorLocator()) - ax.yaxis.set_minor_locator(ticker.AutoMinorLocator()) - for label in ax.get_xticklabels(): label.set_fontsize(opts.fs) - for label in ax.get_yticklabels(): label.set_fontsize(opts.fs) - - elif opts.is_profileonly: # plotting only the profile - - flux = profile - phase = [float(n)/opts.nbins for n in range(bin_start, bin_end + 1)] - if opts.is_period_doubled and bin_start == 0 and bin_end == opts.nbins - 1: - flux = np.append(flux, profile) - phase = np.append(phase, [float(opts.nbins + n)/opts.nbins for n in range(bin_start, bin_end + 1)]) - - ax = fig.add_subplot(111) - plt.xlabel("Pulse phase", fontsize=opts.fs) - if opts.is_events: - plt.ylabel("Counts/bin", fontsize=opts.fs) - elif opts.is_no_detrend: - plt.ylabel("Flux density (arb. 
units)", fontsize=opts.fs) - else: - plt.ylabel("Flux density ($\sigma$)", fontsize=opts.fs) - - ax.plot (phase, flux, color="%s" % (opts.color), marker="%s" % (opts.marker), markeredgecolor="%s" % (opts.markercolor), markerfacecolor="%s" % (opts.facecolor), linestyle="%s" % (opts.linestyle), linewidth=opts.linewidth, markeredgewidth=opts.markerwidth, markersize=opts.markersize, label="%s" % ("\n".join(opts.legend.split("\\n")))) - # turn on grid - if opts.is_grid == True: - plt.grid(True) - - if not opts.is_no_top_axis: - axtop = plt.twiny() - axtop.xaxis.tick_top() - axtop.xaxis.set_label_position("top") - if opts.is_period_doubled and bin_start == 0 and bin_end == opts.nbins - 1: - axtop.set_xlim(xmin=bin_start, xmax=opts.nbins + bin_end) - else: - axtop.set_xlim(xmin=bin_start, xmax=bin_end) - for label in axtop.get_xticklabels(): label.set_fontsize(opts.fs) - plt.xlabel("Phase bin", fontsize=opts.fs) - plt.gca().minorticks_on() - - if opts.is_period_doubled and bin_start == 0 and bin_end == opts.nbins - 1: - ax.set_xlim(xmin=opts.phase_start, xmax=2.*opts.phase_end) - else: - ax.set_xlim(xmin=opts.phase_start, xmax=opts.phase_end) - ax.yaxis.set_major_formatter(ticker.FormatStrFormatter("%g")) - ax.xaxis.set_major_formatter(ticker.FormatStrFormatter("%g")) - ax.xaxis.set_minor_locator(ticker.AutoMinorLocator()) - ax.yaxis.set_minor_locator(ticker.AutoMinorLocator()) - for label in ax.get_xticklabels(): label.set_fontsize(opts.fs) - for label in ax.get_yticklabels(): label.set_fontsize(opts.fs) - - elif opts.is_stacking: # plotting the stack of pulses or subints - - ax = fig.add_subplot(111) - if not opts.is_no_top_axis: - plt.xlabel("Phase bin", fontsize=opts.fs) - if not opts.is_no_right_axis: - if opts.is_events: - plt.ylabel("Counts/bin", fontsize=opts.fs) - elif opts.is_no_detrend: - plt.ylabel("Flux density (arb. 
units)", fontsize=opts.fs) - else: - plt.ylabel("Flux density ($\sigma$)", fontsize=opts.fs) - - dr = abs(opts.offset) - if dump_pulses > 0 or dump_time > 0: - ncount = dumpcount - else: - ncount = pulsecount - ymin = pulsestack[0:ncount].min() - ymax = pulsestack[0:ncount].max() - dr = abs(opts.offset) - dmins = [] - dmaxs = [] - - t = np.arange(bin_start, bin_end + 1, dtype=float) - if opts.is_period_doubled and bin_start == 0 and bin_end == opts.nbins - 1: - t = np.append(t, [opts.nbins + n for n in range(bin_start, bin_end + 1)]) - pulseverts = [] - for i in np.arange(ncount-1, -1, -1): - temp = pulsestack[i] + i * dr - if opts.is_period_doubled and bin_start == 0 and bin_end == opts.nbins - 1: - temp = np.append(temp, temp) - dmins.append(temp.min()) - dmaxs.append(temp.max()) - pulseverts.append(np.vstack((np.hstack((t[:,np.newaxis], temp[:,np.newaxis])), [t[-1], ymin-1], [0, ymin-1]))) - - ymin = np.array(dmins).min() - ymax = np.array(dmaxs).max() - polys = collections.PolyCollection(pulseverts, closed=True) - polys.set_edgecolor("%s" % (opts.color)) - polys.set_facecolor("%s" % (opts.facecolor)) - polys.set_linestyle("%s" % (opts.linestyle)) - polys.set_linewidth("%s" % (opts.linewidth)) - ax.add_collection(polys) - - plt.gca().minorticks_on() - - # turn on grid - if opts.is_grid == True: - plt.grid(True) - - axbot = plt.twiny() - axbot.xaxis.tick_bottom() - axbot.xaxis.set_label_position("bottom") - if opts.is_period_doubled and bin_start == 0 and bin_end == opts.nbins - 1: - axbot.set_xlim(xmin=opts.phase_start, xmax=2.*opts.phase_end) - else: - axbot.set_xlim(xmin=opts.phase_start, xmax=opts.phase_end) - for label in axbot.get_xticklabels(): label.set_fontsize(opts.fs) - plt.xlabel("Pulse phase", fontsize=opts.fs) - plt.gca().minorticks_on() - - ayleft = plt.twinx() - ayleft.yaxis.tick_left() - ayleft.yaxis.set_label_position("left") - if (dump_pulses > 0 or dump_time > 0) and not opts.is_events: - ayleft.set_ylim(ymin=0.0, ymax=ncount*dump_time) - else: - ayleft.set_ylim(ymin=0.0, ymax=opts.window_time) - for label in ayleft.get_yticklabels(): label.set_fontsize(opts.fs) - plt.ylabel("Observing time (s)", fontsize=opts.fs) - plt.gca().minorticks_on() - - # Determining the main (top/right) axes - if opts.is_period_doubled and bin_start == 0 and bin_end == opts.nbins - 1: - ax.set_xlim(xmin=bin_start, xmax=opts.nbins + bin_end-0.1) - else: - ax.set_xlim(xmin=bin_start, xmax=bin_end-0.1) - ax.set_ylim(ymin=ymin, ymax=ymax) - if not opts.is_no_top_axis: - ax.xaxis.tick_top() - ax.xaxis.set_label_position("top") - for label in ax.get_xticklabels(): label.set_fontsize(opts.fs) - plt.gca().minorticks_on() - else: # if don't want to show then turning everything off (I can not duplicate the limits and + if opts.is_timeseries: # plotting the time series + if opts.rebin == 1: + if not opts.is_events: + flux = data + else: + flux = energy + else: + window = int(window/opts.rebin) + tsamp *= opts.rebin + flux = [np.average(data[k*opts.rebin:(k+1)*opts.rebin]) for k in range(0, window)] + + if not opts.is_events: + time = [opts.start_time + tsamp * t for t in range(0, window)] + else: + time = events + + ax = fig.add_subplot(111) + plt.xlabel("Time (s)", fontsize=opts.fs) + if opts.is_events: + plt.ylabel("Energy", fontsize=opts.fs) + elif opts.is_no_detrend: + plt.ylabel("Flux density (arb. 
units)", fontsize=opts.fs) + else: + plt.ylabel("Flux density ($\sigma$)", fontsize=opts.fs) + + ax.plot (time, flux, color="%s" % (opts.color), marker="%s" % (opts.marker), + markeredgecolor="%s" % (opts.markercolor), markerfacecolor="%s" % (opts.facecolor), + linestyle="%s" % (opts.linestyle), linewidth=opts.linewidth, markeredgewidth=opts.markerwidth, + markersize=opts.markersize, label="%s" % ("\n".join(opts.legend.split("\\n")))) + # turn on grid + if opts.is_grid: + plt.grid(True) + + if not opts.is_no_top_axis: + if not opts.is_events: + axtop = plt.twiny() + axtop.xaxis.tick_top() + axtop.xaxis.set_label_position("top") + axtop.set_xlim(xmin=start_sample, xmax=start_sample+window-1) + for label in axtop.get_xticklabels(): label.set_fontsize(opts.fs) + plt.xlabel("Samples", fontsize=opts.fs) + plt.gca().minorticks_on() + + ax.set_xlim(xmin=time[0], xmax=time[-1]) + ax.yaxis.set_major_formatter(ticker.FormatStrFormatter("%g")) + ax.xaxis.set_major_formatter(ticker.FormatStrFormatter("%g")) + ax.xaxis.set_minor_locator(ticker.AutoMinorLocator()) + ax.yaxis.set_minor_locator(ticker.AutoMinorLocator()) + for label in ax.get_xticklabels(): label.set_fontsize(opts.fs) + for label in ax.get_yticklabels(): label.set_fontsize(opts.fs) + + elif opts.is_profileonly: # plotting only the profile + + flux = profile + phase = [float(n)/opts.nbins for n in range(bin_start, bin_end + 1)] + if opts.is_period_doubled and bin_start == 0 and bin_end == opts.nbins - 1: + flux = np.append(flux, profile) + phase = np.append(phase, [float(opts.nbins + n)/opts.nbins for n in range(bin_start, bin_end + 1)]) + + ax = fig.add_subplot(111) + plt.xlabel("Pulse phase", fontsize=opts.fs) + if opts.is_events: + plt.ylabel("Counts/bin", fontsize=opts.fs) + elif opts.is_no_detrend: + plt.ylabel("Flux density (arb. 
units)", fontsize=opts.fs) + else: + plt.ylabel("Flux density ($\sigma$)", fontsize=opts.fs) + + ax.plot (phase, flux, color="%s" % (opts.color), marker="%s" % (opts.marker), + markeredgecolor="%s" % (opts.markercolor), markerfacecolor="%s" % (opts.facecolor), + linestyle="%s" % (opts.linestyle), linewidth=opts.linewidth, markeredgewidth=opts.markerwidth, + markersize=opts.markersize, label="%s" % ("\n".join(opts.legend.split("\\n")))) + # turn on grid + if opts.is_grid == True: + plt.grid(True) + + if not opts.is_no_top_axis: + axtop = plt.twiny() + axtop.xaxis.tick_top() + axtop.xaxis.set_label_position("top") + if opts.is_period_doubled and bin_start == 0 and bin_end == opts.nbins - 1: + axtop.set_xlim(xmin=bin_start, xmax=opts.nbins + bin_end) + else: + axtop.set_xlim(xmin=bin_start, xmax=bin_end) + for label in axtop.get_xticklabels(): label.set_fontsize(opts.fs) + plt.xlabel("Phase bin", fontsize=opts.fs) + plt.gca().minorticks_on() + + if opts.is_period_doubled and bin_start == 0 and bin_end == opts.nbins - 1: + ax.set_xlim(xmin=opts.phase_start, xmax=2.*opts.phase_end) + else: + ax.set_xlim(xmin=opts.phase_start, xmax=opts.phase_end) + ax.yaxis.set_major_formatter(ticker.FormatStrFormatter("%g")) + ax.xaxis.set_major_formatter(ticker.FormatStrFormatter("%g")) + ax.xaxis.set_minor_locator(ticker.AutoMinorLocator()) + ax.yaxis.set_minor_locator(ticker.AutoMinorLocator()) + for label in ax.get_xticklabels(): label.set_fontsize(opts.fs) + for label in ax.get_yticklabels(): label.set_fontsize(opts.fs) + + elif opts.is_stacking: # plotting the stack of pulses or subints + + ax = fig.add_subplot(111) + if not opts.is_no_top_axis: + plt.xlabel("Phase bin", fontsize=opts.fs) + if not opts.is_no_right_axis: + if opts.is_events: + plt.ylabel("Counts/bin", fontsize=opts.fs) + elif opts.is_no_detrend: + plt.ylabel("Flux density (arb. 
units)", fontsize=opts.fs) + else: + plt.ylabel("Flux density ($\sigma$)", fontsize=opts.fs) + + dr = abs(opts.offset) + if dump_pulses > 0 or dump_time > 0: + ncount = dumpcount + else: + ncount = pulsecount + ymin = pulsestack[0:ncount].min() + ymax = pulsestack[0:ncount].max() + dr = abs(opts.offset) + dmins = [] + dmaxs = [] + + t = np.arange(bin_start, bin_end + 1, dtype=float) + if opts.is_period_doubled and bin_start == 0 and bin_end == opts.nbins - 1: + t = np.append(t, [opts.nbins + n for n in range(bin_start, bin_end + 1)]) + pulseverts = [] + for i in np.arange(ncount-1, -1, -1): + temp = pulsestack[i] + i * dr + if opts.is_period_doubled and bin_start == 0 and bin_end == opts.nbins - 1: + temp = np.append(temp, temp) + dmins.append(temp.min()) + dmaxs.append(temp.max()) + pulseverts.append(np.vstack((np.hstack((t[:,np.newaxis], temp[:,np.newaxis])), [t[-1], ymin-1], [0, ymin-1]))) + + ymin = np.array(dmins).min() + ymax = np.array(dmaxs).max() + polys = collections.PolyCollection(pulseverts, closed=True) + polys.set_edgecolor("%s" % (opts.color)) + polys.set_facecolor("%s" % (opts.facecolor)) + polys.set_linestyle("%s" % (opts.linestyle)) + polys.set_linewidth("%s" % (opts.linewidth)) + ax.add_collection(polys) + + plt.gca().minorticks_on() + + # turn on grid + if opts.is_grid: + plt.grid(True) + + axbot = plt.twiny() + axbot.xaxis.tick_bottom() + axbot.xaxis.set_label_position("bottom") + if opts.is_period_doubled and bin_start == 0 and bin_end == opts.nbins - 1: + axbot.set_xlim(xmin=opts.phase_start, xmax=2.*opts.phase_end) + else: + axbot.set_xlim(xmin=opts.phase_start, xmax=opts.phase_end) + for label in axbot.get_xticklabels(): label.set_fontsize(opts.fs) + plt.xlabel("Pulse phase", fontsize=opts.fs) + plt.gca().minorticks_on() + + ayleft = plt.twinx() + ayleft.yaxis.tick_left() + ayleft.yaxis.set_label_position("left") + if (dump_pulses > 0 or dump_time > 0) and not opts.is_events: + ayleft.set_ylim(ymin=0.0, ymax=ncount*dump_time) + else: + ayleft.set_ylim(ymin=0.0, ymax=opts.window_time) + for label in ayleft.get_yticklabels(): label.set_fontsize(opts.fs) + plt.ylabel("Observing time (s)", fontsize=opts.fs) + plt.gca().minorticks_on() + + # Determining the main (top/right) axes + if opts.is_period_doubled and bin_start == 0 and bin_end == opts.nbins - 1: + ax.set_xlim(xmin=bin_start, xmax=opts.nbins + bin_end-0.1) + else: + ax.set_xlim(xmin=bin_start, xmax=bin_end-0.1) + ax.set_ylim(ymin=ymin, ymax=ymax) + if not opts.is_no_top_axis: + ax.xaxis.tick_top() + ax.xaxis.set_label_position("top") + for label in ax.get_xticklabels(): label.set_fontsize(opts.fs) + plt.gca().minorticks_on() + else: # if don't want to show then turning everything off (I can not duplicate the limits and + # ticks from the bottom axis because then imshow won't work properly + ax.xaxis.set_major_formatter(ticker.NullFormatter()) + ax.xaxis.set_major_locator(ticker.NullLocator()) + ax.xaxis.set_minor_locator(ticker.NullLocator()) + + if not opts.is_no_right_axis: + ax.yaxis.tick_right() + ax.yaxis.set_label_position("right") + for label in ax.get_yticklabels(): label.set_fontsize(opts.fs) + plt.gca().minorticks_on() + else: # if don't want to show then turning everything off (I can not duplicate the limits and + # ticks from the left axis because then imshow won't work properly + ax.yaxis.set_major_formatter(ticker.NullFormatter()) + ax.yaxis.set_major_locator(ticker.NullLocator()) + ax.yaxis.set_minor_locator(ticker.NullLocator()) + + + else: # plotting the stack of pulses or subints in grey-scale 
mode + + ax = fig.add_subplot(111) + if not opts.is_no_top_axis: + plt.xlabel("Phase bin", fontsize=opts.fs) + + if not kapteyn_loaded: + colormap = cm.get_cmap(opts.colormap) + else: + colormap=VariableColormap(cm.get_cmap(opts.colormap)) + colormap.set_scale(opts.cmap_scaling) + + if dump_pulses > 0 or dump_time > 0: + ncount = dumpcount + if not opts.is_no_right_axis: plt.ylabel("Sub-integration", fontsize=opts.fs) + else: + ncount = pulsecount + if not opts.is_no_right_axis: plt.ylabel("Pulse number", fontsize=opts.fs) + + if opts.is_period_doubled and bin_start == 0 and bin_end == opts.nbins - 1: + pulsestack = np.concatenate((pulsestack[:ncount], pulsestack[:ncount]), axis=1) + cax = ax.imshow(pulsestack[:ncount], interpolation=None, aspect='auto', extent=(bin_start, opts.nbins + bin_end, 0, ncount), origin='lower', cmap=colormap) + else: + cax = ax.imshow(pulsestack[:ncount], interpolation=None, aspect='auto', extent=(bin_start, bin_end, 0, ncount), origin='lower', cmap=colormap) + if opts.is_colorbar: + cbar = fig.colorbar(cax, orientation='horizontal', spacing='proportional') + if opts.is_events: + cbar.ax.set_xlabel("Counts/bin", fontsize=opts.fs) + elif opts.is_no_detrend: + cbar.ax.set_xlabel("Flux density (arb. units)", fontsize=opts.fs) + else: + cbar.ax.set_xlabel("Flux density ($\sigma$)", fontsize=opts.fs) + for label in cbar.ax.get_xticklabels(): label.set_fontsize(opts.fs) + + plt.gca().minorticks_on() + + # turn on grid + if opts.is_grid == True: + plt.grid(True) + + axbot = plt.twiny() + axbot.xaxis.tick_bottom() + axbot.xaxis.set_label_position("bottom") + if opts.is_period_doubled and bin_start == 0 and bin_end == opts.nbins - 1: + axbot.set_xlim(xmin=opts.phase_start, xmax=2.*opts.phase_end) + else: + axbot.set_xlim(xmin=opts.phase_start, xmax=opts.phase_end) + for label in axbot.get_xticklabels(): label.set_fontsize(opts.fs) + plt.xlabel("Pulse phase", fontsize=opts.fs) + plt.gca().minorticks_on() + + ayleft = plt.twinx() + ayleft.yaxis.tick_left() + ayleft.yaxis.set_label_position("left") + if (dump_pulses > 0 or dump_time > 0) and not opts.is_events: + ayleft.set_ylim(ymin=0.0, ymax=ncount*dump_time) + else: + ayleft.set_ylim(ymin=0.0, ymax=opts.window_time) + for label in ayleft.get_yticklabels(): label.set_fontsize(opts.fs) + plt.ylabel("Observing time (s)", fontsize=opts.fs) + plt.gca().minorticks_on() + + # Determining the main (top/right) axes + if opts.is_period_doubled and bin_start == 0 and bin_end == opts.nbins - 1: + ax.set_xlim(xmin=bin_start, xmax=opts.nbins + bin_end) + else: + ax.set_xlim(xmin=bin_start, xmax=bin_end) + ax.set_ylim(ymin=0, ymax=ncount) + if not opts.is_no_top_axis: + ax.xaxis.tick_top() + ax.xaxis.set_label_position("top") + for label in ax.get_xticklabels(): label.set_fontsize(opts.fs) + plt.gca().minorticks_on() + else: # if don't want to show then turning everything off (I can not duplicate the limits and # ticks from the bottom axis because then imshow won't work properly - ax.xaxis.set_major_formatter(ticker.NullFormatter()) - ax.xaxis.set_major_locator(ticker.NullLocator()) - ax.xaxis.set_minor_locator(ticker.NullLocator()) - - if not opts.is_no_right_axis: - ax.yaxis.tick_right() - ax.yaxis.set_label_position("right") - for label in ax.get_yticklabels(): label.set_fontsize(opts.fs) - plt.gca().minorticks_on() - else: # if don't want to show then turning everything off (I can not duplicate the limits and + ax.xaxis.set_major_formatter(ticker.NullFormatter()) + ax.xaxis.set_major_locator(ticker.NullLocator()) + 
ax.xaxis.set_minor_locator(ticker.NullLocator()) + + if not opts.is_no_right_axis: + ax.yaxis.tick_right() + ax.yaxis.set_label_position("right") + for label in ax.get_yticklabels(): label.set_fontsize(opts.fs) + plt.gca().minorticks_on() + else: # if don't want to show then turning everything off (I can not duplicate the limits and # ticks from the left axis because then imshow won't work properly - ax.yaxis.set_major_formatter(ticker.NullFormatter()) - ax.yaxis.set_major_locator(ticker.NullLocator()) - ax.yaxis.set_minor_locator(ticker.NullLocator()) - - - else: # plotting the stack of pulses or subints in grey-scale mode - - ax = fig.add_subplot(111) - if not opts.is_no_top_axis: - plt.xlabel("Phase bin", fontsize=opts.fs) - - if not kapteyn_loaded: - colormap = cm.get_cmap(opts.colormap) - else: - colormap=VariableColormap(cm.get_cmap(opts.colormap)) - colormap.set_scale(opts.cmap_scaling) - - if dump_pulses > 0 or dump_time > 0: - ncount = dumpcount - if not opts.is_no_right_axis: plt.ylabel("Sub-integration", fontsize=opts.fs) - else: - ncount = pulsecount - if not opts.is_no_right_axis: plt.ylabel("Pulse number", fontsize=opts.fs) - - if opts.is_period_doubled and bin_start == 0 and bin_end == opts.nbins - 1: - pulsestack = np.concatenate((pulsestack[:ncount], pulsestack[:ncount]), axis=1) - cax = ax.imshow(pulsestack[:ncount], interpolation=None, aspect='auto', extent=(bin_start, opts.nbins + bin_end, 0, ncount), origin='lower', cmap=colormap) - else: - cax = ax.imshow(pulsestack[:ncount], interpolation=None, aspect='auto', extent=(bin_start, bin_end, 0, ncount), origin='lower', cmap=colormap) - if opts.is_colorbar: - cbar = fig.colorbar(cax, orientation='horizontal', spacing='proportional') - if opts.is_events: - cbar.ax.set_xlabel("Counts/bin", fontsize=opts.fs) - elif opts.is_no_detrend: - cbar.ax.set_xlabel("Flux density (arb. 
units)", fontsize=opts.fs) - else: - cbar.ax.set_xlabel("Flux density ($\sigma$)", fontsize=opts.fs) - for label in cbar.ax.get_xticklabels(): label.set_fontsize(opts.fs) - - plt.gca().minorticks_on() - - # turn on grid - if opts.is_grid == True: - plt.grid(True) - - axbot = plt.twiny() - axbot.xaxis.tick_bottom() - axbot.xaxis.set_label_position("bottom") - if opts.is_period_doubled and bin_start == 0 and bin_end == opts.nbins - 1: - axbot.set_xlim(xmin=opts.phase_start, xmax=2.*opts.phase_end) - else: - axbot.set_xlim(xmin=opts.phase_start, xmax=opts.phase_end) - for label in axbot.get_xticklabels(): label.set_fontsize(opts.fs) - plt.xlabel("Pulse phase", fontsize=opts.fs) - plt.gca().minorticks_on() - - ayleft = plt.twinx() - ayleft.yaxis.tick_left() - ayleft.yaxis.set_label_position("left") - if (dump_pulses > 0 or dump_time > 0) and not opts.is_events: - ayleft.set_ylim(ymin=0.0, ymax=ncount*dump_time) - else: - ayleft.set_ylim(ymin=0.0, ymax=opts.window_time) - for label in ayleft.get_yticklabels(): label.set_fontsize(opts.fs) - plt.ylabel("Observing time (s)", fontsize=opts.fs) - plt.gca().minorticks_on() - - # Determining the main (top/right) axes - if opts.is_period_doubled and bin_start == 0 and bin_end == opts.nbins - 1: - ax.set_xlim(xmin=bin_start, xmax=opts.nbins + bin_end) - else: - ax.set_xlim(xmin=bin_start, xmax=bin_end) - ax.set_ylim(ymin=0, ymax=ncount) - if not opts.is_no_top_axis: - ax.xaxis.tick_top() - ax.xaxis.set_label_position("top") - for label in ax.get_xticklabels(): label.set_fontsize(opts.fs) - plt.gca().minorticks_on() - else: # if don't want to show then turning everything off (I can not duplicate the limits and - # ticks from the bottom axis because then imshow won't work properly - ax.xaxis.set_major_formatter(ticker.NullFormatter()) - ax.xaxis.set_major_locator(ticker.NullLocator()) - ax.xaxis.set_minor_locator(ticker.NullLocator()) - - if not opts.is_no_right_axis: - ax.yaxis.tick_right() - ax.yaxis.set_label_position("right") - for label in ax.get_yticklabels(): label.set_fontsize(opts.fs) - plt.gca().minorticks_on() - else: # if don't want to show then turning everything off (I can not duplicate the limits and - # ticks from the left axis because then imshow won't work properly - ax.yaxis.set_major_formatter(ticker.NullFormatter()) - ax.yaxis.set_major_locator(ticker.NullLocator()) - ax.yaxis.set_minor_locator(ticker.NullLocator()) - - - # Making the title - if opts.title != '': - ax.set_title("\n".join(opts.title.split("\\n")), fontsize=opts.fs, x=opts.titlepos.split(",")[0], y=opts.titlepos.split(",")[1], ha='%s' % (opts.titlepos.split(",")[-1])) - - # Putting the label - if opts.label != '': - ax.annotate("\n".join(opts.label.split("\\n")), fontsize=opts.fs, xycoords='axes fraction', xy=(0,0), xytext=(opts.labelpos.split(",")[0], opts.labelpos.split(",")[1]), ha='%s' % (opts.labelpos.split(",")[-1])) - - # turn on the Legend (only when is used in profile-only mode or timeseries mode) - if opts.legend != '' and (opts.is_timeseries or opts.is_profileonly): - prop=fm.FontProperties(size=opts.fs) - plt.legend(prop=prop, loc="%s" % (opts.loc)) - - # end of plotting - if opts.is_saveonly: - plt.savefig(pngname) - else: - plt.show() - - # closing input-file - if not opts.is_events: - infile.close() + ax.yaxis.set_major_formatter(ticker.NullFormatter()) + ax.yaxis.set_major_locator(ticker.NullLocator()) + ax.yaxis.set_minor_locator(ticker.NullLocator()) + + + # Making the title + if opts.title != '': + ax.set_title("\n".join(opts.title.split("\\n")), 
fontsize=opts.fs, x=opts.titlepos.split(",")[0], + y=opts.titlepos.split(",")[1], ha='%s' % (opts.titlepos.split(",")[-1])) + + # Putting the label + if opts.label != '': + ax.annotate("\n".join(opts.label.split("\\n")), fontsize=opts.fs, xycoords='axes fraction', xy=(0,0), + xytext=(opts.labelpos.split(",")[0], opts.labelpos.split(",")[1]), ha='%s' % (opts.labelpos.split(",")[-1])) + + # turn on the Legend (only when is used in profile-only mode or timeseries mode) + if opts.legend != '' and (opts.is_timeseries or opts.is_profileonly): + prop=fm.FontProperties(size=opts.fs) + plt.legend(prop=prop, loc="%s" % (opts.loc)) + + # end of plotting + if opts.is_saveonly: + plt.savefig(pngname) + else: + plt.show() + + # closing input-file + if not opts.is_events: + infile.close() diff --git a/bin/rfifind_stats.py b/bin/rfifind_stats.py index cdd0c8698..8874cddca 100755 --- a/bin/rfifind_stats.py +++ b/bin/rfifind_stats.py @@ -2,6 +2,12 @@ import sys from presto import rfifind +if len(sys.argv) != 2: + print("\nusage: {} file\n".format(sys.argv[0])) + sys.exit(1) + + + if __name__=="__main__": a = rfifind.rfifind(sys.argv[1]) sys.stderr.write("\nWARNING!: If raw data have channels in decreasing freq\n") diff --git a/bin/rrattrap.py b/bin/rrattrap.py index 9d0f93893..90b24e73a 100755 --- a/bin/rrattrap.py +++ b/bin/rrattrap.py @@ -520,7 +520,7 @@ def plot_sp_rated_pgplot(groups, ranks, inffile, ylow=0, yhigh=100, xlow=0, xhig ppgplot.pgsch(0.8) ppgplot.pgbox("BCNST", 0, 0, "BCNST", 0, 0) # redundant with pgenv ppgplot.pgmtxt('B', 2.5, 0.5, 0.5, "Time (s)") - ppgplot.pgmtxt('L', 1.8, 0.5, 0.5, "DM (pc cm\u-3\d)") + ppgplot.pgmtxt('L', 1.8, 0.5, 0.5, "DM (pc cm\\u-3\\d)") ppgplot.pgsch(1.0) ppgplot.pgmtxt('T', 2.5, 0.3, 0.0, "Single Pulse Results for %s" % obsinfo['src']) diff --git a/bin/weights_to_ignorechan.py b/bin/weights_to_ignorechan.py index 43e378890..cb9f36146 100755 --- a/bin/weights_to_ignorechan.py +++ b/bin/weights_to_ignorechan.py @@ -50,6 +50,11 @@ def build_pazline(chanline): return outstr if __name__=="__main__": + + if len(sys.argv) != 2: + print("\nusage: {} file\n".format(sys.argv[0])) + sys.exit(1) + # Read the channels and weights chans, weights = read_weights(sys.argv[1]) diff --git a/python/Makefile b/python/Makefile deleted file mode 100644 index 5309c0eb7..000000000 --- a/python/Makefile +++ /dev/null @@ -1,15 +0,0 @@ -build: - python setup.py install --home=${PRESTO} - cd fftfit_src ; f2py -c fftfit.pyf *.f - cd fftfit_src ; cp fftfit*.so ${PRESTO}/lib/python - python fftfit_src/test_fftfit.py - -build3: - python3 setup.py install --home=${PRESTO} - cd fftfit_src ; f2py3 -c fftfit.pyf *.f - cd fftfit_src ; cp fftfit*.so ${PRESTO}/lib/python - python3 fftfit_src/test_fftfit.py - -clean: - rm -rf build - rm -f *~ *.o *.so *.pyc diff --git a/python/presto/cosine_rand.pickle b/python/presto/cosine_rand.pickle deleted file mode 100644 index 6d62aa33e..000000000 Binary files a/python/presto/cosine_rand.pickle and /dev/null differ diff --git a/python/presto/fftfit.py b/python/presto/fftfit.py new file mode 100644 index 000000000..213cabfed --- /dev/null +++ b/python/presto/fftfit.py @@ -0,0 +1 @@ +from _fftfit import * \ No newline at end of file diff --git a/python/presto/singlepulse/__init__.py b/python/presto/singlepulse/__init__.py index 4e734d3af..969f504d1 100644 --- a/python/presto/singlepulse/__init__.py +++ b/python/presto/singlepulse/__init__.py @@ -1,9 +1,9 @@ -import singlepulse.spio -import singlepulse.bary_and_topo -import singlepulse.rrattrap -import 
singlepulse.read_spd -import singlepulse.make_spd -import singlepulse.plot_spd -import singlepulse.spcand -import singlepulse.sp_pgplot -import singlepulse.rrattrap_config +from presto.singlepulse import spio +from presto.singlepulse import bary_and_topo +from presto.singlepulse import read_spd +from presto.singlepulse import spcand +from presto.singlepulse import sp_pgplot +from presto.singlepulse import rrattrap_config +from presto.singlepulse import rrattrap +#from presto.singlepulse import make_spd +from presto.singlepulse import plot_spd diff --git a/python/presto/singlepulse/make_spd.py b/python/presto/singlepulse/make_spd.py index d2ec50024..c270bf1f9 100755 --- a/python/presto/singlepulse/make_spd.py +++ b/python/presto/singlepulse/make_spd.py @@ -20,18 +20,22 @@ import optparse from presto import waterfaller from presto import psr_utils -from . import plot_spd -from presto.singlepulse import spcand as spcand -from presto.singlepulse import spio as spio +from presto.singlepulse import plot_spd +from presto.singlepulse import spcand +from presto.singlepulse import spio from presto import psrfits +from presto import filterbank + #import filterbank need to implement in PRESTO + DEBUG = True def print_debug(msg): if DEBUG: print(msg) -def waterfall_array(rawdatafile, start, duration, dm, nbins, nsub, subdm, zerodm, \ + +def waterfall_array(rawdatafile, start, duration, dm, nbins, nsub, subdm, zerodm, downsamp, scaleindep, width_bins, mask, maskfn, bandpass_corr): """ Runs the waterfaller. If dedispersing, there will be extra bins added to the 2D plot. @@ -41,10 +45,10 @@ def waterfall_array(rawdatafile, start, duration, dm, nbins, nsub, subdm, zerodm data: 2D array as an "object" array: 2D array ready to be plotted by sp_pgplot.plot_waterfall(array). """ - data, bins, nbins, start = waterfaller.waterfall(rawdatafile, start, duration, dm=dm, nbins=nbins, \ - nsub=nsub, subdm=subdm, zerodm=zerodm, \ - downsamp=downsamp, scaleindep=scaleindep, \ - width_bins=width_bins, mask=mask, \ + data, bins, nbins, start = waterfaller.waterfall(rawdatafile, start, duration, dm=dm, nbins=nbins, + nsub=nsub, subdm=subdm, zerodm=zerodm, + downsamp=downsamp, scaleindep=scaleindep, + width_bins=width_bins, mask=mask, maskfn=maskfn, bandpass_corr=bandpass_corr) array = np.array(data.data) if dm is not None: # If dedispersing the data, extra bins will be added. We need to cut off the extra bins to get back the appropriate window size. 
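
The comment that closes the hunk above explains why waterfall_array() has to trim its output: dedispersion shifts the lower-frequency channels later in time, so waterfaller.waterfall() hands back more spectra than the requested window and the surplus columns must be cut off before plotting. A minimal, self-contained sketch of that trimming step on a dummy array; the names and sizes here are illustrative and are not taken from the function itself:

    import numpy as np

    nbins = 128                            # requested window length (illustrative)
    extra = 32                             # surplus spectra added by dedispersion
    data = np.zeros((96, nbins + extra))   # dummy (channels x spectra) block

    # Keep only the first nbins spectra so the plotted window matches the request.
    array = data[:, :nbins]
    assert array.shape == (96, nbins)
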
@@ -57,15 +61,16 @@ def waterfall_array(rawdatafile, start, duration, dm, nbins, nsub, subdm, zerodm array = (array[::-1]).astype(np.float16) return data, array -def make_spd_from_file(spdcand, rawdatafile, \ - txtfile, maskfile, \ - min_rank, group_rank, \ - plot, just_waterfall, \ - integrate_ts, integrate_spec, disp_pulse, \ - loc_pulse, nsub, \ - maxnumcands, \ - basename, \ - mask=False, bandpass_corr=True, barytime=True, \ + +def make_spd_from_file(spdcand, rawdatafile, + txtfile, maskfile, + min_rank, group_rank, + plot, just_waterfall, + integrate_ts, integrate_spec, disp_pulse, + loc_pulse, nsub, + maxnumcands, + basename, + mask=False, bandpass_corr=True, barytime=True, man_params=None): """ @@ -116,54 +121,54 @@ def make_spd_from_file(spdcand, rawdatafile, \ # Array for Plotting Dedispersed waterfall plot - zerodm - OFF - spdcand.read_from_file(values[ii], rawdatafile.tsamp, rawdatafile.specinfo.N, \ - rawdatafile.frequencies[0], rawdatafile.frequencies[-1], \ - rawdatafile, loc_pulse=loc_pulse, dedisp = True, \ - scaleindep = None, zerodm = None, mask = mask, \ - barytime=barytime, \ + spdcand.read_from_file(values[ii], rawdatafile.tsamp, rawdatafile.specinfo.N, + rawdatafile.frequencies[0], rawdatafile.frequencies[-1], + rawdatafile, loc_pulse=loc_pulse, dedisp = True, + scaleindep = None, zerodm = None, mask = mask, + barytime=barytime, nsub = nsub, bandpass_corr = bandpass_corr) #make an array to store header information for the spd files - temp_filename = basename+"_DM%.1f_%.1fs_rank_%i"%(spdcand.subdm, \ + temp_filename = basename+"_DM%.1f_%.1fs_rank_%i"%(spdcand.subdm, spdcand.topo_start_time, rank) print_debug("Running waterfaller with Zero-DM OFF...") # Add additional information to the header information array - data, Data_dedisp_nozerodm = waterfall_array(rawdatafile, spdcand.start, \ - spdcand.duration, spdcand.dm, spdcand.nbins, spdcand.nsub, \ - spdcand.subdm, spdcand.zerodm, spdcand.downsamp, \ - spdcand.scaleindep, spdcand.width_bins, \ + data, Data_dedisp_nozerodm = waterfall_array(rawdatafile, spdcand.start, + spdcand.duration, spdcand.dm, spdcand.nbins, spdcand.nsub, + spdcand.subdm, spdcand.zerodm, spdcand.downsamp, + spdcand.scaleindep, spdcand.width_bins, spdcand.mask, maskfile, spdcand.bandpass_corr) - text_array = np.array([args[0], rawdatafile.specinfo.telescope, \ - rawdatafile.specinfo.ra_str, rawdatafile.specinfo.dec_str, \ - rawdatafile.specinfo.start_MJD[0], \ - rank, spdcand.nsub, spdcand.nbins, spdcand.subdm, \ - spdcand.sigma, spdcand.sample_number, spdcand.duration, \ - spdcand.width_bins, spdcand.pulse_width, rawdatafile.tsamp,\ - rawdatafile.specinfo.T, spdcand.topo_start_time, data.starttime, \ + text_array = np.array([args[0], rawdatafile.specinfo.telescope, + rawdatafile.specinfo.ra_str, rawdatafile.specinfo.dec_str, + rawdatafile.specinfo.start_MJD[0], + rank, spdcand.nsub, spdcand.nbins, spdcand.subdm, + spdcand.sigma, spdcand.sample_number, spdcand.duration, + spdcand.width_bins, spdcand.pulse_width, rawdatafile.tsamp, + rawdatafile.specinfo.T, spdcand.topo_start_time, data.starttime, data.dt,data.numspectra, data.freqs.min(), data.freqs.max()]) #### Array for plotting Dedispersed waterfall plot zerodm - ON print_debug("Running Waterfaller with Zero-DM ON...") zerodm=True - data, Data_dedisp_zerodm = waterfall_array(rawdatafile, spdcand.start, \ - spdcand.duration, spdcand.dm, spdcand.nbins, spdcand.nsub, \ - spdcand.subdm, zerodm, spdcand.downsamp, \ - spdcand.scaleindep, spdcand.width_bins, \ + data, Data_dedisp_zerodm = 
waterfall_array(rawdatafile, spdcand.start, + spdcand.duration, spdcand.dm, spdcand.nbins, spdcand.nsub, + spdcand.subdm, zerodm, spdcand.downsamp, + spdcand.scaleindep, spdcand.width_bins, spdcand.mask, maskfile, spdcand.bandpass_corr) ####Sweeped without zerodm - spdcand.read_from_file(values[ii], rawdatafile.tsamp, rawdatafile.specinfo.N, \ - rawdatafile.frequencies[0], rawdatafile.frequencies[-1], \ - rawdatafile, loc_pulse=loc_pulse, dedisp = None, \ - scaleindep = None, zerodm = None, mask = mask, \ - barytime=barytime, \ + spdcand.read_from_file(values[ii], rawdatafile.tsamp, rawdatafile.specinfo.N, + rawdatafile.frequencies[0], rawdatafile.frequencies[-1], + rawdatafile, loc_pulse=loc_pulse, dedisp = None, + scaleindep = None, zerodm = None, mask = mask, + barytime=barytime, nsub = nsub, bandpass_corr = bandpass_corr) - data, Data_nozerodm = waterfall_array(rawdatafile, spdcand.start, \ - spdcand.duration, spdcand.dm, spdcand.nbins, spdcand.nsub, \ - spdcand.subdm, spdcand.zerodm, spdcand.downsamp, \ - spdcand.scaleindep, spdcand.width_bins, \ + data, Data_nozerodm = waterfall_array(rawdatafile, spdcand.start, + spdcand.duration, spdcand.dm, spdcand.nbins, spdcand.nsub, + spdcand.subdm, spdcand.zerodm, spdcand.downsamp, + spdcand.scaleindep, spdcand.width_bins, spdcand.mask, maskfile, spdcand.bandpass_corr) text_array = np.append(text_array, spdcand.sweep_duration) text_array = np.append(text_array, data.starttime) @@ -179,35 +184,35 @@ def make_spd_from_file(spdcand, rawdatafile, \ # Sweeped with zerodm-on zerodm = True #downsamp_temp = 1 - data, Data_zerodm = waterfall_array(rawdatafile, spdcand.start, \ - spdcand.duration, spdcand.dm, spdcand.nbins, spdcand.nsub, \ - spdcand.subdm, zerodm, spdcand.downsamp, \ - spdcand.scaleindep, spdcand.width_bins, \ + data, Data_zerodm = waterfall_array(rawdatafile, spdcand.start, + spdcand.duration, spdcand.dm, spdcand.nbins, spdcand.nsub, + spdcand.subdm, zerodm, spdcand.downsamp, + spdcand.scaleindep, spdcand.width_bins, spdcand.mask, maskfile, spdcand.bandpass_corr) # Saving the arrays into the .spd file. with open(temp_filename+".spd", 'wb') as f: - np.savez_compressed(f, \ - Data_dedisp_nozerodm = Data_dedisp_nozerodm.astype(np.float16),\ - Data_dedisp_zerodm = Data_dedisp_zerodm.astype(np.float16),\ - Data_nozerodm = Data_nozerodm.astype(np.float16),\ - delays_nozerodm = delays_nozerodm, \ - freqs_nozerodm = freqs_nozerodm,\ - Data_zerodm = Data_zerodm.astype(np.float16), \ - dm_arr= list(map(np.float16, dm_arr)),\ - sigma_arr = list(map(np.float16, sigma_arr)), \ - width_arr =list(map(np.uint8, width_arr)),\ - dm_list= list(map(np.float16, dm_list)), \ - time_list = list(map(np.float16, time_list)), \ + np.savez_compressed(f, + Data_dedisp_nozerodm = Data_dedisp_nozerodm.astype(np.float16), + Data_dedisp_zerodm = Data_dedisp_zerodm.astype(np.float16), + Data_nozerodm = Data_nozerodm.astype(np.float16), + delays_nozerodm = delays_nozerodm, + freqs_nozerodm = freqs_nozerodm, + Data_zerodm = Data_zerodm.astype(np.float16), + dm_arr= list(map(np.float16, dm_arr)), + sigma_arr = list(map(np.float16, sigma_arr)), + width_arr =list(map(np.uint8, width_arr)), + dm_list= list(map(np.float16, dm_list)), + time_list = list(map(np.float16, time_list)), text_array = text_array) #### Arrays for Plotting DM vs Time is in plot_spd.plot(...) 
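
Everything assembled above is written to a single compressed .spd file with np.savez_compressed, so the result is an ordinary NumPy .npz archive keyed by the argument names in that call (Data_dedisp_nozerodm, Data_dedisp_zerodm, Data_nozerodm, Data_zerodm, dm_arr, sigma_arr, text_array, and so on). A minimal sketch of opening one for inspection, assuming such a file already exists on disk; the filename is illustrative, and plot_spd normally goes through read_spd.spd() instead:

    import numpy as np

    # Illustrative filename; any .spd produced by the code above will do.
    spd = np.load("candidate_DM100.0_12.3s_rank_5.spd")

    print(spd.files)                          # keys passed to savez_compressed
    waterfall = spd["Data_dedisp_nozerodm"]   # float16 (channels x spectra) array
    header = spd["text_array"]                # observation metadata stored as strings
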
if plot: print_debug("Now plotting...") - plot_spd.plot(temp_filename+".spd", args[1:], \ - spec_width=1.5, loc_pulse=loc_pulse, \ - xwin=False, outfile=basename, \ - just_waterfall=just_waterfall, \ - integrate_spec=integrate_spec, \ - integrate_ts=integrate_ts, \ + plot_spd.plot(temp_filename+".spd", args[1:], + spec_width=1.5, loc_pulse=loc_pulse, + xwin=False, outfile=basename, + just_waterfall=just_waterfall, + integrate_spec=integrate_spec, + integrate_ts=integrate_ts, disp_pulse=disp_pulse, tar = None) print_debug("Finished plot %i " %ii+strftime("%Y-%m-%d %H:%M:%S")) numcands+= 1 @@ -221,18 +226,18 @@ def make_spd_from_file(spdcand, rawdatafile, \ print_debug("Finished group %i... "%rank+strftime("%Y-%m-%d %H:%M:%S")) print_debug("Finished running waterfaller... "+strftime("%Y-%m-%d %H:%M:%S")) -def make_spd_from_man_params(spdcand, rawdatafile, \ - txtfile, maskfile, \ - plot, just_waterfall, \ - subdm, dm, sweep_dm, \ - sigma, \ - start_time, duration, \ - width_bins, nbins, downsamp, \ - nsub, \ - scaleindep, \ - spec_width, loc_pulse, \ - integrate_ts, integrate_spec, disp_pulse, \ - basename, \ +def make_spd_from_man_params(spdcand, rawdatafile, + txtfile, maskfile, + plot, just_waterfall, + subdm, dm, sweep_dm, + sigma, + start_time, duration, + width_bins, nbins, downsamp, + nsub, + scaleindep, + spec_width, loc_pulse, + integrate_ts, integrate_spec, disp_pulse, + basename, mask, bandpass_corr, barytime, man_params): """ Makes spd files from output files of rratrap. @@ -277,51 +282,51 @@ def make_spd_from_man_params(spdcand, rawdatafile, \ nsub = rawdatafile.nchan # Array for Plotting Dedispersed waterfall plot - zerodm - OFF - spdcand.manual_params(subdm, dm, sweep_dm, sigma, start_time, \ - width_bins, downsamp, duration, nbins, nsub, rawdatafile.tsamp, \ - rawdatafile.specinfo.N, \ - rawdatafile.frequencies[0], rawdatafile.frequencies[-1], rawdatafile, \ - loc_pulse=loc_pulse, dedisp=True, scaleindep=False, zerodm=False, \ + spdcand.manual_params(subdm, dm, sweep_dm, sigma, start_time, + width_bins, downsamp, duration, nbins, nsub, rawdatafile.tsamp, + rawdatafile.specinfo.N, + rawdatafile.frequencies[0], rawdatafile.frequencies[-1], rawdatafile, + loc_pulse=loc_pulse, dedisp=True, scaleindep=False, zerodm=False, mask=mask, barytime=barytime, bandpass_corr=bandpass_corr) #make an array to store header information for the spd files temp_filename = basename+"_DM%.1f_%.1fs"%(spdcand.subdm, spdcand.topo_start_time) print_debug("Running waterfaller with Zero-DM OFF...") - data, Data_dedisp_nozerodm = waterfall_array(rawdatafile, spdcand.start, \ - spdcand.duration, spdcand.dm, spdcand.nbins, spdcand.nsub, \ - spdcand.subdm, spdcand.zerodm, spdcand.downsamp, \ - spdcand.scaleindep, spdcand.width_bins, \ + data, Data_dedisp_nozerodm = waterfall_array(rawdatafile, spdcand.start, + spdcand.duration, spdcand.dm, spdcand.nbins, spdcand.nsub, + spdcand.subdm, spdcand.zerodm, spdcand.downsamp, + spdcand.scaleindep, spdcand.width_bins, spdcand.mask, maskfile, spdcand.bandpass_corr) # Add additional information to the header information array - text_array = np.array([args[0], rawdatafile.specinfo.telescope, \ - rawdatafile.specinfo.ra_str, rawdatafile.specinfo.dec_str, \ - rawdatafile.specinfo.start_MJD[0], rank, \ - spdcand.nsub, spdcand.nbins, \ - spdcand.subdm, spdcand.sigma, spdcand.sample_number, \ - spdcand.duration, spdcand.width_bins, spdcand.pulse_width, \ - rawdatafile.tsamp, rawdatafile.specinfo.T, spdcand.topo_start_time, \ - data.starttime, data.dt,data.numspectra, 
data.freqs.min(), \ + text_array = np.array([args[0], rawdatafile.specinfo.telescope, + rawdatafile.specinfo.ra_str, rawdatafile.specinfo.dec_str, + rawdatafile.specinfo.start_MJD[0], rank, + spdcand.nsub, spdcand.nbins, + spdcand.subdm, spdcand.sigma, spdcand.sample_number, + spdcand.duration, spdcand.width_bins, spdcand.pulse_width, + rawdatafile.tsamp, rawdatafile.specinfo.T, spdcand.topo_start_time, + data.starttime, data.dt,data.numspectra, data.freqs.min(), data.freqs.max()]) #### Array for plotting Dedispersed waterfall plot zerodm - ON print_debug("Running Waterfaller with Zero-DM ON...") zerodm=True - data, Data_dedisp_zerodm = waterfall_array(rawdatafile, spdcand.start, \ - spdcand.duration, spdcand.dm, spdcand.nbins, spdcand.nsub, \ - spdcand.subdm, zerodm, spdcand.downsamp, \ - spdcand.scaleindep, spdcand.width_bins, \ + data, Data_dedisp_zerodm = waterfall_array(rawdatafile, spdcand.start, + spdcand.duration, spdcand.dm, spdcand.nbins, spdcand.nsub, + spdcand.subdm, zerodm, spdcand.downsamp, + spdcand.scaleindep, spdcand.width_bins, spdcand.mask, maskfile, spdcand.bandpass_corr) ####Sweeped without zerodm - spdcand.manual_params(subdm, dm, sweep_dm, sigma, start_time, \ - width_bins, downsamp, duration, nbins, nsub, rawdatafile.tsamp, \ - rawdatafile.specinfo.N, \ - rawdatafile.frequencies[0], rawdatafile.frequencies[-1], rawdatafile, \ - loc_pulse=loc_pulse, dedisp=None, scaleindep=None, zerodm=None, mask=mask, \ + spdcand.manual_params(subdm, dm, sweep_dm, sigma, start_time, + width_bins, downsamp, duration, nbins, nsub, rawdatafile.tsamp, + rawdatafile.specinfo.N, + rawdatafile.frequencies[0], rawdatafile.frequencies[-1], rawdatafile, + loc_pulse=loc_pulse, dedisp=None, scaleindep=None, zerodm=None, mask=mask, barytime=barytime, bandpass_corr=bandpass_corr) - data, Data_nozerodm = waterfall_array(rawdatafile, spdcand.start, \ - spdcand.duration, spdcand.dm, spdcand.nbins, spdcand.nsub, \ - spdcand.subdm, spdcand.zerodm, spdcand.downsamp, \ - spdcand.scaleindep, spdcand.width_bins, \ + data, Data_nozerodm = waterfall_array(rawdatafile, spdcand.start, + spdcand.duration, spdcand.dm, spdcand.nbins, spdcand.nsub, + spdcand.subdm, spdcand.zerodm, spdcand.downsamp, + spdcand.scaleindep, spdcand.width_bins, spdcand.mask, maskfile, spdcand.bandpass_corr) text_array = np.append(text_array, spdcand.sweep_duration) text_array = np.append(text_array, data.starttime) @@ -337,27 +342,27 @@ def make_spd_from_man_params(spdcand, rawdatafile, \ # Sweeped with zerodm-on zerodm = True #downsamp_temp = 1 - data, Data_zerodm = waterfall_array(rawdatafile, spdcand.start, \ - spdcand.duration, spdcand.dm, spdcand.nbins, spdcand.nsub, \ - spdcand.subdm, zerodm, spdcand.downsamp, \ - spdcand.scaleindep, spdcand.width_bins, \ + data, Data_zerodm = waterfall_array(rawdatafile, spdcand.start, + spdcand.duration, spdcand.dm, spdcand.nbins, spdcand.nsub, + spdcand.subdm, zerodm, spdcand.downsamp, + spdcand.scaleindep, spdcand.width_bins, spdcand.mask, maskfile, spdcand.bandpass_corr) with open(temp_filename+".spd", 'wb') as f: - np.savez_compressed(f, \ - Data_dedisp_nozerodm = Data_dedisp_nozerodm.astype(np.float16),\ - Data_dedisp_zerodm = Data_dedisp_zerodm.astype(np.float16),\ - Data_nozerodm = Data_nozerodm.astype(np.float16),\ - delays_nozerodm = delays_nozerodm, \ - freqs_nozerodm = freqs_nozerodm,\ - Data_zerodm = Data_zerodm.astype(np.float16), \ + np.savez_compressed(f, + Data_dedisp_nozerodm = Data_dedisp_nozerodm.astype(np.float16), + Data_dedisp_zerodm = 
Data_dedisp_zerodm.astype(np.float16), + Data_nozerodm = Data_nozerodm.astype(np.float16), + delays_nozerodm = delays_nozerodm, + freqs_nozerodm = freqs_nozerodm, + Data_zerodm = Data_zerodm.astype(np.float16), text_array = text_array) #### Arrays for Plotting DM vs Time is in plot_spd.plot(...) if plot: print_debug("Now plotting...") - plot_spd.plot(temp_filename+".spd", args[1:], \ - spec_width=spec_width, loc_pulse=loc_pulse, xwin=False, \ - outfile = basename, just_waterfall=just_waterfall, \ - integrate_spec=integrate_spec, integrate_ts=integrate_ts, \ + plot_spd.plot(temp_filename+".spd", args[1:], + spec_width=spec_width, loc_pulse=loc_pulse, xwin=False, + outfile = basename, just_waterfall=just_waterfall, + integrate_spec=integrate_spec, integrate_ts=integrate_ts, disp_pulse=disp_pulse, tar = None) def main(): @@ -384,163 +389,163 @@ def main(): spdcand = spcand.params() if not options.man_params: print_debug('Maximum number of candidates to plot: %i'%options.maxnumcands) - make_spd_from_file(spdcand, rawdatafile, \ - options.txtfile, options.maskfile, \ - options.min_rank, options.group_rank, \ - options.plot, options.just_waterfall, \ - options.integrate_ts, options.integrate_spec, options.disp_pulse, \ - options.loc_pulse, options.nsub, \ - options.maxnumcands, \ - basename, \ - mask=options.mask, barytime=options.barytime, \ + make_spd_from_file(spdcand, rawdatafile, + options.txtfile, options.maskfile, + options.min_rank, options.group_rank, + options.plot, options.just_waterfall, + options.integrate_ts, options.integrate_spec, options.disp_pulse, + options.loc_pulse, options.nsub, + options.maxnumcands, + basename, + mask=options.mask, barytime=options.barytime, bandpass_corr=options.bandpass_corr) else: - print_debug("Making spd files based on mannual parameters. I suggest" \ + print_debug("Making spd files based on mannual parameters. I suggest" "reading in parameters from the groups.txt file.") - make_spd_from_man_params(spdcand, rawdatafile, \ - options.txtfile, options.maskfile, \ - options.plot, options.just_waterfall, \ - options.subdm, options.dm, options.sweep_dms, \ - options.sigma, \ - options.start, options.duration, \ - options.width_bins, options.nbins, options.downsamp, \ - options.nsub, \ - options.scaleindep, \ - options.spec_width, options.loc_pulse, \ - options.integrate_ts, options.integrate_spec, options.disp_pulse, \ - basename, \ - options.mask, options.bandpass_corr, options.barytime, \ + make_spd_from_man_params(spdcand, rawdatafile, + options.txtfile, options.maskfile, + options.plot, options.just_waterfall, + options.subdm, options.dm, options.sweep_dms, + options.sigma, + options.start, options.duration, + options.width_bins, options.nbins, options.downsamp, + options.nsub, + options.scaleindep, + options.spec_width, options.loc_pulse, + options.integrate_ts, options.integrate_spec, options.disp_pulse, + basename, + options.mask, options.bandpass_corr, options.barytime, options.man_params) if __name__=='__main__': - parser = optparse.OptionParser(prog="sp_pipeline..py", \ - version=" Chitrang Patel (May. 12, 2015)", \ - usage="%prog INFILE(PsrFits FILE, SINGLEPULSE FILES)", \ - description="Create single pulse plots to show the " \ - "frequency sweeps of a single pulse, " \ - "DM vs time, and SNR vs DM,"\ + parser = optparse.OptionParser(prog="sp_pipeline..py", + version=" Chitrang Patel (May. 
12, 2015)", + usage="%prog INFILE(PsrFits FILE, SINGLEPULSE FILES)", + description="Create single pulse plots to show the " + "frequency sweeps of a single pulse, " + "DM vs time, and SNR vs DM," "in psrFits data.") - parser.add_option('--groupsfile', dest='txtfile', type='string', \ - help="Give the groups.txt file to read in the groups information.", \ + parser.add_option('--groupsfile', dest='txtfile', type='string', + help="Give the groups.txt file to read in the groups information.", default=None) - parser.add_option('--maskfile', dest='maskfile', type='string', \ - help="Mask file produced by rfifind. Used for " \ - "masking and bandpass correction.", \ + parser.add_option('--maskfile', dest='maskfile', type='string', + help="Mask file produced by rfifind. Used for " + "masking and bandpass correction.", default=None) - parser.add_option('--mask', dest='mask', action="store_true", \ - help="Mask data using rfifind mask (Default: Don't mask).", \ + parser.add_option('--mask', dest='mask', action="store_true", + help="Mask data using rfifind mask (Default: Don't mask).", default=False) - parser.add_option('--numcands', dest='maxnumcands', type='int', \ - help="Maximum number of candidates to plot. (Default: 100).", \ + parser.add_option('--numcands', dest='maxnumcands', type='int', + help="Maximum number of candidates to plot. (Default: 100).", default=100) - parser.add_option('--subdm', dest='subdm', type='float', \ - help="DM to use when subbanding. (Default: " \ + parser.add_option('--subdm', dest='subdm', type='float', + help="DM to use when subbanding. (Default: " "same as --dm)", default=None) - parser.add_option('-s', '--nsub', dest='nsub', type='int', \ - help="Number of subbands to use. Must be a factor " \ - "of number of channels. (Default: " \ + parser.add_option('-s', '--nsub', dest='nsub', type='int', + help="Number of subbands to use. Must be a factor " + "of number of channels. (Default: " "number of channels)", default=None) - parser.add_option('--sigma', dest='sigma', type='float', \ - help="Signal-to-Noise of the pulse." \ - "(Default: Do not specify. In this case you must specify the " \ - "number of subbands.)", \ + parser.add_option('--sigma', dest='sigma', type='float', + help="Signal-to-Noise of the pulse." + "(Default: Do not specify. In this case you must specify the " + "number of subbands.)", default=None) - parser.add_option('-d', '--dm', dest='dm', type='float', \ - help="DM to use when dedispersing data for plot. " \ + parser.add_option('-d', '--dm', dest='dm', type='float', + help="DM to use when dedispersing data for plot. " "(Default: 0 pc/cm^3)", default=0.0) - parser.add_option('--show-ts', dest='integrate_ts', action='store_true', \ - help="Plot the time series. " \ + parser.add_option('--show-ts', dest='integrate_ts', action='store_true', + help="Plot the time series. " "(Default: Dont show the time series)", default=False) - parser.add_option('--show-spec', dest='integrate_spec', action='store_true', \ - help="Plot the spectrum. " \ + parser.add_option('--show-spec', dest='integrate_spec', action='store_true', + help="Plot the spectrum. " "(Default: Do not show the spectrum)", default=False) - parser.add_option("--spec-width", dest="spec_width", type="float", help="Twice " \ - "this number times the pulse width is the window around the " \ - "pulse considered for the spectrum. 
(Default: 1.5)", \ + parser.add_option("--spec-width", dest="spec_width", type="float", help="Twice " + "this number times the pulse width is the window around the " + "pulse considered for the spectrum. (Default: 1.5)", default=1.5) - parser.add_option("--loc", dest="loc_pulse", type="float", help="Fraction of " \ - "the window length where the pulse is located." \ - "(Default: 0.5: half way in.)", \ + parser.add_option("--loc", dest="loc_pulse", type="float", help="Fraction of " + "the window length where the pulse is located." + "(Default: 0.5: half way in.)", default=0.5) - parser.add_option('--show-sweep', dest='disp_pulse', action='store_true', \ - help="Plot the inset dispersed pulse. " \ + parser.add_option('--show-sweep', dest='disp_pulse', action='store_true', + help="Plot the inset dispersed pulse. " "(Default: Do not show the dispersed pulse)", default=False) - parser.add_option('--bandpass', dest='bandpass_corr', action='store_true', \ - help="Correct for the bandpass. Requires an rfifind " \ - "mask provided by --mask option." \ + parser.add_option('--bandpass', dest='bandpass_corr', action='store_true', + help="Correct for the bandpass. Requires an rfifind " + "mask provided by --mask option." "(Default: Do not remove bandpass)", default=False) - parser.add_option('-T', '--start-time', dest='start', type='float', \ - help="Time into observation (in seconds) at which " \ + parser.add_option('-T', '--start-time', dest='start', type='float', + help="Time into observation (in seconds) at which " "to start plot.") - parser.add_option('--notopo', dest='barytime', action='store_false', \ - help="Do not topocenter the given time. Use this option " \ - "only if the given time is topocentric." \ + parser.add_option('--notopo', dest='barytime', action='store_false', + help="Do not topocenter the given time. Use this option " + "only if the given time is topocentric." "(Default: topocenter the given barycentric time)", default=True) - parser.add_option('-t', '--duration', dest='duration', type='float', \ + parser.add_option('-t', '--duration', dest='duration', type='float', help="Duration (in seconds) of plot.") - parser.add_option('-n', '--nbins', dest='nbins', type='int', \ - help="Number of time bins to plot. This option takes " \ - "precedence over -t/--duration if both are " \ + parser.add_option('-n', '--nbins', dest='nbins', type='int', + help="Number of time bins to plot. This option takes " + "precedence over -t/--duration if both are " "provided.") - parser.add_option('--width-bins', dest='width_bins', type='int', \ - help="Smooth each channel/subband with a boxcar " \ - "this many bins wide. (Default: Don't smooth)", \ + parser.add_option('--width-bins', dest='width_bins', type='int', + help="Smooth each channel/subband with a boxcar " + "this many bins wide. (Default: Don't smooth)", default=1) - parser.add_option('--sweep-dm', dest='sweep_dms', type='float', \ - action='append', \ - help="Show the frequency sweep using this DM. " \ + parser.add_option('--sweep-dm', dest='sweep_dms', type='float', + action='append', + help="Show the frequency sweep using this DM. " "(Default: Don't show sweep)", default=[]) - parser.add_option('--sweep-posn', dest='sweep_posns', type='float', \ - action='append', \ - help="Show the frequency sweep at this position. " \ - "The position refers to the high-frequency " \ - "edge of the plot. 
Also, the position should " \ - "be a number between 0 and 1, where 0 is the " \ + parser.add_option('--sweep-posn', dest='sweep_posns', type='float', + action='append', + help="Show the frequency sweep at this position. " + "The position refers to the high-frequency " + "edge of the plot. Also, the position should " + "be a number between 0 and 1, where 0 is the " "left edge of the plot. " "(Default: 0)", default=None) - parser.add_option('--downsamp', dest='downsamp', type='int', \ - help="Factor to downsample data by. (Default: 1).", \ + parser.add_option('--downsamp', dest='downsamp', type='int', + help="Factor to downsample data by. (Default: 1).", default=1) - parser.add_option('--scaleindep', dest='scaleindep', action='store_true', \ - help="If this flag is set scale each channel " \ - "independently. (Default: Scale using " \ - "global maximum.)", \ + parser.add_option('--scaleindep', dest='scaleindep', action='store_true', + help="If this flag is set scale each channel " + "independently. (Default: Scale using " + "global maximum.)", default=False) - parser.add_option('--min-rank', dest='min_rank', type='int',\ - help="Min rank you want to make spd files for. (Default: 3)"\ - " Rank 1: noise,"\ - " Rank 2: RFI,"\ - " Rank 3: maybe astrophysical, very low S/N,"\ - " Rank 4: probably astrophysical but weak, low S/N,"\ - " Rank 5: Very high chance of being astrophysical. S/N>8.0,"\ - " Rank 6: Almost guranteed to be astrophysical. S/N>9.2,",\ + parser.add_option('--min-rank', dest='min_rank', type='int', + help="Min rank you want to make spd files for. (Default: 3)" + " Rank 1: noise," + " Rank 2: RFI," + " Rank 3: maybe astrophysical, very low S/N," + " Rank 4: probably astrophysical but weak, low S/N," + " Rank 5: Very high chance of being astrophysical. S/N>8.0," + " Rank 6: Almost guranteed to be astrophysical. S/N>9.2,", default=3) - parser.add_option('--group-rank', dest='group_rank', type='int',\ - help="Min rank you want to make spd files for. (Default: None)"\ - " Rank 1: noise,"\ - " Rank 2: RFI,"\ - " Rank 3: maybe astrophysical, very low S/N,"\ - " Rank 4: probably astrophysical but weak, low S/N,"\ - " Rank 5: Very high chance of being astrophysical. S/N>8.0,"\ - " Rank 6: Almost guranteed to be astrophysical. S/N>9.2,",\ + parser.add_option('--group-rank', dest='group_rank', type='int', + help="Min rank you want to make spd files for. (Default: None)" + " Rank 1: noise," + " Rank 2: RFI," + " Rank 3: maybe astrophysical, very low S/N," + " Rank 4: probably astrophysical but weak, low S/N," + " Rank 5: Very high chance of being astrophysical. S/N>8.0," + " Rank 6: Almost guranteed to be astrophysical. S/N>9.2,", default=None) - parser.add_option('--use_manual_params', dest='man_params', action='store_true', \ - help="If this flag is not set it will use the parameters " \ - "from the RRATrap groups.txt file. "\ - "(Default: Not use this flag. When using "\ - "parameters from the output of rratrap. Just input"\ - "groups.txt file, mask file, the PSRFITs file"\ - "and the .singlepulse files as input. No need to specify any of"\ - " the other arguments.)",\ + parser.add_option('--use_manual_params', dest='man_params', action='store_true', + help="If this flag is not set it will use the parameters " + "from the RRATrap groups.txt file. " + "(Default: Not use this flag. When using " + "parameters from the output of rratrap. Just input" + "groups.txt file, mask file, the PSRFITs file" + "and the .singlepulse files as input. 
No need to specify any of" + " the other arguments.)", default=False) - parser.add_option('-o', dest='outbasenm', type='string', \ - help="basename of the output spd file.", \ + parser.add_option('-o', dest='outbasenm', type='string', + help="basename of the output spd file.", default=None) - parser.add_option('--noplot', dest='plot', action='store_false', \ - help="Do not generate spd plots.", \ + parser.add_option('--noplot', dest='plot', action='store_false', + help="Do not generate spd plots.", default=True) - parser.add_option('--just-waterfall', dest='just_waterfall', action='store_true', \ - help="Only produce the waterfall plots (frequency vs Time).", \ + parser.add_option('--just-waterfall', dest='just_waterfall', action='store_true', + help="Only produce the waterfall plots (frequency vs Time).", default=False) options, args = parser.parse_args() @@ -553,11 +558,11 @@ def main(): raise ValueError("The groups.txt file must be given on the command line! ") else: if not hasattr(options, 'start'): - raise ValueError("Start time (-T/--start-time) " \ + raise ValueError("Start time (-T/--start-time) " "must be given on command line!") if (not hasattr(options, 'duration')) and (not hasattr(options, 'nbins')): - raise ValueError("One of duration (-t/--duration) " \ - "and num bins (-n/--nbins)" \ + raise ValueError("One of duration (-t/--duration) " + "and num bins (-n/--nbins)" "must be given on command line!") if options.subdm is None: options.subdm = options.dm diff --git a/python/presto/singlepulse/plot_spd.py b/python/presto/singlepulse/plot_spd.py index 828bdfc3e..ca8751ecd 100755 --- a/python/presto/singlepulse/plot_spd.py +++ b/python/presto/singlepulse/plot_spd.py @@ -12,11 +12,11 @@ from builtins import map import numpy as np import optparse -import tarfile +import tarfile from subprocess import Popen, PIPE -import presto.singlepulse.sp_pgplot as sp_pgplot -import presto.singlepulse.read_spd as read_spd -import presto.singlepulse.spio as spio +from presto.singlepulse import sp_pgplot +from presto.singlepulse import read_spd +from presto.singlepulse import spio def plot(spdfile, singlepulsefiles=None, spec_width=1.5, loc_pulse=0.5, xwin=False, outfile="spdplot", @@ -52,17 +52,17 @@ def plot(spdfile, singlepulsefiles=None, spec_width=1.5, loc_pulse=0.5, xwin=Fal if not spdfile.endswith(".spd"): raise ValueError("The first file must be a .spd file") - #npzfile = np.load(spdfile) - spdobj = read_spd.spd(spdfile) + # npzfile = np.load(spdfile) + spdobj = read_spd.spd(spdfile) ##### Read in the header information and other required variables for the plots. 
###### - #text_array = npzfile['text_array'] + # text_array = npzfile['text_array'] man_params = spdobj.man_params fn = spdobj.filename telescope = spdobj.telescope RA = spdobj.ra dec = spdobj.dec MJD = spdobj.mjd - mjd = Popen(["mjd2cal", "%f"%MJD], stdout=PIPE, stderr=PIPE) + mjd = Popen(["mjd2cal", "%f" % MJD], stdout=PIPE, stderr=PIPE) date, err = mjd.communicate() date = date.split()[2:5] rank = spdobj.rank @@ -77,7 +77,7 @@ def plot(spdfile, singlepulsefiles=None, spec_width=1.5, loc_pulse=0.5, xwin=Fal tsamp = spdobj.tsamp Total_observed_time = spdobj.total_obs_time topo_start = spdobj.pulse_peak_time - start = topo_start - loc_pulse*duration + start = topo_start - loc_pulse * duration datastart = spdobj.waterfall_start_time datasamp = spdobj.waterfall_tsamp datanumspectra = spdobj.waterfall_prededisp_nbins @@ -86,7 +86,7 @@ def plot(spdfile, singlepulsefiles=None, spec_width=1.5, loc_pulse=0.5, xwin=Fal sweep_duration = spdobj.sweep_duration sweeped_start = spdobj.sweep_start_time bary_start = spdobj.bary_pulse_peak_time - downsamp = datasamp/tsamp + downsamp = datasamp / tsamp if xwin: pgplot_device = "/XWIN" else: @@ -94,22 +94,24 @@ def plot(spdfile, singlepulsefiles=None, spec_width=1.5, loc_pulse=0.5, xwin=Fal if pgplot_device: sp_pgplot.ppgplot.pgopen(pgplot_device) else: - if (outfile == "spdplot"): # default filename + if (outfile == "spdplot"): # default filename if rank: - sp_pgplot.ppgplot.pgopen(fn[:-5]+'_DM%.1f_%.1fs_rank_%i.spd.ps/VPS'%(subdm, (start+loc_pulse*duration), rank)) + sp_pgplot.ppgplot.pgopen( + fn[:-5] + '_DM%.1f_%.1fs_rank_%i.spd.ps/VPS' % (subdm, (start + loc_pulse * duration), rank)) else: - sp_pgplot.ppgplot.pgopen(fn[:-5]+'_DM%.1f_%.1fs.spd.ps/VPS'%(subdm, (start+loc_pulse*duration))) + sp_pgplot.ppgplot.pgopen(fn[:-5] + '_DM%.1f_%.1fs.spd.ps/VPS' % (subdm, (start + loc_pulse * duration))) else: if rank: - sp_pgplot.ppgplot.pgopen(outfile+'_DM%.1f_%.1fs_rank_%i.spd.ps/VPS'%(subdm, (start+loc_pulse*duration), rank)) + sp_pgplot.ppgplot.pgopen( + outfile + '_DM%.1f_%.1fs_rank_%i.spd.ps/VPS' % (subdm, (start + loc_pulse * duration), rank)) else: - sp_pgplot.ppgplot.pgopen(outfile+'_DM%.1f_%.1fs.spd.ps/VPS'%(subdm, (start+loc_pulse*duration))) + sp_pgplot.ppgplot.pgopen(outfile + '_DM%.1f_%.1fs.spd.ps/VPS' % (subdm, (start + loc_pulse * duration))) if (just_waterfall == False): - sp_pgplot.ppgplot.pgpap(10.25, 8.5/11.0) + sp_pgplot.ppgplot.pgpap(10.25, 8.5 / 11.0) # Dedispersed waterfall plot - zerodm - OFF array = spdobj.data_nozerodm_dedisp.astype(np.float64) sp_pgplot.ppgplot.pgsvp(0.07, 0.40, 0.50, 0.80) - sp_pgplot.ppgplot.pgswin(datastart-start, datastart-start+datanumspectra*datasamp, min_freq, max_freq) + sp_pgplot.ppgplot.pgswin(datastart - start, datastart - start + datanumspectra * datasamp, min_freq, max_freq) sp_pgplot.ppgplot.pgsch(0.8) sp_pgplot.ppgplot.pgslw(3) sp_pgplot.ppgplot.pgbox("BCST", 0, 0, "BCNST", 0, 0) @@ -117,135 +119,141 @@ def plot(spdfile, singlepulsefiles=None, spec_width=1.5, loc_pulse=0.5, xwin=Fal sp_pgplot.ppgplot.pgmtxt('L', 1.8, 0.5, 0.5, "Observing Frequency (MHz)") if not integrate_spec: sp_pgplot.ppgplot.pgmtxt('R', 1.8, 0.5, 0.5, "Zero-dm filtering - Off") - sp_pgplot.plot_waterfall(array,rangex = [datastart-start, datastart-start+datanumspectra*datasamp], rangey = [min_freq, max_freq], image = 'apjgrey') - - #### Plot Dedispersed Time series - Zerodm filter - Off - Dedisp_ts = array[::-1].sum(axis = 0) - times = np.arange(datanumspectra)*datasamp - if integrate_ts: + sp_pgplot.plot_waterfall(array, 
rangex=[datastart - start, datastart - start + datanumspectra * datasamp], + rangey=[min_freq, max_freq], image='apjgrey') + + #### Plot Dedispersed Time series - Zerodm filter - Off + Dedisp_ts = array[::-1].sum(axis=0) + times = np.arange(datanumspectra) * datasamp + if integrate_ts: sp_pgplot.ppgplot.pgsvp(0.07, 0.40, 0.80, 0.90) - sp_pgplot.ppgplot.pgswin(datastart - start, datastart-start+duration, np.min(Dedisp_ts), 1.05*np.max(Dedisp_ts)) + sp_pgplot.ppgplot.pgswin(datastart - start, datastart - start + duration, np.min(Dedisp_ts), + 1.05 * np.max(Dedisp_ts)) sp_pgplot.ppgplot.pgsch(0.8) sp_pgplot.ppgplot.pgslw(3) sp_pgplot.ppgplot.pgbox("BC", 0, 0, "BC", 0, 0) sp_pgplot.ppgplot.pgsci(1) - sp_pgplot.ppgplot.pgline(times,Dedisp_ts) + sp_pgplot.ppgplot.pgline(times, Dedisp_ts) sp_pgplot.ppgplot.pgslw(3) sp_pgplot.ppgplot.pgsci(1) - - errx1 = np.array([0.60 * (datastart-start+duration)]) + + errx1 = np.array([0.60 * (datastart - start + duration)]) erry1 = np.array([0.60 * np.max(Dedisp_ts)]) erry2 = np.array([np.std(Dedisp_ts)]) errx2 = np.array([pulse_width]) sp_pgplot.ppgplot.pgerrb(5, errx1, erry1, errx2, 1.0) sp_pgplot.ppgplot.pgpt(errx1, erry1, -1) - + #### Plot Spectrum - Zerodm filter - Off if integrate_spec: - spectrum_window = spec_width*pulse_width - window_width = int(spectrum_window/datasamp) - #burst_bin = int(datanumspectra*loc_pulse/downsamp) - burst_bin = int(nbins*loc_pulse/downsamp) - on_spec = array[..., burst_bin-window_width:burst_bin+window_width] + spectrum_window = spec_width * pulse_width + window_width = int(spectrum_window / datasamp) + # burst_bin = int(datanumspectra*loc_pulse/downsamp) + burst_bin = int(nbins * loc_pulse / downsamp) + on_spec = array[..., burst_bin - window_width:burst_bin + window_width] Dedisp_spec = on_spec.sum(axis=1) - freqs = np.linspace(min_freq, max_freq, len(Dedisp_spec)) + freqs = np.linspace(min_freq, max_freq, len(Dedisp_spec)) sp_pgplot.ppgplot.pgsvp(0.4, 0.47, 0.5, 0.8) - sp_pgplot.ppgplot.pgswin(np.min(Dedisp_spec), 1.05*np.max(Dedisp_spec), min_freq, max_freq) + sp_pgplot.ppgplot.pgswin(np.min(Dedisp_spec), 1.05 * np.max(Dedisp_spec), min_freq, max_freq) sp_pgplot.ppgplot.pgsch(0.8) sp_pgplot.ppgplot.pgslw(3) sp_pgplot.ppgplot.pgbox("BC", 0, 0, "BC", 0, 0) sp_pgplot.ppgplot.pgsci(1) - sp_pgplot.ppgplot.pgline(Dedisp_spec,freqs) + sp_pgplot.ppgplot.pgline(Dedisp_spec, freqs) sp_pgplot.ppgplot.pgmtxt('R', 1.8, 0.5, 0.5, "Zero-dm filtering - Off") sp_pgplot.ppgplot.pgsch(0.7) sp_pgplot.ppgplot.pgmtxt('T', 1.8, 0.5, 0.5, "Spectrum") sp_pgplot.ppgplot.pgsch(0.8) - - #Dedispersed waterfall plot - Zerodm ON + + # Dedispersed waterfall plot - Zerodm ON sp_pgplot.ppgplot.pgsvp(0.07, 0.40, 0.1, 0.40) - sp_pgplot.ppgplot.pgswin(datastart-start , datastart-start+datanumspectra*datasamp, min_freq, max_freq) + sp_pgplot.ppgplot.pgswin(datastart - start, datastart - start + datanumspectra * datasamp, min_freq, max_freq) sp_pgplot.ppgplot.pgsch(0.8) sp_pgplot.ppgplot.pgslw(3) sp_pgplot.ppgplot.pgbox("BCNST", 0, 0, "BCNST", 0, 0) - sp_pgplot.ppgplot.pgmtxt('B', 2.5, 0.5, 0.5, "Time - %.2f s"%datastart) + sp_pgplot.ppgplot.pgmtxt('B', 2.5, 0.5, 0.5, "Time - %.2f s" % datastart) sp_pgplot.ppgplot.pgmtxt('L', 1.8, 0.5, 0.5, "Observing Frequency (MHz)") if not integrate_spec: sp_pgplot.ppgplot.pgmtxt('R', 1.8, 0.5, 0.5, "Zero-dm filtering - On") array = spdobj.data_zerodm_dedisp.astype(np.float64) - sp_pgplot.plot_waterfall(array,rangex = [datastart-start, datastart-start+datanumspectra*datasamp],rangey = [min_freq, max_freq],image = 
'apjgrey') + sp_pgplot.plot_waterfall(array, rangex=[datastart - start, datastart - start + datanumspectra * datasamp], + rangey=[min_freq, max_freq], image='apjgrey') #### Plot Dedispersed Time series - Zerodm filter - On - dedisp_ts = array[::-1].sum(axis = 0) - times = np.arange(datanumspectra)*datasamp + dedisp_ts = array[::-1].sum(axis=0) + times = np.arange(datanumspectra) * datasamp if integrate_ts: sp_pgplot.ppgplot.pgsvp(0.07, 0.40, 0.40, 0.50) - sp_pgplot.ppgplot.pgswin(datastart - start, datastart-start+duration, np.min(dedisp_ts), 1.05*np.max(dedisp_ts)) + sp_pgplot.ppgplot.pgswin(datastart - start, datastart - start + duration, np.min(dedisp_ts), + 1.05 * np.max(dedisp_ts)) sp_pgplot.ppgplot.pgsch(0.8) sp_pgplot.ppgplot.pgslw(3) sp_pgplot.ppgplot.pgbox("BC", 0, 0, "BC", 0, 0) sp_pgplot.ppgplot.pgsci(1) - sp_pgplot.ppgplot.pgline(times,dedisp_ts) - errx1 = np.array([0.60 * (datastart-start+duration)]) + sp_pgplot.ppgplot.pgline(times, dedisp_ts) + errx1 = np.array([0.60 * (datastart - start + duration)]) erry1 = np.array([0.60 * np.max(dedisp_ts)]) erry2 = np.array([np.std(dedisp_ts)]) errx2 = np.array([pulse_width]) sp_pgplot.ppgplot.pgerrb(5, errx1, erry1, errx2, 1.0) sp_pgplot.ppgplot.pgpt(errx1, erry1, -1) - + #### Plot Spectrum - Zerodm filter - On if integrate_spec: - spectrum_window = spec_width*pulse_width - window_width = int(spectrum_window/datasamp) - #burst_bin = int(datanumspectra*loc_pulse/downsamp) - burst_bin = int(nbins*loc_pulse/downsamp) - on_spec = array[..., burst_bin-window_width:burst_bin+window_width] + spectrum_window = spec_width * pulse_width + window_width = int(spectrum_window / datasamp) + # burst_bin = int(datanumspectra*loc_pulse/downsamp) + burst_bin = int(nbins * loc_pulse / downsamp) + on_spec = array[..., burst_bin - window_width:burst_bin + window_width] Dedisp_spec = on_spec.sum(axis=1) - freqs = np.linspace(min_freq, max_freq, len(Dedisp_spec)) + freqs = np.linspace(min_freq, max_freq, len(Dedisp_spec)) sp_pgplot.ppgplot.pgsvp(0.4, 0.47, 0.1, 0.4) - sp_pgplot.ppgplot.pgswin(np.min(Dedisp_spec), 1.05*np.max(Dedisp_spec), min_freq, max_freq) + sp_pgplot.ppgplot.pgswin(np.min(Dedisp_spec), 1.05 * np.max(Dedisp_spec), min_freq, max_freq) sp_pgplot.ppgplot.pgsch(0.8) sp_pgplot.ppgplot.pgslw(3) sp_pgplot.ppgplot.pgbox("BC", 0, 0, "BC", 0, 0) sp_pgplot.ppgplot.pgsci(1) - sp_pgplot.ppgplot.pgline(Dedisp_spec,freqs) + sp_pgplot.ppgplot.pgline(Dedisp_spec, freqs) sp_pgplot.ppgplot.pgmtxt('R', 1.8, 0.5, 0.5, "Zero-dm filtering - On") sp_pgplot.ppgplot.pgsch(0.7) sp_pgplot.ppgplot.pgmtxt('T', 1.8, 0.5, 0.5, "Spectrum") sp_pgplot.ppgplot.pgsch(0.8) - + if disp_pulse: # Sweeped waterfall plot Zerodm - OFF array = spdobj.data_nozerodm.astype(np.float64) sp_pgplot.ppgplot.pgsvp(0.20, 0.40, 0.50, 0.70) - sp_pgplot.ppgplot.pgswin(sweeped_start, sweeped_start+sweep_duration, min_freq, max_freq) + sp_pgplot.ppgplot.pgswin(sweeped_start, sweeped_start + sweep_duration, min_freq, max_freq) sp_pgplot.ppgplot.pgsch(0.8) sp_pgplot.ppgplot.pgslw(4) sp_pgplot.ppgplot.pgbox("BCST", 0, 0, "BCST", 0, 0) sp_pgplot.ppgplot.pgsch(3) - sp_pgplot.plot_waterfall(array,rangex = [sweeped_start, sweeped_start+sweep_duration],rangey = [min_freq, max_freq],image = 'apjgrey') + sp_pgplot.plot_waterfall(array, rangex=[sweeped_start, sweeped_start + sweep_duration], + rangey=[min_freq, max_freq], image='apjgrey') delays = spdobj.dmsweep_delays freqs = spdobj.dmsweep_freqs sp_pgplot.ppgplot.pgslw(5) - sweepstart = sweeped_start- 0.2*sweep_duration + sweepstart = sweeped_start - 0.2 * 
sweep_duration sp_pgplot.ppgplot.pgsci(0) - sp_pgplot.ppgplot.pgline(delays+sweepstart, freqs) + sp_pgplot.ppgplot.pgline(delays + sweepstart, freqs) sp_pgplot.ppgplot.pgsci(1) sp_pgplot.ppgplot.pgslw(3) - + # Sweeped waterfall plot Zerodm - ON array = spdobj.data_zerodm.astype(np.float64) sp_pgplot.ppgplot.pgsvp(0.20, 0.40, 0.1, 0.3) - sp_pgplot.ppgplot.pgswin(sweeped_start, sweeped_start+sweep_duration, min_freq, max_freq) + sp_pgplot.ppgplot.pgswin(sweeped_start, sweeped_start + sweep_duration, min_freq, max_freq) sp_pgplot.ppgplot.pgsch(0.8) sp_pgplot.ppgplot.pgslw(4) sp_pgplot.ppgplot.pgbox("BCST", 0, 0, "BCST", 0, 0) sp_pgplot.ppgplot.pgsch(3) - sp_pgplot.plot_waterfall(array,rangex = [sweeped_start, sweeped_start+sweep_duration],rangey = [min_freq, max_freq],image = 'apjgrey') + sp_pgplot.plot_waterfall(array, rangex=[sweeped_start, sweeped_start + sweep_duration], + rangey=[min_freq, max_freq], image='apjgrey') sp_pgplot.ppgplot.pgslw(5) - sweepstart = sweeped_start- 0.2*sweep_duration + sweepstart = sweeped_start - 0.2 * sweep_duration sp_pgplot.ppgplot.pgsci(0) - sp_pgplot.ppgplot.pgline(delays+sweepstart, freqs) + sp_pgplot.ppgplot.pgline(delays + sweepstart, freqs) sp_pgplot.ppgplot.pgsci(1) - + #### Figure texts if integrate_spec: sp_pgplot.ppgplot.pgsvp(0.81, 0.97, 0.64, 0.909) @@ -254,39 +262,39 @@ def plot(spdfile, singlepulsefiles=None, spec_width=1.5, loc_pulse=0.5, xwin=Fal sp_pgplot.ppgplot.pgsvp(0.745, 0.97, 0.64, 0.909) sp_pgplot.ppgplot.pgsch(0.7) sp_pgplot.ppgplot.pgslw(3) - sp_pgplot.ppgplot.pgmtxt('T', -1.1, 0.01, 0.0, "RA: %s" %RA) - sp_pgplot.ppgplot.pgmtxt('T', -2.6, 0.01, 0.0, "DEC: %s" %dec) - sp_pgplot.ppgplot.pgmtxt('T', -4.1, 0.01, 0.0, "MJD: %f" %MJD) - sp_pgplot.ppgplot.pgmtxt('T', -5.6, 0.01, 0.0, "Obs date: %s %s %s" %(date[0], date[1], date[2])) - sp_pgplot.ppgplot.pgmtxt('T', -7.1, 0.01, 0.0, "Telescope: %s" %telescope) - sp_pgplot.ppgplot.pgmtxt('T', -8.6, 0.01, 0.0, "DM: %.2f pc cm\\u-3\\d" %dm) + sp_pgplot.ppgplot.pgmtxt('T', -1.1, 0.01, 0.0, "RA: %s" % RA) + sp_pgplot.ppgplot.pgmtxt('T', -2.6, 0.01, 0.0, "DEC: %s" % dec) + sp_pgplot.ppgplot.pgmtxt('T', -4.1, 0.01, 0.0, "MJD: %f" % MJD) + sp_pgplot.ppgplot.pgmtxt('T', -5.6, 0.01, 0.0, "Obs date: %s %s %s" % (date[0], date[1], date[2])) + sp_pgplot.ppgplot.pgmtxt('T', -7.1, 0.01, 0.0, "Telescope: %s" % telescope) + sp_pgplot.ppgplot.pgmtxt('T', -8.6, 0.01, 0.0, "DM: %.2f pc cm\\u-3\\d" % dm) if sigma: - sp_pgplot.ppgplot.pgmtxt('T', -10.1, 0.01, 0.0, "S/N\\dMAX\\u: %.2f" %sigma) + sp_pgplot.ppgplot.pgmtxt('T', -10.1, 0.01, 0.0, "S/N\\dMAX\\u: %.2f" % sigma) else: sp_pgplot.ppgplot.pgmtxt('T', -10.1, 0.01, 0.0, "S/N\\dMAX\\u: N/A") - sp_pgplot.ppgplot.pgmtxt('T', -11.6, 0.01, 0.0, "Number of samples: %i" %nbins) - sp_pgplot.ppgplot.pgmtxt('T', -13.1, 0.01, 0.0, "Number of subbands: %i" %nsub) - sp_pgplot.ppgplot.pgmtxt('T', -14.6, 0.01, 0.0, "Pulse width: %.2f ms" %(pulse_width*1e3)) - sp_pgplot.ppgplot.pgmtxt('T', -16.1, 0.01, 0.0, "Sampling time: %.3f \\gms" %(tsamp*1e6)) - sp_pgplot.ppgplot.pgmtxt('T', -17.6, 0.0, 0.0, "Bary pulse peak time: %.2f s" %(bary_start)) + sp_pgplot.ppgplot.pgmtxt('T', -11.6, 0.01, 0.0, "Number of samples: %i" % nbins) + sp_pgplot.ppgplot.pgmtxt('T', -13.1, 0.01, 0.0, "Number of subbands: %i" % nsub) + sp_pgplot.ppgplot.pgmtxt('T', -14.6, 0.01, 0.0, "Pulse width: %.2f ms" % (pulse_width * 1e3)) + sp_pgplot.ppgplot.pgmtxt('T', -16.1, 0.01, 0.0, "Sampling time: %.3f \\gms" % (tsamp * 1e6)) + sp_pgplot.ppgplot.pgmtxt('T', -17.6, 0.0, 0.0, "Bary pulse peak time: %.2f s" % 
(bary_start)) sp_pgplot.ppgplot.pgsvp(0.07, 0.7, 0.01, 0.05) - sp_pgplot.ppgplot.pgmtxt('T', -2.1, 0.01, 0.0, "%s" %fn) - - #DM vs SNR + sp_pgplot.ppgplot.pgmtxt('T', -2.1, 0.01, 0.0, "%s" % fn) + + # DM vs SNR if not man_params: dm_arr = np.float32(spdobj.dmVt_this_dms) - sigma_arr = np.float32 (spdobj.dmVt_this_sigmas) - time_arr = np.float32 (spdobj.dmVt_this_times) + sigma_arr = np.float32(spdobj.dmVt_this_sigmas) + time_arr = np.float32(spdobj.dmVt_this_times) if integrate_spec: sp_pgplot.ppgplot.pgsvp(0.55, 0.80, 0.65, 0.90) else: sp_pgplot.ppgplot.pgsvp(0.48, 0.73, 0.65, 0.90) - sp_pgplot.ppgplot.pgswin(np.min(dm_arr), np.max(dm_arr), 0.95*np.min(sigma_arr), 1.05*np.max(sigma_arr)) + sp_pgplot.ppgplot.pgswin(np.min(dm_arr), np.max(dm_arr), 0.95 * np.min(sigma_arr), 1.05 * np.max(sigma_arr)) sp_pgplot.ppgplot.pgsch(0.8) sp_pgplot.ppgplot.pgslw(3) sp_pgplot.ppgplot.pgbox("BCNST", 0, 0, "BCNST", 0, 0) sp_pgplot.ppgplot.pgslw(3) - sp_pgplot.ppgplot.pgmtxt('B', 2.5, 0.5, 0.5, "DM (pc cm\\u-3\d)") + sp_pgplot.ppgplot.pgmtxt('B', 2.5, 0.5, 0.5, "DM (pc cm\\u-3\\d)") sp_pgplot.ppgplot.pgmtxt('L', 1.8, 0.5, 0.5, "Signal-to-noise") sp_pgplot.ppgplot.pgpt(dm_arr, sigma_arr, 20) else: @@ -330,179 +338,186 @@ def plot(spdfile, singlepulsefiles=None, spec_width=1.5, loc_pulse=0.5, xwin=Fal sp_pgplot.ppgplot.pgmtxt('B', 2.5, 0.5, 0.5, "Time (s)") sp_pgplot.ppgplot.pgmtxt('L', 1.8, 0.5, 0.5, "DM (pc cm\\u-3\\d)") else: - #sp_pgplot.ppgplot.pgpap(10.25, 10.0/5.0) + # sp_pgplot.ppgplot.pgpap(10.25, 10.0/5.0) sp_pgplot.ppgplot.pgpap(8.0, 1.5) # Dedispersed waterfall plot - zerodm - OFF array = spdobj.data_nozerodm_dedisp.astype(np.float64) sp_pgplot.ppgplot.pgsvp(0.1, 0.70, 0.44, 0.75) - sp_pgplot.ppgplot.pgswin(datastart - start, datastart -start+datanumspectra*datasamp, min_freq, max_freq) + sp_pgplot.ppgplot.pgswin(datastart - start, datastart - start + datanumspectra * datasamp, min_freq, max_freq) sp_pgplot.ppgplot.pgsch(0.8) sp_pgplot.ppgplot.pgslw(3) sp_pgplot.ppgplot.pgbox("BCST", 0, 0, "BCNST", 0, 0) sp_pgplot.ppgplot.pgslw(3) sp_pgplot.ppgplot.pgmtxt('L', 1.8, 0.5, 0.5, "Observing Frequency (MHz)") - sp_pgplot.plot_waterfall(array,rangex = [datastart-start, datastart-start+datanumspectra*datasamp], rangey = [min_freq, max_freq], image = 'apjgrey') - + sp_pgplot.plot_waterfall(array, rangex=[datastart - start, datastart - start + datanumspectra * datasamp], + rangey=[min_freq, max_freq], image='apjgrey') + #### Plot Dedispersed Time series - Zerodm filter - Off - Dedisp_ts = array[::-1].sum(axis = 0) - times = np.arange(datanumspectra)*datasamp + Dedisp_ts = array[::-1].sum(axis=0) + times = np.arange(datanumspectra) * datasamp if integrate_ts: sp_pgplot.ppgplot.pgsvp(0.1, 0.70, 0.75, 0.83) - sp_pgplot.ppgplot.pgswin(datastart - start, datastart-start+duration, np.min(Dedisp_ts), 1.05*np.max(Dedisp_ts)) + sp_pgplot.ppgplot.pgswin(datastart - start, datastart - start + duration, np.min(Dedisp_ts), + 1.05 * np.max(Dedisp_ts)) sp_pgplot.ppgplot.pgsch(0.8) sp_pgplot.ppgplot.pgslw(3) sp_pgplot.ppgplot.pgbox("BC", 0, 0, "BC", 0, 0) sp_pgplot.ppgplot.pgsci(1) - sp_pgplot.ppgplot.pgline(times,Dedisp_ts) + sp_pgplot.ppgplot.pgline(times, Dedisp_ts) sp_pgplot.ppgplot.pgslw(3) sp_pgplot.ppgplot.pgsci(1) - errx1 = np.array([0.60 * (datastart-start+duration)]) + errx1 = np.array([0.60 * (datastart - start + duration)]) erry1 = np.array([0.60 * np.max(Dedisp_ts)]) erry2 = np.array([np.std(Dedisp_ts)]) errx2 = np.array([pulse_width]) sp_pgplot.ppgplot.pgerrb(5, errx1, erry1, errx2, 1.0) 
             sp_pgplot.ppgplot.pgpt(errx1, erry1, -1)
-
+
         #### Plot Spectrum - Zerodm filter - Off
         if integrate_spec:
-            spectrum_window = spec_width*pulse_width
-            window_width = int(spectrum_window/datasamp)
-            #burst_bin = int(datanumspectra*loc_pulse/downsamp)
-            burst_bin = int(nbins*loc_pulse/downsamp)
-            on_spec = array[..., burst_bin-window_width:burst_bin+window_width]
+            spectrum_window = spec_width * pulse_width
+            window_width = int(spectrum_window / datasamp)
+            # burst_bin = int(datanumspectra*loc_pulse/downsamp)
+            burst_bin = int(nbins * loc_pulse / downsamp)
+            on_spec = array[..., burst_bin - window_width:burst_bin + window_width]
             Dedisp_spec = on_spec.sum(axis=1)
-            freqs = np.linspace(min_freq, max_freq, len(Dedisp_spec))
+            freqs = np.linspace(min_freq, max_freq, len(Dedisp_spec))
             sp_pgplot.ppgplot.pgsvp(0.7, 0.9, 0.44, 0.75)
-            sp_pgplot.ppgplot.pgswin(np.min(Dedisp_spec), 1.05*np.max(Dedisp_spec), min_freq, max_freq)
+            sp_pgplot.ppgplot.pgswin(np.min(Dedisp_spec), 1.05 * np.max(Dedisp_spec), min_freq, max_freq)
             sp_pgplot.ppgplot.pgsch(0.8)
             sp_pgplot.ppgplot.pgslw(3)
             sp_pgplot.ppgplot.pgbox("BC", 0, 0, "BC", 0, 0)
             sp_pgplot.ppgplot.pgsci(1)
-            sp_pgplot.ppgplot.pgline(Dedisp_spec,freqs)
+            sp_pgplot.ppgplot.pgline(Dedisp_spec, freqs)
             sp_pgplot.ppgplot.pgmtxt('R', 1.8, 0.5, 0.5, "Zero-dm filtering - Off")
             sp_pgplot.ppgplot.pgsch(0.7)
             sp_pgplot.ppgplot.pgmtxt('T', 1.8, 0.5, 0.5, "Spectrum")
             sp_pgplot.ppgplot.pgsch(0.8)
-
-        #Dedispersed waterfall plot - Zerodm ON
+
+        # Dedispersed waterfall plot - Zerodm ON
         array = spdobj.data_zerodm_dedisp.astype(np.float64)
         sp_pgplot.ppgplot.pgsvp(0.1, 0.70, 0.05, 0.36)
-        sp_pgplot.ppgplot.pgswin(datastart-start , datastart-start+datanumspectra*datasamp, min_freq, max_freq)
+        sp_pgplot.ppgplot.pgswin(datastart - start, datastart - start + datanumspectra * datasamp, min_freq, max_freq)
         sp_pgplot.ppgplot.pgsch(0.8)
         sp_pgplot.ppgplot.pgslw(3)
         sp_pgplot.ppgplot.pgbox("BCNST", 0, 0, "BCNST", 0, 0)
-        sp_pgplot.ppgplot.pgmtxt('B', 2.5, 0.5, 0.5, "Time - %.2f s"%datastart)
+        sp_pgplot.ppgplot.pgmtxt('B', 2.5, 0.5, 0.5, "Time - %.2f s" % datastart)
         sp_pgplot.ppgplot.pgmtxt('L', 1.8, 0.5, 0.5, "Observing Frequency (MHz)")
-        sp_pgplot.plot_waterfall(array,rangex = [datastart-start, datastart-start+datanumspectra*datasamp],rangey = [min_freq, max_freq],image = 'apjgrey')
-
-
+        sp_pgplot.plot_waterfall(array, rangex=[datastart - start, datastart - start + datanumspectra * datasamp],
+                                 rangey=[min_freq, max_freq], image='apjgrey')
+
         #### Plot Dedispersed Time series - Zerodm filter - On
-        dedisp_ts = array[::-1].sum(axis = 0)
-        times = np.arange(datanumspectra)*datasamp
+        dedisp_ts = array[::-1].sum(axis=0)
+        times = np.arange(datanumspectra) * datasamp
         if integrate_ts:
             sp_pgplot.ppgplot.pgsvp(0.1, 0.7, 0.36, 0.44)
-            sp_pgplot.ppgplot.pgswin(datastart - start, datastart-start+duration, np.min(dedisp_ts), 1.05*np.max(dedisp_ts))
+            sp_pgplot.ppgplot.pgswin(datastart - start, datastart - start + duration, np.min(dedisp_ts),
+                                     1.05 * np.max(dedisp_ts))
             sp_pgplot.ppgplot.pgsch(0.8)
             sp_pgplot.ppgplot.pgslw(3)
             sp_pgplot.ppgplot.pgbox("BC", 0, 0, "BC", 0, 0)
             sp_pgplot.ppgplot.pgsci(1)
-            sp_pgplot.ppgplot.pgline(times,dedisp_ts)
-            errx1 = np.array([0.60 * (datastart-start+duration)])
+            sp_pgplot.ppgplot.pgline(times, dedisp_ts)
+            errx1 = np.array([0.60 * (datastart - start + duration)])
             erry1 = np.array([0.60 * np.max(dedisp_ts)])
             erry2 = np.array([np.std(dedisp_ts)])
             errx2 = np.array([pulse_width])
             sp_pgplot.ppgplot.pgerrb(5, errx1, erry1, errx2, 1.0)
             sp_pgplot.ppgplot.pgpt(errx1, erry1, -1)
-
+
         #### Plot Spectrum - Zerodm filter - On
         if integrate_spec:
-            spectrum_window = spec_width*pulse_width
-            window_width = int(spectrum_window/datasamp)
-            #burst_bin = int(datanumspectra*loc_pulse/downsamp)
-            burst_bin = int(nbins*loc_pulse/downsamp)
-            on_spec = array[..., burst_bin-window_width:burst_bin+window_width]
+            spectrum_window = spec_width * pulse_width
+            window_width = int(spectrum_window / datasamp)
+            # burst_bin = int(datanumspectra*loc_pulse/downsamp)
+            burst_bin = int(nbins * loc_pulse / downsamp)
+            on_spec = array[..., burst_bin - window_width:burst_bin + window_width]
             Dedisp_spec = on_spec.sum(axis=1)
-            freqs = np.linspace(min_freq, max_freq, len(Dedisp_spec))
+            freqs = np.linspace(min_freq, max_freq, len(Dedisp_spec))
             sp_pgplot.ppgplot.pgsvp(0.70, 0.90, 0.05, 0.36)
-            sp_pgplot.ppgplot.pgswin(np.min(Dedisp_spec), 1.05*np.max(Dedisp_spec), min_freq, max_freq)
+            sp_pgplot.ppgplot.pgswin(np.min(Dedisp_spec), 1.05 * np.max(Dedisp_spec), min_freq, max_freq)
             sp_pgplot.ppgplot.pgsch(0.8)
             sp_pgplot.ppgplot.pgslw(3)
             sp_pgplot.ppgplot.pgbox("BC", 0, 0, "BC", 0, 0)
             sp_pgplot.ppgplot.pgsci(1)
-            sp_pgplot.ppgplot.pgline(Dedisp_spec,freqs)
+            sp_pgplot.ppgplot.pgline(Dedisp_spec, freqs)
             sp_pgplot.ppgplot.pgmtxt('R', 1.8, 0.5, 0.5, "Zero-dm filtering - On")
             sp_pgplot.ppgplot.pgsch(0.7)
             sp_pgplot.ppgplot.pgmtxt('T', 1.8, 0.5, 0.5, "Spectrum")
             sp_pgplot.ppgplot.pgsch(0.8)
-        if disp_pulse:
+        if disp_pulse:
             # Sweeped waterfall plot Zerodm - OFF
             array = spdobj.data_nozerodm.astype(np.float64)
             sp_pgplot.ppgplot.pgsvp(0.3, 0.70, 0.44, 0.65)
-            sp_pgplot.ppgplot.pgswin(sweeped_start, sweeped_start+sweep_duration, min_freq, max_freq)
+            sp_pgplot.ppgplot.pgswin(sweeped_start, sweeped_start + sweep_duration, min_freq, max_freq)
             sp_pgplot.ppgplot.pgsch(0.8)
             sp_pgplot.ppgplot.pgslw(4)
             sp_pgplot.ppgplot.pgbox("BCST", 0, 0, "BCST", 0, 0)
             sp_pgplot.ppgplot.pgsch(3)
-            sp_pgplot.plot_waterfall(array,rangex = [sweeped_start, sweeped_start+sweep_duration],rangey = [min_freq, max_freq],image = 'apjgrey')
+            sp_pgplot.plot_waterfall(array, rangex=[sweeped_start, sweeped_start + sweep_duration],
+                                     rangey=[min_freq, max_freq], image='apjgrey')
             delays = spdobj.dmsweep_delays
             freqs = spdobj.dmsweep_freqs
             sp_pgplot.ppgplot.pgslw(5)
-            sweepstart = sweeped_start- 0.2*sweep_duration
+            sweepstart = sweeped_start - 0.2 * sweep_duration
             sp_pgplot.ppgplot.pgsci(0)
-            sp_pgplot.ppgplot.pgline(delays+sweepstart, freqs)
+            sp_pgplot.ppgplot.pgline(delays + sweepstart, freqs)
             sp_pgplot.ppgplot.pgsci(1)
             sp_pgplot.ppgplot.pgslw(3)
-
+
             # Sweeped waterfall plot Zerodm - ON
             array = spdobj.data_zerodm.astype(np.float64)
             sp_pgplot.ppgplot.pgsvp(0.3, 0.70, 0.05, 0.25)
-            sp_pgplot.ppgplot.pgswin(sweeped_start, sweeped_start+sweep_duration, min_freq, max_freq)
+            sp_pgplot.ppgplot.pgswin(sweeped_start, sweeped_start + sweep_duration, min_freq, max_freq)
             sp_pgplot.ppgplot.pgsch(0.8)
             sp_pgplot.ppgplot.pgslw(4)
             sp_pgplot.ppgplot.pgbox("BCST", 0, 0, "BCST", 0, 0)
             sp_pgplot.ppgplot.pgsch(3)
-            sp_pgplot.plot_waterfall(array,rangex = [sweeped_start, sweeped_start+sweep_duration],rangey = [min_freq, max_freq],image = 'apjgrey')
+            sp_pgplot.plot_waterfall(array, rangex=[sweeped_start, sweeped_start + sweep_duration],
+                                     rangey=[min_freq, max_freq], image='apjgrey')
             sp_pgplot.ppgplot.pgslw(5)
-            sweepstart = sweeped_start- 0.2*sweep_duration
+            sweepstart = sweeped_start - 0.2 * sweep_duration
             sp_pgplot.ppgplot.pgsci(0)
-            sp_pgplot.ppgplot.pgline(delays+sweepstart, freqs)
+            sp_pgplot.ppgplot.pgline(delays + sweepstart, freqs)
             sp_pgplot.ppgplot.pgsci(1)
-
+
         #### Figure texts
         sp_pgplot.ppgplot.pgsvp(0.05, 0.95, 0.8, 0.9)
         sp_pgplot.ppgplot.pgsch(0.65)
         sp_pgplot.ppgplot.pgslw(3)
-        sp_pgplot.ppgplot.pgmtxt('T', -1.1, 0.01, 0.0, "RA: %s" %RA)
-        sp_pgplot.ppgplot.pgmtxt('T', -2.5, 0.01, 0.0, "DEC: %s" %dec)
-        sp_pgplot.ppgplot.pgmtxt('T', -3.9, 0.01, 0.0, "MJD: %f" %MJD)
-        sp_pgplot.ppgplot.pgmtxt('T', -5.3, 0.01, 0.0, "Obs date: %s %s %s" %(date[0], date[1], date[2]))
-        sp_pgplot.ppgplot.pgmtxt('T', -1.1, 0.35, 0.0, "Telescope: %s" %telescope)
-        sp_pgplot.ppgplot.pgmtxt('T', -2.5, 0.35, 0.0, "DM: %.2f pc cm\\u-3\\d" %dm)
+        sp_pgplot.ppgplot.pgmtxt('T', -1.1, 0.01, 0.0, "RA: %s" % RA)
+        sp_pgplot.ppgplot.pgmtxt('T', -2.5, 0.01, 0.0, "DEC: %s" % dec)
+        sp_pgplot.ppgplot.pgmtxt('T', -3.9, 0.01, 0.0, "MJD: %f" % MJD)
+        sp_pgplot.ppgplot.pgmtxt('T', -5.3, 0.01, 0.0, "Obs date: %s %s %s" % (date[0], date[1], date[2]))
+        sp_pgplot.ppgplot.pgmtxt('T', -1.1, 0.35, 0.0, "Telescope: %s" % telescope)
+        sp_pgplot.ppgplot.pgmtxt('T', -2.5, 0.35, 0.0, "DM: %.2f pc cm\\u-3\\d" % dm)
         if sigma:
-            sp_pgplot.ppgplot.pgmtxt('T', -3.9, 0.35, 0.0, "S/N\dMAX\\u: %.2f" %sigma)
+            sp_pgplot.ppgplot.pgmtxt('T', -3.9, 0.35, 0.0, "S/N\\dMAX\\u: %.2f" % sigma)
         else:
-            sp_pgplot.ppgplot.pgmtxt('T', -3.9, 0.35, 0.0, "S/N\dMAX\\u: N/A")
-        sp_pgplot.ppgplot.pgmtxt('T', -5.3, 0.35, 0.0, "Number of samples: %i" %nbins)
-        sp_pgplot.ppgplot.pgmtxt('T', -1.1, 0.65, 0.0, "Number of subbands: %i" %nsub)
-        sp_pgplot.ppgplot.pgmtxt('T', -2.5, 0.65, 0.0, "Pulse width: %.2f ms" %(pulse_width*1e3))
-        sp_pgplot.ppgplot.pgmtxt('T', -3.9, 0.65, 0.0, "Sampling time: %.3f \gms" %(tsamp*1e6))
-        sp_pgplot.ppgplot.pgmtxt('T', -5.3, 0.65, 0.0, "Bary pulse peak time: %.2f s" %(bary_start))
+            sp_pgplot.ppgplot.pgmtxt('T', -3.9, 0.35, 0.0, "S/N\\dMAX\\u: N/A")
+        sp_pgplot.ppgplot.pgmtxt('T', -5.3, 0.35, 0.0, "Number of samples: %i" % nbins)
+        sp_pgplot.ppgplot.pgmtxt('T', -1.1, 0.65, 0.0, "Number of subbands: %i" % nsub)
+        sp_pgplot.ppgplot.pgmtxt('T', -2.5, 0.65, 0.0, "Pulse width: %.2f ms" % (pulse_width * 1e3))
+        sp_pgplot.ppgplot.pgmtxt('T', -3.9, 0.65, 0.0, "Sampling time: %.3f \gms" % (tsamp * 1e6))
+        sp_pgplot.ppgplot.pgmtxt('T', -5.3, 0.65, 0.0, "Bary pulse peak time: %.2f s" % (bary_start))
         sp_pgplot.ppgplot.pgiden()
         sp_pgplot.ppgplot.pgclos()
 
+
 def main():
     parser = optparse.OptionParser(prog="plot_spd.py",
-                                   usage = "%prog [OPTIONS] INFILE (.spd file) INFILES (.singlepulse files)")
+                                   usage="%prog [OPTIONS] INFILE (.spd file) INFILES (.singlepulse files)")
     parser.add_option("-x", "--xwin", action="store_true", dest="xwin",
                       default=False, help="Don't make a postscript plot, just use an X-window")
-    parser.add_option("-o", dest= "outfile", type = "string", default = "spdplot",
-                      help= "give a base name to the saved plot. DM, time and"
-                            "rank values will be added automatically" )
+    parser.add_option("-o", dest="outfile", type="string", default="spdplot",
+                      help="give a base name to the saved plot. DM, time and"
+                           "rank values will be added automatically")
     parser.add_option("--spec-width", dest="spec_width", type="float", help="Twice this number times the pulse width"
-                      "is the window around the pulse considered for the spectrum. (Default: 1.5)",
+                                                                            "is the window around the pulse considered for the spectrum. (Default: 1.5)",
                       default=1.5)
-    parser.add_option("--loc", dest="loc_pulse", type="float", help="Fraction of the window length where the pulse is located."
-                      "(Default: 0.5 half way in.)",
+    parser.add_option("--loc", dest="loc_pulse", type="float",
+                      help="Fraction of the window length where the pulse is located."
+                           "(Default: 0.5 half way in.)",
                       default=0.5)
     parser.add_option("--just-waterfall", action="store_true", dest="just_waterfall", default=False,
                       help="Just produce the waterfall plots.")
@@ -513,7 +528,7 @@ def main():
     parser.add_option("--show-sweep", action="store_true", dest="disp_pulse", default=False,
                       help="Show dispersed pulse.(Default: Don't show dispersed pulse)")
     (options, args) = parser.parse_args()
-
+
     if len(args) == 0:
         raise ValueError("need a .spd file and .singlepulse files in that order.")
     if not args[0].endswith(".spd"):
@@ -521,13 +536,15 @@ def main():
     if len(args) == 2:
         tar = tarfile.open(args[1], "r:gz") # read in the tarball
         filenames = tar.getnames() # get the filenames
-        plot(args[0], filenames, options.spec_width, options.loc_pulse, options.xwin, options.outfile, options.just_waterfall,
+        plot(args[0], filenames, options.spec_width, options.loc_pulse, options.xwin, options.outfile,
+             options.just_waterfall,
             options.integrate_spec, options.integrate_ts, options.disp_pulse, tar) # make the sp plots
         tar.close()
     else:
-        plot(args[0], args[1:], options.spec_width, options.loc_pulse, options.xwin, options.outfile, options.just_waterfall,
-             options.integrate_spec, options.integrate_ts, options.disp_pulse, tar = None) # make the sp plots
+        plot(args[0], args[1:], options.spec_width, options.loc_pulse, options.xwin, options.outfile,
+             options.just_waterfall,
+             options.integrate_spec, options.integrate_ts, options.disp_pulse, tar=None)  # make the sp plots
 
 
 if __name__ == '__main__':
-    main()
+    main()
diff --git a/python/presto/singlepulse/rrattrap.py b/python/presto/singlepulse/rrattrap.py
index 4c72db3c7..1132a1791 100755
--- a/python/presto/singlepulse/rrattrap.py
+++ b/python/presto/singlepulse/rrattrap.py
@@ -21,7 +21,7 @@
 import matplotlib.pyplot as plt
 from presto.Pgplot import *
 import optparse
-import presto.singlepulse.spio as spio
+from presto.singlepulse import spio
 
 FRACTIONAL_SIGMA = 0.9 # change to 0.8?
 ALL_RANKS_ORDERED = [1,2,0,3,4,5,6]