Example Data Reduction

[ ]:
import os

# Put the opentsdb metrics client into test mode so nothing is pushed to a
# live metrics server. NOTE(review): set before the banzai imports below —
# presumably read at import time; confirm.
os.environ.update(OPENTSDB_PYTHON_METRICS_TEST_MODE='True')
[ ]:
from banzai.calibrations import make_master_calibrations
import requests
from banzai_nres import settings
from banzai import dbs
from banzai.utils.stage_utils import run_pipeline_stages
import logging
from banzai.logs import set_log_level
from glob import glob

import pkg_resources

Set up logging and some settings so we know where the DB should be.

[ ]:
# Verbose logging so each pipeline stage reports what it is doing.
set_log_level('DEBUG')
# Reuse banzai's own logger for the notebook's messages (used in the BPM cell below).
logger = logging.getLogger('banzai')
[ ]:
# Point the pipeline at a local SQLite database and keep processed products
# alongside the raw test data under test_data/.
os.environ['DB_ADDRESS'] = 'sqlite:///test_data/test.db'

settings.db_address = os.environ['DB_ADDRESS']
settings.processed_path = os.path.join(os.getcwd(), 'test_data')
settings.fpack = True
settings.reduction_level = 92
[ ]:
# set up the context object.
import banzai.main
# Build the pipeline runtime context from the settings configured above.
# parse_system_args=False presumably stops parse_args from reading sys.argv
# (we are in a notebook) — confirm against banzai.main.
context = banzai.main.parse_args(settings, parse_system_args=False)

Download some test data from the archive

[ ]:
# make directories for the test dataset.
raw_data_dir = 'test_data/lsc/nres01/20180313/raw'
bpm_dir = 'test_data/lsc/nres01/bpm/'
phoenix_dir = 'test_data/phoenix_models'
raw_phoenix_dir = 'test_data/raw_phoenix_models'

# Create each directory up front; exist_ok=True keeps the cell re-runnable.
for data_dir in (raw_data_dir, bpm_dir, phoenix_dir, raw_phoenix_dir):
    os.makedirs(data_dir, exist_ok=True)
[ ]:
# choose which files we will download for our test dataset.
# The suffix before '.fits.fz' encodes the frame type, matched by the
# processing globs in the cells below: b00 = bias, d00 = dark,
# w00 = lamp flat, a00 = arc lamp, e00 = science exposure.
# bpm_filename is the instrument's bad pixel mask.
bpm_filename = 'bpm-lsc-nres01-fl09-20180215.fits.fz'
test_filenames = ['lscnrs01-fl09-20180313-0001-w00.fits.fz', 'lscnrs01-fl09-20180313-0002-w00.fits.fz',
                  'lscnrs01-fl09-20180313-0003-w00.fits.fz', 'lscnrs01-fl09-20180313-0004-w00.fits.fz',
                  'lscnrs01-fl09-20180313-0005-w00.fits.fz', 'lscnrs01-fl09-20180313-0006-a00.fits.fz',
                  'lscnrs01-fl09-20180313-0007-a00.fits.fz', 'lscnrs01-fl09-20180313-0008-a00.fits.fz',
                  'lscnrs01-fl09-20180313-0009-w00.fits.fz', 'lscnrs01-fl09-20180313-0010-w00.fits.fz',
                  'lscnrs01-fl09-20180313-0011-w00.fits.fz', 'lscnrs01-fl09-20180313-0012-w00.fits.fz',
                  'lscnrs01-fl09-20180313-0013-w00.fits.fz', 'lscnrs01-fl09-20180313-0014-w00.fits.fz',
                  'lscnrs01-fl09-20180313-0015-w00.fits.fz', 'lscnrs01-fl09-20180313-0016-w00.fits.fz',
                  'lscnrs01-fl09-20180313-0017-w00.fits.fz', 'lscnrs01-fl09-20180313-0018-w00.fits.fz',
                  'lscnrs01-fl09-20180313-0019-a00.fits.fz', 'lscnrs01-fl09-20180313-0020-a00.fits.fz',
                  'lscnrs01-fl09-20180313-0021-a00.fits.fz', 'lscnrs01-fl09-20180313-0022-a00.fits.fz',
                  'lscnrs01-fl09-20180313-0023-a00.fits.fz', 'lscnrs01-fl09-20180313-0042-b00.fits.fz',
                  'lscnrs01-fl09-20180313-0043-b00.fits.fz', 'lscnrs01-fl09-20180313-0044-b00.fits.fz',
                  'lscnrs01-fl09-20180313-0045-b00.fits.fz', 'lscnrs01-fl09-20180313-0046-b00.fits.fz',
                  'lscnrs01-fl09-20180313-0047-d00.fits.fz', 'lscnrs01-fl09-20180313-0048-d00.fits.fz',
                  'lscnrs01-fl09-20180313-0049-d00.fits.fz', 'lscnrs01-fl09-20180313-0028-e00.fits.fz',
                  'lscnrs01-fl09-20180313-0029-e00.fits.fz', 'lscnrs01-fl09-20180313-0030-e00.fits.fz',
                  'lscnrs01-fl09-20180313-0031-e00.fits.fz']

# LCO archive frame ids for the files above, in the same order as
# test_filenames (the download cell zips the two lists together).
bpm_frame_id = '22529799'
test_frame_ids = ['8148793', '8148822', '8148805', '8148824', '8148826', '8148876', '8148898',
                  '8148932', '8148960', '8148978', '8149024', '8149104', '8149068', '8149090',
                  '8149128', '8149173', '8149217', '8151252', '8149314', '8149388', '8149469',
                  '8149547', '8149570', '8156341', '8156360', '8156366', '8156376', '8156385',
                  '8156433', '8156500', '8156560', '8151997', '8152050', '8152110', '8152153']
[ ]:
# THIS DOWNLOADS A NEW DATA SET. DO NOT RUN IF YOU HAVE ALREADY DOWNLOADED THE DATA
# Copy the data into the correct directory
def download_frame(frame_id, output_path):
    """Download a single frame from the LCO archive to output_path.

    Looks up the frame record to get its file URL, then writes the file
    contents to disk. raise_for_status() makes a failed request raise
    requests.HTTPError instead of silently writing an error page to disk,
    and the timeout keeps a hung connection from blocking the notebook
    forever.
    """
    response = requests.get(f'https://archive-api.lco.global/frames/{frame_id}', timeout=300)
    response.raise_for_status()
    file_response = requests.get(response.json()['url'], timeout=300)
    file_response.raise_for_status()
    with open(output_path, 'wb') as f:
        f.write(file_response.content)


# Science/calibration frames go to the raw data directory; the bad pixel
# mask goes to its own bpm directory.
for test_filename, frame_id in zip(test_filenames, test_frame_ids):
    download_frame(frame_id, os.path.join(raw_data_dir, test_filename))

download_frame(bpm_frame_id, os.path.join(bpm_dir, bpm_filename))
[ ]:
#Download the phoenix models and get them into the correct format.
#Likewise, do not run if you have already downloaded the data.
# One example PHOENIX model spectrum plus the shared wavelength grid,
# fetched from the Goettingen archive into raw_phoenix_dir.
# NOTE(review): os.system exit codes are ignored, so a failed wget will not
# raise here — check the directory contents before continuing.
os.system(f'wget ftp://phoenix.astro.physik.uni-goettingen.de/v2.0/HiResFITS/PHOENIX-ACES-AGSS-COND-2011/Z-0.0/lte05700-4.50-0.0.PHOENIX-ACES-AGSS-COND-2011-HiRes.fits -P {raw_phoenix_dir}/')
os.system(f'wget ftp://phoenix.astro.physik.uni-goettingen.de/v2.0/HiResFITS/WAVE_PHOENIX-ACES-AGSS-COND-2011.fits -P {raw_phoenix_dir}/')
[ ]:
os.system(f'banzai_nres_munge_phoenix --input-dir={raw_phoenix_dir} --output-dir={phoenix_dir}')

Initialize the db and context object for the rest of the pipeline

[ ]:
# THIS MAKES A FRESH DATABASE. DO NOT RUN ME UNLESS YOU WANT TO DELETE YOUR test.db AND MAKE A FRESH ONE
db_address = os.environ["DB_ADDRESS"]
full_phoenix_path = os.path.join(os.getcwd(), phoenix_dir)
# Create the database, register the phoenix models, then add the elp and lsc
# sites and their NRES instruments. Run in order: the later commands write to
# the database the first one creates.
setup_commands = [
    f'banzai_nres_create_db --db-address={db_address}',
    f'banzai_nres_populate_phoenix_models --model-location={full_phoenix_path} --db-address={db_address}',
    f'banzai_add_site --site elp --latitude 30.67986944 --longitude -104.015175 --elevation 2027 --timezone -6 --db-address={db_address}',
    f'banzai_add_site --site lsc --latitude -30.1673833333 --longitude -70.8047888889 --elevation 2198 --timezone -4 --db-address={db_address}',
    f'banzai_add_instrument --site lsc --camera fl09 --name nres01 --instrument-type 1m0-NRES-SciCam --db-address={db_address}',
    f'banzai_add_instrument --site elp --camera fl17 --name nres02 --instrument-type 1m0-NRES-SciCam --db-address={db_address}',
]
for command in setup_commands:
    os.system(command)
[ ]:
# Register every downloaded bad-pixel mask with the database.
# Fix: the original loop variable was named `bpm_filename`, which clobbered
# the bpm_filename defined in the download cell; use a distinct name.
for bpm_path in glob('test_data/*/nres??/bpm/*bpm*'):
    logger.info(f'adding bpm {bpm_path} to the database')
    os.system(f'banzai_nres_add_bpm --filename {bpm_path} --db-address={os.environ["DB_ADDRESS"]}')

Get the instrument record

[ ]:
instrument = dbs.get_instruments_at_site('lsc', settings.db_address)[0]

Process and Stack Bias Files

[ ]:
# Run every raw bias (b00) frame through the per-frame reduction stages.
bias_files = glob('test_data/*/nres??/*/raw/*b00*')
for bias_path in bias_files:
    run_pipeline_stages([{'path': bias_path}], context)
[ ]:
def mark_frames_as_good(filenames, status="good"):
    """Set the quality flag on processed frames matching a glob pattern.

    Parameters
    ----------
    filenames : str
        Glob pattern matched against files in the processed directories,
        e.g. '*b92*' for processed bias frames.
    status : str, optional
        Quality flag to record, 'good' by default (generalized so bad
        frames can be flagged with the same helper).
    """
    for filename in glob(f'test_data/*/nres??/*/processed/{filenames}'):
        dbs.mark_frame(os.path.basename(filename), status, db_address=os.environ['DB_ADDRESS'])
[ ]:
mark_frames_as_good('*b92*')
[ ]:
make_master_calibrations(instrument, 'BIAS', '2017-01-01', '2019-01-01', context)

Process and Stack Dark Files

[ ]:
# Reduce each raw dark (d00) exposure individually before stacking.
dark_files = glob('test_data/*/nres??/*/raw/*d00*')
for dark_path in dark_files:
    run_pipeline_stages([{'path': dark_path}], context)
[ ]:
mark_frames_as_good('*d92*')
[ ]:
make_master_calibrations(instrument, 'DARK', '2017-01-01', '2019-01-01', context)

Process and stack lamp flats. This is where we also solve for the traces and profile.

[ ]:
# Reduce each raw lamp-flat (w00) exposure one at a time.
flat_files = glob('test_data/*/nres??/*/raw/*w00*')
for flat_path in flat_files:
    run_pipeline_stages([{'path': flat_path}], context)
[ ]:
make_master_calibrations(instrument, 'LAMPFLAT', '2017-01-01', '2019-01-01', context)

Process and stack arc lamps. Find the wavelength solution

[ ]:
# Reduce each raw arc-lamp (a00) exposure through the per-frame stages.
arc_files = glob('test_data/*/nres??/*/raw/*a00*')
for arc_path in arc_files:
    run_pipeline_stages([{'path': arc_path}], context)
[ ]:
make_master_calibrations(instrument, 'DOUBLE', '2017-01-01', '2019-01-01', context)

Process the science spectra

[ ]:
# Finally, reduce each science (e00) exposure with the full pipeline.
science_files = glob('test_data/*/nres??/*/raw/*e00*')
for science_path in science_files:
    run_pipeline_stages([{'path': science_path}], context)
[ ]: