Update intensity calibration

linarphy 2023-07-18 14:45:00 +02:00
parent 0f7e074770
commit 7579a3dde1
No known key found for this signature in database
GPG key ID: 3D4AAAC3AD16E79C


@@ -3,6 +3,7 @@
 import numpy as np
 import matplotlib.pyplot as plt
 from scipy.signal import medfilt , find_peaks
+from scipy.signal import convolve as sp_convolve
 from scipy.optimize import curve_fit
 import utils
 import sys
@@ -11,7 +12,8 @@ import pathlib
 from scipy.ndimage import rotate
 from astropy.io import fits

-cache , filename , output , calibration , intensity_calibration , verbose , no_cache = None , None , None , None , None , False, False
+cache , filename , output , calibration , intensity_calibration \
+, verbose , no_cache = None , None , None , None , None , False, False

 if len( sys.argv ) < 2:
     raise Exception( 'spectrum.py: type \'spectrum.py -h\' for more information' )
@@ -143,7 +145,9 @@ if 'cache' in files and files[ 'cache' ].is_file() and not no_cache:
     with shelve.open( str( files[ 'cache' ] ) ) as cache:
         for key in [ 'data' , 'border' , 'calibrations' ]:
             if key not in cache:
-                raise Exception( 'spectrum.py: missing data in cache file' )
+                raise Exception(
+                    'spectrum.py: missing data in cache file'
+                )
         data = cache[ 'data' ]
         border = cache[ 'border']
         spectrum = cache[ 'spectrum' ]
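
For reference, the cache check rewrapped in this hunk boils down to: open the shelve file, make sure every expected key is present, then read the entries. A minimal standalone sketch of that pattern (the helper name, its return value and the error text are illustrative, not taken from the repository):

import shelve

def load_cache( path , required = ( 'data' , 'border' , 'calibrations' ) ):
    """Return the cached entries, raising if an expected key is missing."""
    with shelve.open( str( path ) ) as cache:
        missing = [ key for key in required if key not in cache ]
        if missing:
            raise Exception(
                'spectrum.py: missing data in cache file (' + ', '.join( missing ) + ')'
            )
        return { key: cache[ key ] for key in required }
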
@@ -282,7 +286,11 @@ else:
     ]
     number = 1000
     position_peaks = np.zeros( number )
-    indexes = np.linspace( border[ 'x' ][ 'min' ] , border[ 'x' ][ 'max' ] , number , dtype = int )
+    indexes = np.linspace(
+        border[ 'x' ][ 'min' ],
+        border[ 'x' ][ 'max' ],
+        number , dtype = int ,
+    )
     for i in range( number ):
         x = np.arange(
             border[ 'y' ][ 'min' ],
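
The rewrapped np.linspace call samples `number` column indices evenly across the usable x range; with dtype = int the values are cast to integers, so duplicate indices can appear when `number` exceeds the width of the range. A tiny self-contained illustration (the border values are made up):

import numpy as np

border = { 'x': { 'min': 120 , 'max': 1800 } } # illustrative borders
number = 1000

# 1000 evenly spaced integer column indices between the two x borders
indexes = np.linspace(
    border[ 'x' ][ 'min' ],
    border[ 'x' ][ 'max' ],
    number ,
    dtype = int ,
)
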
@@ -382,12 +390,28 @@ else:
     factor += 1
     indexes = utils.near_value(
         list_ ,
-        np.min( list_ ) + ( np.mean( list_ ) - np.min( list_ ) ) / factor,
+        np.min(
+            list_,
+        ) + (
+            np.mean(
+                list_,
+            ) - np.min(
+                list_,
+            )
+        ) / factor,
     )
     factor -= 1
     indexes = utils.near_value(
         list_ ,
-        np.min( list_ ) + ( np.mean( list_ ) - np.min( list_ ) ) / factor,
+        np.min(
+            list_,
+        ) + (
+            np.mean(
+                list_,
+            ) - np.min(
+                list_,
+            )
+        ) / factor,
     ) + border[ 'x' ][ 'min' ] + 100 # valid convolution only

     spectrum[ 'x' ] = {
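
Both rewrapped utils.near_value calls use the same threshold, np.min( list_ ) + ( np.mean( list_ ) - np.min( list_ ) ) / factor, i.e. a level 1/factor of the way from the minimum up to the mean; the trailing + border[ 'x' ][ 'min' ] + 100 then shifts the returned indices back into full-image coordinates after the 'valid' convolution. utils.near_value itself is project code not shown in this diff; a plausible stand-in, offered purely as an assumption about its behaviour, could be:

import numpy as np

def near_value( values , target , tolerance = 0.2 ):
    """Hypothetical stand-in for utils.near_value: indices whose values lie near target."""
    values = np.asarray( values )
    return np.where( np.abs( values - target ) <= tolerance )[0]

list_ = np.array( [ 1.0 , 1.2 , 3.5 , 0.9 , 2.0 ] ) # toy data
factor = 2
# a level 1/factor of the way from the minimum up to the mean
threshold = np.min( list_ ) + ( np.mean( list_ ) - np.min( list_ ) ) / factor
print( near_value( list_ , threshold ) ) # -> [1]
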
@@ -418,7 +442,8 @@ else:
             list_ ,
             height = np.mean( list_ ) + ( 1 - np.mean( list_ ) ) / 2,
         )[0]
-        number = 0.01 + abs( len( peaks ) - 90 ) # the lower the better
+        number = 0.01 + abs( len( peaks ) - 90 ) # the lower the
+                                                 # better
         intensity = np.sum( list_[ peaks ] )
         return intensity / ( amplitude ** 2 * number )

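
The wrapped lines belong to a scoring helper: find_peaks keeps peaks above a height threshold (the formula assumes the slice is normalised to [0, 1]), the peak count is penalised for deviating from the expected ~90 calibration peaks, and the summed peak intensity is divided by an amplitude factor. A self-contained sketch of the same scoring idea (the function name, the amplitude argument and the test signal are illustrative):

import numpy as np
from scipy.signal import find_peaks

def peak_score( list_ , amplitude ):
    """Score a candidate slice: higher when ~90 strong peaks are present."""
    peaks = find_peaks(
        list_ ,
        height = np.mean( list_ ) + ( 1 - np.mean( list_ ) ) / 2,
    )[0]
    number = 0.01 + abs( len( peaks ) - 90 ) # the lower the better
    intensity = np.sum( list_[ peaks ] )
    return intensity / ( amplitude ** 2 * number )

signal = np.random.default_rng( 0 ).random( 500 ) # stand-in for a normalised slice
print( peak_score( signal , amplitude = 1.0 ) )
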
@@ -439,22 +464,38 @@ else:
         'valid' ,
     )
     indicators /= np.max( indicators )
-    calibration_areas = utils.consecutive( np.where( indicators > 1 / 1000 )[0] )
+    calibration_areas = utils.consecutive(
+        np.where(
+            indicators > 1 / 1000,
+        )[0],
+    )
     calibration_areas = [
-        [ calibration_area for calibration_area in calibration_areas if (
+        [
+            calibration_area for calibration_area in calibration_areas if (
                 calibration_area[0] < (
                     border[ 'y' ][ 'max' ] - border[ 'y' ][ 'min' ]
                 ) / 2
-        ) ],
-        [ calibration_area for calibration_area in calibration_areas if (
+            )
+        ],
+        [
+            calibration_area for calibration_area in calibration_areas if (
                 calibration_area[0] > (
                     border[ 'y' ][ 'max' ] - border[ 'y' ][ 'min' ]
                 ) / 2
-        ) ],
+            )
+        ],
     ]
     calibration_sizes = [
-        [ len( calibration_area ) for calibration_area in calibration_areas[0] ],
-        [ len( calibration_area ) for calibration_area in calibration_areas[1] ],
+        [
+            len(
+                calibration_area
+            ) for calibration_area in calibration_areas[0]
+        ],
+        [
+            len(
+                calibration_area
+            ) for calibration_area in calibration_areas[1]
+        ],
     ]
     calibrations_y = [
         calibration_areas[0][
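
utils.consecutive groups a sorted index array into runs of consecutive values, which the code above then splits into areas below and above the middle of the y range. The helper is project code not shown in this diff; a stand-in with the behaviour the call sites imply might look like this (the implementation is an assumption):

import numpy as np

def consecutive( indices ):
    """Hypothetical stand-in: split a sorted 1-D index array into consecutive runs."""
    indices = np.asarray( indices )
    if indices.size == 0:
        return []
    return np.split( indices , np.where( np.diff( indices ) != 1 )[0] + 1 )

indicators = np.array( [ 0.0 , 0.2 , 0.3 , 0.0 , 0.0 , 0.5 , 0.6 ] ) # toy data
print( consecutive( np.where( indicators > 1 / 1000 )[0] ) )
# -> [array([1, 2]), array([5, 6])]
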
@@ -506,7 +547,9 @@ else:

 Calibration
 """
-wavelengths = np.arange( spectrum[ 'x' ][ 'max' ] - spectrum[ 'x' ][ 'min' ] )
+wavelengths = np.arange(
+    spectrum[ 'x' ][ 'max' ] - spectrum[ 'x' ][ 'min' ]
+)

 if calibration != None:
     if verbose:
@@ -530,19 +573,48 @@ if calibration != None:
     ] ) * 1e-10

     start , end = 5000 , 18440
-    polyval_before = np.polyfit( abciss[ : start ] , mean_data[ : start ] , 2 )
-    polyval_middle = np.polyfit( abciss[ start : end ] , mean_data[ start : end ] , 2 )
-    polyval_end = np.polyfit( abciss[ end : ] , mean_data[ end : ] , 2 )
+    polyval_before = np.polyfit(
+        abciss[ : start ] ,
+        mean_data[ : start ],
+        2 ,
+    )
+    polyval_middle = np.polyfit(
+        abciss[ start : end ] ,
+        mean_data[ start : end ],
+        2 ,
+    )
+    polyval_end = np.polyfit(
+        abciss[ end : ] ,
+        mean_data[ end : ],
+        2 ,
+    )

-    mean_data[ : start ] = mean_data[ : start ] - np.polyval( polyval_before , abciss[ : start ] )
-    mean_data[ start : end ] = mean_data[ start : end ] - np.polyval( polyval_middle , abciss[ start : end ] )
-    mean_data[ end : ] = mean_data[ end : ] - np.polyval( polyval_end , abciss[ end : ] )
+    mean_data[ : start ] = mean_data[ : start ] - np.polyval(
+        polyval_before ,
+        abciss[ : start ],
+    )
+    mean_data[ start : end ] = mean_data[ start : end ] - np.polyval(
+        polyval_middle ,
+        abciss[ start : end ],
+    )
+    mean_data[ end : ] = mean_data[ end : ] - np.polyval(
+        polyval_end ,
+        abciss[ end : ],
+    )

     mean_data_normalized = mean_data.copy() # normalization
     mean_data_normalized -= np.min( mean_data_normalized )
     mean_data_normalized /= np.max( mean_data_normalized )

-    lines = [ np.mean( cons ) for cons in utils.consecutive( np.where( mean_data_normalized < 0.3 )[0] ) ]
+    lines = [
+        np.mean(
+            cons,
+        ) for cons in utils.consecutive(
+            np.where(
+                mean_data_normalized < 0.3,
+            )[0],
+        )
+    ]
     lines = np.array( lines[ : len( ref ) - 1 ] ) # Balmer discontinuity

     ref = ref[ 1 : ] # start with H-beta
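
The reflowed block fits a separate second-degree polynomial to three abscissa segments (split at 5000 and 18440), subtracts each fit to flatten the continuum, normalises the result to [0, 1], and then takes the centres of consecutive runs below 0.3 as line positions. A condensed sketch of the detrending step with synthetic data (the helper name, segment boundaries and toy spectrum are illustrative):

import numpy as np

def detrend_segments( abciss , data , boundaries , degree = 2 ):
    """Fit and subtract a polynomial baseline on each segment of `data`."""
    data = data.copy()
    edges = [ 0 ] + list( boundaries ) + [ len( data ) ]
    for lo , hi in zip( edges[ : -1 ] , edges[ 1 : ] ):
        coeffs = np.polyfit( abciss[ lo : hi ] , data[ lo : hi ] , degree )
        data[ lo : hi ] -= np.polyval( coeffs , abciss[ lo : hi ] )
    return data

abciss = np.linspace( 0.0 , 1.0 , 20000 )           # stand-in abscissa
mean_data = np.sin( 40 * abciss ) + 3 * abciss ** 2 # toy spectrum with a smooth trend
flattened = detrend_segments( abciss , mean_data , boundaries = ( 5000 , 18440 ) )
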
@@ -578,17 +650,50 @@ bias = {
     ),
 }

-print( bias[ 'top' ] , bias[ 'down' ] )
-mean_bias = np.mean( [ bias[ 'top' ] , bias[ 'down' ] ] , axis = 0 ) / 2
-print( 'mean_bias spectrum: ' + str( mean_bias ) )
+mean_bias = sp_convolve(
+    np.mean(
+        [
+            bias[ 'top' ],
+            bias[ 'down' ],
+        ] ,
+        axis = 0,
+    ) ,
+    np.ones(
+        (
+            50,
+        ),
+    ) ,
+    'same',
+) / 50
+
+plt.plot( bias[ 'top' ] , label = 'top' )
+plt.plot( bias[ 'down' ] , label = 'down' )
+plt.plot( mean_bias , label = 'mean' )
+plt.legend()
+plt.show()
+
+data[
+    : ,
+    spectrum[ 'x' ][ 'min' ] : spectrum[ 'x' ][ 'max' ]
+] = data[
+    :,
+    spectrum[ 'x' ][ 'min' ] : spectrum[ 'x' ][ 'max' ]
+] - mean_bias

 if verbose:
     print( 'bias substraction finished' )

+ETA_bias = np.load( 'ETA_bias.npy' )
+plt.plot( mean_bias , label = 'spectrum' )
+plt.plot( ETA_bias , label = 'ETA' )
+plt.legend()
+plt.show()
+
 mean_data = np.mean( data[
     spectrum[ 'y' ][ 'min' ] : spectrum[ 'y' ][ 'max' ],
     spectrum[ 'x' ][ 'min' ] : spectrum[ 'x' ][ 'max' ]
-] , axis = 0 ) - mean_bias
+] , axis = 0 )

 if intensity != None:
     if verbose:
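
The new bias handling averages the top and bottom bias rows, then smooths the average with a 50-sample boxcar via scipy.signal.convolve; the trailing / 50 normalises the flat kernel, so mean_bias stays an average, which is then subtracted from the spectrum columns. A standalone sketch of the smoothing step (the two bias rows here are synthetic):

import numpy as np
from scipy.signal import convolve as sp_convolve

rng = np.random.default_rng( 0 )
bias = { # synthetic stand-ins for the two bias rows
    'top' : 10 + rng.normal( 0 , 0.5 , 2000 ),
    'down': 12 + rng.normal( 0 , 0.5 , 2000 ),
}

# average the two rows, then smooth with a 50-sample boxcar ('same' keeps the length)
mean_bias = sp_convolve(
    np.mean( [ bias[ 'top' ] , bias[ 'down' ] ] , axis = 0 ),
    np.ones( ( 50 , ) ),
    'same',
) / 50

Note that in 'same' mode the kernel only partially overlaps the first and last ~25 samples, so the ends of mean_bias are pulled toward zero.
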
@@ -597,120 +702,75 @@ if intensity != None:
     intensity_file = pathlib.Path( intensity )
     with shelve.open( str( intensity_file ) ) as storage:
-        intensity_stairs = storage[ 'data' ]
-        intensity_wavelengths = storage[ 'wavelength' ] * 1e-10
+        ETA_stairs = storage[ 'data' ]
+        ETA_wavelengths = storage[ 'wavelength' ] * 1e-10

     wavelengths = wavelengths[ # remove wavelengths outside range
         np.where(
             np.logical_and(
-                wavelengths > np.min( intensity_wavelengths ),
-                wavelengths < np.max( intensity_wavelengths ),
+                wavelengths > np.min( ETA_wavelengths ),
+                wavelengths < np.max( ETA_wavelengths ),
             ),
         )
     ]
-    intensity_wavelengths = intensity_wavelengths[ # remove intensity_wavelengths outside range
+    ETA_wavelengths = ETA_wavelengths[ # remove intensity_wavelengths
+                                       # outside range
         np.where(
             np.logical_and(
-                intensity_wavelengths > np.min( wavelengths ),
-                intensity_wavelengths < np.max( wavelengths ),
+                ETA_wavelengths > np.min( wavelengths ),
+                ETA_wavelengths < np.max( wavelengths ),
             ),
         )
     ]

     if len( wavelengths ) == 0:
-        raise Exception( 'spectrum.py: spectrum and ETA does not share any common wavelengths' )
+        raise Exception(
+            'spectrum.py: spectrum and ETA does not share any common' +
+            'wavelengths'
+        )

     step = 0.2 # depends of ETA source #TODO
     final_intensity = np.zeros( len( wavelengths ) )
     for index in range( len( wavelengths ) ):
-        intensity_value = mean_data[ index ]
-        intensity_wavelength = wavelengths[ index ]
-        intensity_index = utils.near_value( # list of index corresponding to index for intensity_stairs
-            intensity_wavelengths,
-            intensity_wavelength ,
-        )
-        if len( intensity_index ) != 1: # too much or no intensity found near value
-            final_intensity[ index ] = - 1
-            continue
-        intensity_index = intensity_index[0]
-        indexes_stair_lower = np.where( # stairs lower than intensity value
-            intensity_stairs[
-                : ,
-                intensity_index,
-                0
-            ] < intensity_value
-        )[0]
-        if len( indexes_stair_lower ) == 0: # intensity value outside ETA (below)
-            final_intensity[ index ] = 0
-            continue
-        if len( indexes_stair_lower ) != intensity_stairs.shape[0] - indexes_stair_lower[0]: # stairs intensity does not decrease with index as it should
-            # could indicate an artefact in ETA
-            indexes_stair_lower = [
-                int( np.mean(
-                    [
-                        intensity_stairs.shape[0] -
-                        indexes_stair_lower[0] ,
-                        len( indexes_stair_lower )
-                    ]
-                ) )
-            ]
-        indexes_stair_higher = np.where( # stairs higher than intensity value
-            intensity_stairs[
-                : ,
-                intensity_index,
-                0
-            ] > intensity_value
-        )[0]
-        if len( indexes_stair_higher ) == 0: # intensity value outside ETA (upper)
-            final_intensity[ index ] = np.exp(
-                step * intensity_stairs.shape[0]
-            )
-            continue
-        if len( indexes_stair_higher ) - 1 != indexes_stair_higher[-1]: # stairs intensity does not decrease with index as it should
-            indexes_stair_higher = [
-                int( np.mean(
-                    [
-                        indexes_stair_higher[ - 1 ] ,
-                        len( indexes_stair_higher ) - 1,
-                    ],
-                ) ),
-            ]
-        indexes_stair_higher = [
-            indexes_stair_lower[ 0 ] - 1,
-        ]
-        index_stair = {
-            'higher': indexes_stair_higher[-1],
-            'lower' : indexes_stair_lower[0] ,
-        }
-        if index_stair[ 'lower' ] - index_stair[ 'higher' ] != 1: # ETA curve should decrease
-            raise Exception( 'spectrum.py: given intensity stairs (from ETA) are missformed' )
-        stair_intensity = {
-            'higher': intensity_stairs[ index_stair[ 'higher' ] , intensity_index , 0 ],
-            'lower' : intensity_stairs[ index_stair[ 'lower' ] , intensity_index , 0 ],
-        }
-        index_polyval = np.polyfit( # fraction stair index from intensity value
-            [ stair_intensity[ 'higher' ] , stair_intensity[ 'lower' ] ],
-            [ index_stair[ 'higher' ] , index_stair[ 'lower' ] ],
-            1 ,
-        )
-        true_intensity_value = ( intensity_stairs.shape[0] - np.polyval( index_polyval , intensity_value ) ) * step
-        final_intensity[index] = np.exp( true_intensity_value )
-        if final_intensity[ index ] > np.exp( 2.2 ):
-            final_intensity[ index ] = np.exp( 2.2 )
+        wavelength = wavelengths[ index ]
+        current_ETA_index = np.argmin(
+            ETA_wavelengths - wavelength
+        )
+        ETA_wavelength = ETA_wavelengths[ current_ETA_index ]
+        current_stair = ETA_stairs[
+            : ,
+            current_ETA_index,
+            0
+        ]
+        abciss_intensity_curve = np.arange(
+            (
+                ETA_stairs.shape[0] - 1 # we want 11 value, no 12
+            ) * step ,
+            - step / 2, # a little more than - step to remove negative
+            - step ,
+        )
+        function_intensity_curve = lambda x , a , b : a * np.log( # log
+            # because the curve is inverted (exp on the
+            # other side )
+            x
+        ) + b
+        results = curve_fit(
+            function_intensity_curve,
+            current_stair ,
+            abciss_intensity_curve ,
+        )[0]
+        final_intensity[ index ] = np.exp(
+            function_intensity_curve(
+                mean_data[ index ],
+                *results ,
+            ),
+        )

     if verbose:
         print( 'intensity calibration finished' )
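
The rewritten loop replaces the stair-by-stair interpolation: for each wavelength it takes the ETA stair column nearest in wavelength, fits exposure = a * log( measured level ) + b through the stair values (exposure steps of 0.2), evaluates the fit at the measured spectrum value, and exponentiates. A condensed, self-contained sketch of that fit for a single column (the number of stairs, the stair values and the noise are synthetic, and the nearest-column search is skipped):

import numpy as np
from scipy.optimize import curve_fit

step = 0.2     # exposure step between ETA stairs
n_stairs = 12  # stand-in for ETA_stairs.shape[0]

# synthetic stair levels for one wavelength column: an exponential response plus noise
exposure = np.arange( ( n_stairs - 1 ) * step , - step / 2 , - step )
current_stair = np.exp( exposure ) + np.random.default_rng( 1 ).normal( 0 , 0.01 , n_stairs )

# fit exposure = a * log( measured level ) + b, the inverse of the exponential response
function_intensity_curve = lambda x , a , b : a * np.log( x ) + b
( a , b ) , _ = curve_fit( function_intensity_curve , current_stair , exposure )

measured = 5.0 # a measured spectrum value at this wavelength
calibrated = np.exp( function_intensity_curve( measured , a , b ) )

Because the fit is redone for every wavelength, columns with noisy or non-monotonic stairs now still yield a value, whereas the old code handled those cases with explicit sentinels and clamping.
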