diff --git a/spectrum.py b/spectrum.py
index 7c2d3b2..e2b8589 100755
--- a/spectrum.py
+++ b/spectrum.py
@@ -3,6 +3,7 @@
 import numpy as np
 import matplotlib.pyplot as plt
 from scipy.signal import medfilt , find_peaks
+from scipy.signal import convolve as sp_convolve
 from scipy.optimize import curve_fit
 import utils
 import sys
@@ -11,7 +12,8 @@ import pathlib
 from scipy.ndimage import rotate
 from astropy.io import fits
 
-cache , filename , output , calibration , intensity_calibration , verbose , no_cache = None , None , None , None , None , False, False
+cache , filename , output , calibration , intensity_calibration , \
+verbose , no_cache = None , None , None , None , None , False , False
 if len( sys.argv ) < 2:
     raise Exception( 'spectrum.py: type \'spectrum.py -h\' for more information' )
 
@@ -143,7 +145,9 @@ if 'cache' in files and files[ 'cache' ].is_file() and not no_cache:
     with shelve.open( str( files[ 'cache' ] ) ) as cache:
         for key in [ 'data' , 'border' , 'calibrations' ]:
             if key not in cache:
-                raise Exception( 'spectrum.py: missing data in cache file' )
+                raise Exception(
+                    'spectrum.py: missing data in cache file'
+                )
         data         = cache[ 'data' ]
         border       = cache[ 'border']
         spectrum     = cache[ 'spectrum' ]
@@ -282,7 +286,11 @@ else:
     ]
     number         = 1000
     position_peaks = np.zeros( number )
-    indexes        = np.linspace( border[ 'x' ][ 'min' ] , border[ 'x' ][ 'max' ] , number , dtype = int )
+    indexes        = np.linspace(
+        border[ 'x' ][ 'min' ],
+        border[ 'x' ][ 'max' ],
+        number , dtype = int  ,
+    )
     for i in range( number ):
         x = np.arange(
             border[ 'y' ][ 'min' ],
@@ -338,7 +346,7 @@ else:
         axis = 1,
     )
     indexes = utils.near_value(
-        list_                                                        ,
+        list_                                                         ,
         np.mean( list_ ) + ( np.max( list_ ) - np.mean( list_ ) ) / 10,
     ) + border[ 'y' ][ 'min' ]
 
@@ -381,13 +389,29 @@ else:
     while len( indexes ) == 2:
         factor += 1
         indexes = utils.near_value(
-            list_                                                            ,
-            np.min( list_ ) + ( np.mean( list_ ) - np.min( list_ ) ) / factor,
+            list_,
+            np.min( list_ )
+            + ( np.mean( list_ ) - np.min( list_ ) ) / factor,
         )
     factor -= 1
     indexes = utils.near_value(
-        list_                                                            ,
-        np.min( list_ ) + ( np.mean( list_ ) - np.min( list_ ) ) / factor,
+        list_,
+        np.min( list_ )
+        + ( np.mean( list_ ) - np.min( list_ ) ) / factor,
     ) + border[ 'x' ][ 'min' ] + 100 # valid convolution only
 
     spectrum[ 'x' ] = {
@@ -418,7 +442,8 @@ else:
             list_                                                   ,
             height = np.mean( list_ ) + ( 1 - np.mean( list_ ) ) / 2,
         )[0]
-        number    = 0.01 + abs( len( peaks ) - 90 ) # the lower the better
+        # the lower the better
+        number    = 0.01 + abs( len( peaks ) - 90 )
         intensity = np.sum( list_[ peaks ] )
         return intensity / ( amplitude ** 2 * number )
 
@@ -439,22 +464,38 @@ else:
         'valid'      ,
     )
     indicators       /= np.max( indicators )
-    calibration_areas = utils.consecutive( np.where( indicators > 1 / 1000 )[0] )
+    calibration_areas = utils.consecutive(
+        np.where(
+            indicators > 1 / 1000,
+        )[0],
+    )
     calibration_areas = [
-        [ calibration_area for calibration_area in calibration_areas if (
-            calibration_area[0] < (
-                border[ 'y' ][ 'max' ] - border[ 'y' ][ 'min' ]
-            ) / 2
-        ) ],
-        [ calibration_area for calibration_area in calibration_areas if (
-            calibration_area[0] > (
-                border[ 'y' ][ 'max' ] - border[ 'y' ][ 'min' ]
-            ) / 2
-        ) ],
+        [
+            calibration_area for calibration_area in calibration_areas if (
+                calibration_area[0] < (
+                    border[ 'y' ][ 'max' ] - border[ 'y' ][ 'min' ]
+                ) / 2
+            )
+        ],
+        [
+            calibration_area for calibration_area in calibration_areas if (
+                calibration_area[0] > (
+                    border[ 'y' ][ 'max' ] - border[ 'y' ][ 'min' ]
+                ) / 2
+            )
+        ],
     ]
     calibration_sizes = [
-        [ len( calibration_area ) for calibration_area in calibration_areas[0] ],
-        [ len( calibration_area ) for calibration_area in calibration_areas[1] ],
+        [
+            len( calibration_area )
+            for calibration_area in calibration_areas[0]
+        ],
+        [
+            len( calibration_area )
+            for calibration_area in calibration_areas[1]
+        ],
     ]
     calibrations_y = [
         calibration_areas[0][
@@ -506,7 +547,9 @@ else:
 Calibration
 """
 
-wavelengths = np.arange( spectrum[ 'x' ][ 'max' ] - spectrum[ 'x' ][ 'min' ] )
+wavelengths = np.arange(
+    spectrum[ 'x' ][ 'max' ] - spectrum[ 'x' ][ 'min' ]
+)
 
 if calibration != None:
     if verbose:
@@ -530,19 +573,48 @@ if calibration != None:
     ] ) * 1e-10
     start , end = 5000 , 18440
 
-    polyval_before = np.polyfit( abciss[       : start ] , mean_data[       : start ] , 2 )
-    polyval_middle = np.polyfit( abciss[ start : end   ] , mean_data[ start : end   ] , 2 )
-    polyval_end    = np.polyfit( abciss[ end   :       ] , mean_data[ end   :       ] , 2 )
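+    # remove the continuum with piecewise quadratic fits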
+    polyval_before = np.polyfit(
+        abciss[ : start ]   ,
+        mean_data[ : start ],
+        2                   ,
+    )
+    polyval_middle = np.polyfit(
+        abciss[ start : end ]   ,
+        mean_data[ start : end ],
+        2                       ,
+    )
+    polyval_end = np.polyfit(
+        abciss[ end : ]   ,
+        mean_data[ end : ],
+        2                 ,
+    )
 
-    mean_data[       : start ] = mean_data[       : start ] - np.polyval( polyval_before , abciss[       : start ] )
-    mean_data[ start : end   ] = mean_data[ start : end   ] - np.polyval( polyval_middle , abciss[ start : end   ] )
-    mean_data[ end   :       ] = mean_data[ end   :       ] - np.polyval( polyval_end    , abciss[ end   :       ] )
+    mean_data[ : start ] = mean_data[ : start ] - np.polyval(
+        polyval_before   ,
+        abciss[ : start ],
+    )
+    mean_data[ start : end ] = mean_data[ start : end ] - np.polyval(
+        polyval_middle       ,
+        abciss[ start : end ],
+    )
+    mean_data[ end : ] = mean_data[ end : ] - np.polyval(
+        polyval_end    ,
+        abciss[ end : ],
+    )
 
     mean_data_normalized  = mean_data.copy() # normalization
     mean_data_normalized -= np.min( mean_data_normalized )
     mean_data_normalized /= np.max( mean_data_normalized )
 
-    lines = [ np.mean( cons ) for cons in utils.consecutive( np.where( mean_data_normalized < 0.3 )[0] ) ]
+    # detected lines: centers of pixel groups where the normalized
+    # flux dips below 0.3
+    lines = [
+        np.mean( cons )
+        for cons in utils.consecutive(
+            np.where( mean_data_normalized < 0.3 )[0]
+        )
+    ]
     lines = np.array( lines[ : len( ref ) - 1 ] ) # Balmer discontinuity
 
     ref   = ref[ 1 : ] # start with H-beta
@@ -578,17 +650,50 @@ bias = {
     ),
 }
 
-print( bias[ 'top' ] , bias[ 'down' ] )
-mean_bias = np.mean( [ bias[ 'top' ] , bias[ 'down' ] ] , axis = 0 ) / 2
-print( 'mean_bias spectrum: ' + str( mean_bias ) )
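+# average the top/down bias and smooth it with a 50-pixel moving average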
+mean_bias = sp_convolve(
+    np.mean( [ bias[ 'top' ] , bias[ 'down' ] ] , axis = 0 ),
+    np.ones( 50 ),
+    'same',
+) / 50
+
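+# diagnostic plot of the raw and smoothed bias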
+plt.plot( bias[ 'top' ]  , label = 'top'  )
+plt.plot( bias[ 'down' ] , label = 'down' )
+plt.plot( mean_bias      , label = 'mean' )
+plt.legend()
+plt.show()
+
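+# subtract the smoothed bias from every row over the spectrum's x range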
+data[
+    :,
+    spectrum[ 'x' ][ 'min' ] : spectrum[ 'x' ][ 'max' ]
+] = data[
+    :,
+    spectrum[ 'x' ][ 'min' ] : spectrum[ 'x' ][ 'max' ]
+] - mean_bias
 
 if verbose:
     print( 'bias substraction finished' )
 
+ETA_bias = np.load( 'ETA_bias.npy' )
+
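+# diagnostic comparison of the extracted bias with the ETA bias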
+plt.plot( mean_bias , label = 'spectrum' )
+plt.plot( ETA_bias  , label = 'ETA'      )
+plt.legend()
+plt.show()
+
 mean_data = np.mean( data[
     spectrum[ 'y' ][ 'min' ] : spectrum[ 'y' ][ 'max' ],
     spectrum[ 'x' ][ 'min' ] : spectrum[ 'x' ][ 'max' ]
-] , axis = 0 ) - mean_bias
+] , axis = 0 )
 
 if intensity != None:
     if verbose:
@@ -597,121 +702,76 @@ if intensity != None:
     intensity_file = pathlib.Path( intensity )
 
     with shelve.open( str( intensity_file ) ) as storage:
-        intensity_stairs      = storage[ 'data'       ]
-        intensity_wavelengths = storage[ 'wavelength' ] * 1e-10
+        ETA_stairs      = storage[ 'data'       ]
+        ETA_wavelengths = storage[ 'wavelength' ] * 1e-10
 
     wavelengths = wavelengths[ # remove wavelengths outside range
         np.where(
             np.logical_and(
-                wavelengths > np.min( intensity_wavelengths ),
-                wavelengths < np.max( intensity_wavelengths ),
+                wavelengths > np.min( ETA_wavelengths ),
+                wavelengths < np.max( ETA_wavelengths ),
             ),
         )
     ]
-    intensity_wavelengths = intensity_wavelengths[ # remove intensity_wavelengths outside range
+    ETA_wavelengths = ETA_wavelengths[ # remove intensity_wavelengths
+                                       # outside range
         np.where(
             np.logical_and(
-                intensity_wavelengths > np.min( wavelengths ),
-                intensity_wavelengths < np.max( wavelengths ),
+                ETA_wavelengths > np.min( wavelengths ),
+                ETA_wavelengths < np.max( wavelengths ),
             ),
         )
     ]
 
     if len( wavelengths ) == 0:
-        raise Exception( 'spectrum.py: spectrum and ETA does not share any common wavelengths' )
+        raise Exception(
+            'spectrum.py: spectrum and ETA do not share any common '
+            'wavelengths'
+        )
 
     step = 0.2 # depends of ETA source #TODO
 
     final_intensity = np.zeros( len( wavelengths ) )
 
     for index in range( len( wavelengths ) ):
-        intensity_value      = mean_data[ index ]
-        intensity_wavelength = wavelengths[ index ]
-        intensity_index      = utils.near_value( # list of index corresponding to index for intensity_stairs
-            intensity_wavelengths,
-            intensity_wavelength ,
+        wavelength = wavelengths[ index ]
+        current_ETA_index = np.argmin( # nearest ETA wavelength
+            np.abs( ETA_wavelengths - wavelength )
+        )
+        ETA_wavelength = ETA_wavelengths[ current_ETA_index ]
 
-        if len( intensity_index ) != 1: # too much or no intensity found near value
-            final_intensity[ index ] = - 1
-            continue
-        intensity_index = intensity_index[0]
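+        # ETA stair intensities recorded at this wavelength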
+        current_stair = ETA_stairs[
+            :                ,
+            current_ETA_index,
+            0
+        ]
 
-        indexes_stair_lower = np.where( # stairs lower than intensity value
-            intensity_stairs[
-                :              ,
-                intensity_index,
-                0
-            ] < intensity_value
+        abciss_intensity_curve = np.arange(
+            (
+                ETA_stairs.shape[0] - 1 # we want 11 values, not 12
+            ) * step  ,
+            - step / 2, # a little above - step so nothing negative
+            - step    ,
+        )
+        # log because the curve is inverted (exp on the other side)
+        function_intensity_curve = lambda x , a , b : (
+            a * np.log( x ) + b
+        )
+
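+        # fit the exposure step value as a log function of the
+        # measured stair intensity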
+        results = curve_fit(
+            function_intensity_curve,
+            current_stair           ,
+            abciss_intensity_curve  ,
         )[0]
 
-        if len( indexes_stair_lower ) == 0: # intensity value outside ETA (below)
-            final_intensity[ index ] = 0
-            continue
-        if len( indexes_stair_lower ) != intensity_stairs.shape[0] - indexes_stair_lower[0]: # stairs intensity does not decrease with index as it should
-                                                                                             # could indicate an artefact in ETA
-            indexes_stair_lower = [
-                int( np.mean(
-                    [
-                        intensity_stairs.shape[0] -
-                        indexes_stair_lower[0]     ,
-                        len( indexes_stair_lower )
-                    ]
-                ) )
-            ]
-
-        indexes_stair_higher = np.where( # stairs higher than intensity value
-            intensity_stairs[
-                :              ,
-                intensity_index,
-                0
-            ] > intensity_value
-        )[0]
-
-        if len( indexes_stair_higher ) == 0: # intensity value outside ETA (upper)
-            final_intensity[ index ] = np.exp(
-                step * intensity_stairs.shape[0]
-            )
-            continue
-        if len( indexes_stair_higher ) - 1 != indexes_stair_higher[-1]: # stairs intensity does not decrease with index as it should
-            indexes_stair_higher = [
-                int( np.mean(
-                    [
-                        indexes_stair_higher[ - 1 ]    ,
-                        len( indexes_stair_higher ) - 1,
-                    ],
-                ) ),
-            ]
-            indexes_stair_higher = [
-                indexes_stair_lower[ 0 ] - 1,
-            ]
-
-        index_stair = {
-            'higher': indexes_stair_higher[-1],
-            'lower' : indexes_stair_lower[0]  ,
-        }
-
-        if index_stair[ 'lower' ] - index_stair[ 'higher' ] != 1: # ETA curve should decrease
-            raise Exception( 'spectrum.py: given intensity stairs (from ETA) are missformed' )
-
-        stair_intensity = {
-            'higher': intensity_stairs[ index_stair[ 'higher' ] , intensity_index , 0 ],
-            'lower' : intensity_stairs[ index_stair[ 'lower'  ] , intensity_index , 0 ],
-        }
-
-        index_polyval = np.polyfit(            # fraction stair index from intensity value
-            [ stair_intensity[ 'higher' ] , stair_intensity[ 'lower' ] ],
-            [     index_stair[ 'higher' ] ,     index_stair[ 'lower' ] ],
-            1                                                           ,
+        final_intensity[ index ] = np.exp(
+            function_intensity_curve(
+                mean_data[ index ],
+                *results          ,
+            ),
         )
 
-        true_intensity_value = ( intensity_stairs.shape[0] - np.polyval( index_polyval , intensity_value ) ) * step
-
-        final_intensity[index] = np.exp( true_intensity_value )
-
-        if final_intensity[ index ] > np.exp( 2.2 ):
-            final_intensity[ index ] = np.exp( 2.2 )
-
     if verbose:
         print( 'intensity calibration finished' )