2. Getting started with prtecan#
[1]:
%load_ext autoreload
%autoreload 2
import os
import warnings
from pathlib import Path
import arviz as az
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sb
from clophfit import prtecan
from clophfit.binding import fitting, plotting
from clophfit.prtecan import Titration
data_tests = (Path("..") / ".." / "tests" / "Tecan").resolve().absolute()
plt.show()
2025-04-18 12:57:07,004 - clophfit.binding.fitting - INFO - Plotting module started
[2]:
# Work inside the L1 test-data folder; silence prtecan parsing UserWarnings.
os.chdir(data_tests / "L1")
warnings.filterwarnings("ignore", category=UserWarning, module="clophfit.prtecan")
2.1. Parsing a Single Tecan File#
A Tecan file comprises multiple label blocks, each with its unique metadata. This metadata provides critical details and context for the associated label block. In addition, the Tecan file itself also has its overarching metadata that describes its overall content.
When the KEYS for label blocks are identical, it indicates that these label blocks are equivalent - meaning, they contain the same measurements. The equality of KEYS plays a significant role in parsing and analyzing Tecan files, as it assists in identifying and grouping similar measurement sets together. This understanding of label block equivalence based on KEY similarity is critical when working with Tecan files.
[3]:
# Parse a single Tecan XLS export; labelblocks are keyed by label number.
tf = prtecan.Tecanfile("290513_8.8.xls")
lb1 = tf.labelblocks[1]
lb2 = tf.labelblocks[2]
# File-level metadata (instrument, firmware, date, plate type, ...).
tf.metadata
[3]:
{'Device: infinite 200': Metadata(value='Serial number: 810002712', unit=['Serial number of connected stacker:']),
'Firmware: V_2.11_04/08_InfiniTe (Apr 4 2008/14.37.11)': Metadata(value='MAI, V_2.11_04/08_InfiniTe (Apr 4 2008/14.37.11)', unit=None),
'Date:': Metadata(value='29/05/2013', unit=None),
'Time:': Metadata(value='11.59.26', unit=None),
'System': Metadata(value='TECANROBOT', unit=None),
'User': Metadata(value='TECANROBOT\\Administrator', unit=None),
'Plate': Metadata(value='PE 96 Flat Bottom White [PE.pdfx]', unit=None),
'Plate-ID (Stacker)': Metadata(value='Plate-ID (Stacker)', unit=None)}
[4]:
# Per-labelblock metadata (label, gain, wavelengths, ...) and the well -> value map.
print("Metadata:\n", lb1.metadata, "\n")
print("Data:\n", lb1.data)
Metadata:
{'Label': Metadata(value='Label1', unit=None), 'Mode': Metadata(value='Fluorescence Top Reading', unit=None), 'Excitation Wavelength': Metadata(value=400, unit=['nm']), 'Emission Wavelength': Metadata(value=535, unit=['nm']), 'Excitation Bandwidth': Metadata(value=20, unit=['nm']), 'Emission Bandwidth': Metadata(value=25, unit=['nm']), 'Gain': Metadata(value=94, unit=['Optimal']), 'Number of Flashes': Metadata(value=10, unit=None), 'Integration Time': Metadata(value=20, unit=['µs']), 'Lag Time': Metadata(value='µs', unit=None), 'Settle Time': Metadata(value='ms', unit=None), 'Start Time:': Metadata(value='29/05/2013 11.59.52', unit=None), 'Temperature': Metadata(value=25.4, unit=['°C']), 'End Time:': Metadata(value='29/05/2013 12.00.30', unit=None)}
Data:
{'A01': 17123.0, 'A02': 19477.0, 'A03': 20346.0, 'A04': 20322.0, 'A05': 23189.0, 'A06': 21656.0, 'A07': 19716.0, 'A08': 22933.0, 'A09': 24845.0, 'A10': 26932.0, 'A11': 24703.0, 'A12': 31320.0, 'B01': 19915.0, 'B02': 20707.0, 'B03': 34870.0, 'B04': 19916.0, 'B05': 21555.0, 'B06': 20760.0, 'B07': 26798.0, 'B08': 21530.0, 'B09': 24059.0, 'B10': 22748.0, 'B11': 23850.0, 'B12': 29973.0, 'C01': 13223.0, 'C02': 14025.0, 'C03': 16139.0, 'C04': 18865.0, 'C05': 21233.0, 'C06': 20513.0, 'C07': 32580.0, 'C08': 23449.0, 'C09': 22229.0, 'C10': 27775.0, 'C11': 26371.0, 'C12': 28211.0, 'D01': 17795.0, 'D02': 17551.0, 'D03': 19378.0, 'D04': 20682.0, 'D05': 21870.0, 'D06': 23351.0, 'D07': 21868.0, 'D08': 22034.0, 'D09': 22020.0, 'D10': 25802.0, 'D11': 25501.0, 'D12': 27969.0, 'E01': 20430.0, 'E02': 20843.0, 'E03': 19416.0, 'E04': 20888.0, 'E05': 22007.0, 'E06': 23131.0, 'E07': 22137.0, 'E08': 22253.0, 'E09': 23164.0, 'E10': 32320.0, 'E11': 22974.0, 'E12': 22002.0, 'F01': 19330.0, 'F02': 19580.0, 'F03': 22396.0, 'F04': 27164.0, 'F05': 22089.0, 'F06': 24924.0, 'F07': 22568.0, 'F08': 39721.0, 'F09': 22186.0, 'F10': 32362.0, 'F11': 35322.0, 'F12': 28114.0, 'G01': 23505.0, 'G02': 20463.0, 'G03': 33826.0, 'G04': 22191.0, 'G05': 21804.0, 'G06': 22721.0, 'G07': 23578.0, 'G08': 21727.0, 'G09': 34636.0, 'G10': 26413.0, 'G11': 35600.0, 'G12': 34854.0, 'H01': 19143.0, 'H02': 33593.0, 'H03': 27593.0, 'H04': 39346.0, 'H05': 41586.0, 'H06': 23899.0, 'H07': 22724.0, 'H08': 24544.0, 'H09': 23636.0, 'H10': 24116.0, 'H11': 29481.0, 'H12': 28309.0}
[5]:
# Labelblocks from a second file (different pH): "almost equal" (equivalent
# measurement settings) and, here, also strictly equal.
tf1 = prtecan.Tecanfile("290513_8.2.xls")
tf1.labelblocks[1].__almost_eq__(lb1), tf1.labelblocks[1] == lb1
[5]:
(True, True)
2.2. Titration inherits TecanfilesGroup#
[6]:
# Group the two files; equivalent labelblocks merge into a LabelblocksGroup.
tfg = prtecan.TecanfilesGroup([tf, tf1])
lbg1 = tfg.labelblocksgroups[1]
print(lbg1.data["A01"])
# Normalized counterpart of the same well (see normalization notes below).
lbg1.data_nrm["A01"]
WARNING - Different LabelblocksGroup across files: ['290513_8.8.xls', '290513_8.2.xls'].
[17123.0, 17255.0]
[6]:
[910.7978723404256, 917.8191489361702]
[7]:
# A Titration adds the x values (here pH) on top of a group of Tecan files.
tit = prtecan.Titration([tf, tf1], x=np.array([8.8, 8.2]), is_ph=True)
print(tit)
tit.labelblocksgroups[1].data_nrm["A01"]
WARNING - Different LabelblocksGroup across files: ['290513_8.8.xls', '290513_8.2.xls'].
Titration
files=["290513_8.8.xls", ...],
x=[np.float64(8.8), np.float64(8.2)],
x_err=[],
labels=dict_keys([1, 2]),
params=TitrationConfig(bg=True, bg_adj=False, dil=True, nrm=True, bg_mth='mean', mcmc=False) pH=True additions=[]
scheme=PlateScheme(file=None, _buffer=[], _discard=[], _ctrl=[], _names={}))
[7]:
[910.7978723404256, 917.8191489361702]
[8]:
# Titration reuses the same grouping logic as TecanfilesGroup.
tit.labelblocksgroups == tfg.labelblocksgroups
[8]:
True
[9]:
# Set additions (initial volume followed by one titrant addition) and enable
# normalization, dilution correction, and background subtraction.
tit.additions = [100, 1]
tit.params.nrm = True
tit.params.dil = True
tit.params.bg = True
tit.params
[9]:
TitrationConfig(bg=True, bg_adj=False, dil=True, nrm=True, bg_mth='mean', mcmc=False)
[10]:
# Declare B02 as the only buffer well; show the per-label buffer dataframes.
tit.buffer.wells = ["B02"]
tit.buffer.dataframes
[10]:
{1: B02 Label fit fit_err mean sem
0 20707.0 1 20585.5 0.0 20707.0 NaN
1 20464.0 1 20585.5 0.0 20464.0 NaN,
2: Empty DataFrame
Columns: []
Index: []}
[11]:
# Background values and errors per label (error is NaN with a single buffer well).
tit.bg, tit.bg_err
[11]:
({1: array([1101.43617021, 1088.5106383 ]),
2: array([628.78787879, 590.15306122])},
{1: array([nan, nan]), 2: array([nan, nan])})
[12]:
# Normalized data are unaffected by the buffer-well assignment.
tit.labelblocksgroups[1].data_nrm["A01"]
[12]:
[910.7978723404256, 917.8191489361702]
2.3. Group a list of tecan files into a titration#
The command Titration.fromlistfile("../listfile") reads a list of Tecan files, identifies unique measurements in each file, groups matching ones, and combines them into a titration set for further analysis.
[13]:
# Build a titration from a list file; the x values (pH) come from the list.
tit = Titration.fromlistfile("./list.pH.csv", is_ph=True)
print(tit.x)
lbg1 = tit.labelblocksgroups[1]
lbg2 = tit.labelblocksgroups[2]
# Metadata of one underlying labelblock, e.g. its recorded temperature.
print(lbg2.labelblocks[6].metadata["Temperature"])
lbg1.metadata, lbg2.metadata
WARNING - Different LabelblocksGroup across files: [PosixPath('290513_8.8.xls'), PosixPath('290513_8.2.xls'), PosixPath('290513_7.7.xls'), PosixPath('290513_7.2.xls'), PosixPath('290513_6.6.xls'), PosixPath('290513_6.1.xls'), PosixPath('290513_5.5.xls')].
[8.9 8.3 7.7 7.05 6.55 6. 5.5 ]
Metadata(value=25.1, unit=['°C'])
[13]:
({'Label': Metadata(value='Label1', unit=None),
'Mode': Metadata(value='Fluorescence Top Reading', unit=None),
'Excitation Wavelength': Metadata(value=400, unit=['nm']),
'Emission Wavelength': Metadata(value=535, unit=['nm']),
'Excitation Bandwidth': Metadata(value=20, unit=['nm']),
'Emission Bandwidth': Metadata(value=25, unit=['nm']),
'Number of Flashes': Metadata(value=10, unit=None),
'Integration Time': Metadata(value=20, unit=['µs']),
'Lag Time': Metadata(value='µs', unit=None),
'Settle Time': Metadata(value='ms', unit=None),
'Gain': Metadata(value=94, unit=None)},
{'Label': Metadata(value='Label2', unit=None),
'Mode': Metadata(value='Fluorescence Top Reading', unit=None),
'Excitation Wavelength': Metadata(value=485, unit=['nm']),
'Emission Wavelength': Metadata(value=535, unit=['nm']),
'Excitation Bandwidth': Metadata(value=25, unit=['nm']),
'Emission Bandwidth': Metadata(value=25, unit=['nm']),
'Number of Flashes': Metadata(value=10, unit=None),
'Integration Time': Metadata(value=20, unit=['µs']),
'Lag Time': Metadata(value='µs', unit=None),
'Settle Time': Metadata(value='ms', unit=None),
'Movement': Metadata(value='Move Plate Out', unit=None)})
Within each labelblocksgroup, data_nrm
is immediately calculated.
[14]:
# Note lbg2.data is empty ({}) while lbg2.data_nrm is populated — presumably
# the label-2 blocks differ in some setting (e.g. gain); TODO confirm.
(lbg1.data["H03"], lbg2.data, lbg1.data_nrm["H03"], lbg2.data_nrm["H03"])
[14]:
([27593.0, 26956.0, 26408.0, 26815.0, 28308.0, 30227.0, 30640.0],
{},
[1467.712765957447,
1433.8297872340427,
1404.6808510638298,
1426.3297872340427,
1505.7446808510638,
1607.8191489361702,
1629.787234042553],
[1456.2121212121212,
1363.9285714285716,
1310.357142857143,
1214.5408163265306,
1200.9693877551022,
1224.642857142857,
1193.8265306122448])
Start with plate-scheme loading to set buffer wells (and consequently buffer values).
Labelblocks groups will be populated with buffer-subtracted data, with or without normalization.
[15]:
# Load the plate scheme: buffer wells, control wells, and named control groups.
tit.load_scheme("./scheme.txt")
print(f"Buffer wells : {tit.scheme.buffer}")
print(f"Ctrl wells : {tit.scheme.ctrl}")
print(f"CTR name:wells {tit.scheme.names}")
tit.scheme
Buffer wells : ['C12', 'D01', 'D12', 'E01', 'E12', 'F01']
Ctrl wells : ['G12', 'H12', 'A12', 'G01', 'H01', 'F12', 'C01', 'A01', 'B12', 'B01']
CTR name:wells {'E2GFP': {'F12', 'C01', 'G01', 'B12'}, 'V224L': {'H12', 'A12', 'A01', 'H01'}, 'V224Q': {'G12', 'B01'}}
[15]:
PlateScheme(file='./scheme.txt', _buffer=['C12', 'D01', 'D12', 'E01', 'E12', 'F01'], _discard=[], _ctrl=['G12', 'H12', 'A12', 'G01', 'H01', 'F12', 'C01', 'A01', 'B12', 'B01'], _names={'E2GFP': {'F12', 'C01', 'G01', 'B12'}, 'V224L': {'H12', 'A12', 'A01', 'H01'}, 'V224Q': {'G12', 'B01'}})
[16]:
# Raw vs normalized data for control well H12 after scheme loading.
(lbg1.data["H12"], lbg2.data_nrm["H12"])
[16]:
([28309.0, 27837.0, 26511.0, 25771.0, 27048.0, 27794.0, 28596.0],
[714.4949494949495,
686.1224489795918,
683.3673469387755,
693.9795918367347,
737.0408163265306,
745.765306122449,
725.8163265306123])
[17]:
# Load titrant additions: initial volume followed by per-step added volumes.
tit.load_additions("./additions.pH")
tit.additions
[17]:
[100, 2, 2, 2, 2, 2, 2]
[18]:
# Raw, fully corrected (tit.data), normalized, and background values for H12.
(lbg1.data["H12"], tit.data[1]["H12"], lbg1.data_nrm["H12"], tit.bg[1])
[18]:
([28309.0, 27837.0, 26511.0, 25771.0, 27048.0, 27794.0, 28596.0],
array([302.45567376, 321.23670213, 313.92695035, 277.94929078,
353.65212766, 335.1001773 , 316.34042553]),
[1505.7978723404256,
1480.6914893617022,
1410.159574468085,
1370.7978723404256,
1438.723404255319,
1478.404255319149,
1521.063829787234],
array([1203.34219858, 1165.7535461 , 1108.30673759, 1108.58156028,
1111.2677305 , 1173.7677305 , 1238.61702128]))
The order in which you apply dilution correction and plate scheme can impact your intermediate results, even though the final results might be the same.
Dilution correction adjusts the measured data to account for any dilutions made during sample preparation. This typically involves multiplying the measured values by the dilution factor to estimate the true concentration of the sample.
A plate scheme describes the layout of the samples on a plate (common in laboratory experiments, such as those involving microtiter plates). The plate scheme may involve rearranging or grouping the data in some way based on the physical location of the samples on the plate.
2.3.1. Reassign Buffer Wells#
You can reassign buffer wells, updating the data to account for any dilution (additions) and subtracting the updated buffer value. This is a handy feature that gives you more control over your analysis.
For instance, consider the following data for a particular well:
[19]:
# D01 is a buffer well: after buffer subtraction its corrected values sit
# around zero (here slightly negative).
print(tit.labelblocksgroups[2].data_nrm["D01"])
tit.data[2].get("D01")
[373.6363636363636, 318.6734693877551, 332.60204081632656, 345.0, 364.03061224489795, 375.3571428571429, 401.5816326530612]
[19]:
array([-131.98653199, -151.43877551, -146.51972789, -164.61547619,
-173.15816327, -229.88690476, -201.95238095])
[20]:
# Disable background subtraction and dilution correction to inspect their effect.
tit.params.bg = False
tit.params.dil = False
print(tit.data[2]["D02"])
[608.23232323 564.03061224 551.2244898 517.19387755 488.92857143
481.07142857 460.51020408]
You can reassign buffer wells using the buffer.wells
attribute:
[21]:
# Re-enable background subtraction and reassign the buffer wells.
tit.params.bg = True
tit.buffer.wells = ["D01", "E01"]
[22]:
# Background values recomputed from the new buffer wells.
tit.bg
[22]:
{1: array([1016.62234043, 971.56914894, 937.9787234 , 951.38297872,
985.74468085, 1005.26595745, 1070.85106383]),
2: array([420.05050505, 364.69387755, 382.5255102 , 402.19387755,
430.84183673, 455.07653061, 466.19897959])}
This updates the data for the specified wells, correcting for dilution and subtracting the buffer value:
🚨 The data remain: 🚨
unchanged in labelblocksgroups[:].data
buffer subtracted in labelblocksgroups[:].data_buffersubtracted
buffer subtracted and dilution corrected in data
2.4. Fitting#
test:
E10
F10
G09
TODO:
Remove initial/final outlier data points
[23]:
# Rebuild the titration from scratch with scheme and additions loaded.
os.chdir(data_tests / "L1")
tit = Titration.fromlistfile("./list.pH.csv", is_ph=True)
tit.load_scheme("./scheme.0.txt")
tit.load_additions("additions.pH")
tit
WARNING - Different LabelblocksGroup across files: [PosixPath('290513_8.8.xls'), PosixPath('290513_8.2.xls'), PosixPath('290513_7.7.xls'), PosixPath('290513_7.2.xls'), PosixPath('290513_6.6.xls'), PosixPath('290513_6.1.xls'), PosixPath('290513_5.5.xls')].
[23]:
Titration
files=["290513_8.8.xls", ...],
x=[np.float64(8.9), np.float64(8.3), np.float64(7.7), np.float64(7.05), np.float64(6.55), np.float64(6.0), np.float64(5.5)],
x_err=[np.float64(0.04), np.float64(0.05), np.float64(0.05), np.float64(0.06), np.float64(0.07), np.float64(0.08), np.float64(0.1)],
labels=dict_keys([1, 2]),
params=TitrationConfig(bg=True, bg_adj=False, dil=True, nrm=True, bg_mth='mean', mcmc=False) pH=True additions=[100, 2, 2, 2, 2, 2, 2]
scheme=PlateScheme(file='./scheme.0.txt', _buffer=['C12', 'D01', 'D12', 'E01', 'E12', 'F01'], _discard=['E10', 'H05'], _ctrl=['G12', 'H12', 'A12', 'G01', 'H01', 'F12', 'C01', 'A01', 'B12', 'B01'], _names={'E2GFP': {'F12', 'C01', 'G01', 'B12'}, 'V224L': {'H12', 'A12', 'A01', 'H01'}, 'V224Q': {'G12', 'B01'}}))
[24]:
# Per-well fit results for label 1: K with error and HDI bounds, plus plateau
# parameters S0/S1.
tit.results[1].dataframe.head()
[24]:
K | sK | Khdi03 | Khdi97 | S0_default | sS0_default | S0_defaulthdi03 | S0_defaulthdi97 | S1_default | sS1_default | S1_defaulthdi03 | S1_defaulthdi97 | |
---|---|---|---|---|---|---|---|---|---|---|---|---|
well | ||||||||||||
G10 | 10.999998 | 1127.479204 | 3 | 11 | -789.320921 | 2.645533e+06 | -inf | inf | 211.360074 | 11.709338 | -inf | inf |
B02 | 9.228008 | 3.897769 | 3 | 11 | -212.125103 | 1.002620e+03 | -inf | inf | -51.404929 | 12.437040 | -inf | inf |
D02 | 8.643629 | 0.696671 | 3 | 11 | -325.926559 | 1.060139e+02 | -inf | inf | -172.510547 | 10.425915 | -inf | inf |
C07 | 6.347515 | 0.667163 | 3 | 11 | 497.614745 | 1.451239e+01 | -inf | inf | 576.631750 | 34.962094 | -inf | inf |
D10 | 7.309583 | 0.318677 | 3 | 11 | 174.625614 | 1.570924e+01 | -inf | inf | 281.117260 | 12.079946 | -inf | inf |
[25]:
# Global fit (both labels fitted simultaneously) for well D10.
rg = tit.result_global["D10"]
rg.figure
[25]:

[26]:
%%time
# Bayesian ODR refinement of the global fit via PyMC; n_sd presumably scales
# the prior widths — TODO confirm against fit_binding_pymc_odr docs.
rro = fitting.fit_binding_pymc_odr(rg, n_sd=0.5)
rro
{'K': K, 'S0_y1': S0_y1, 'S1_y1': S1_y1, 'S0_y2': S0_y2, 'S1_y2': S1_y2}
Initializing NUTS using jitter+adapt_diag...
Multiprocess sampling (4 chains in 4 jobs)
NUTS: [K, S0_y1, S1_y1, S0_y2, S1_y2, ye_mag, xe_mag]
Sampling 4 chains for 1_000 tune and 2_000 draw iterations (4_000 + 8_000 draws total) took 13 seconds.
There were 235 divergences after tuning. Increase `target_accept` or reparameterize.
CPU times: user 33.7 s, sys: 1.17 s, total: 34.9 s
Wall time: 3min 34s
[26]:
-
<xarray.Dataset> Size: 3MB Dimensions: (chain: 4, draw: 2000, x_prime_y1_dim_0: 7, y_prime_y1_dim_0: 7, y_model_y1_dim_0: 7, x_prime_y2_dim_0: 7, y_prime_y2_dim_0: 7, y_model_y2_dim_0: 7) Coordinates: * chain (chain) int64 32B 0 1 2 3 * draw (draw) int64 16kB 0 1 2 3 4 5 ... 1995 1996 1997 1998 1999 * x_prime_y1_dim_0 (x_prime_y1_dim_0) int64 56B 0 1 2 3 4 5 6 * y_prime_y1_dim_0 (y_prime_y1_dim_0) int64 56B 0 1 2 3 4 5 6 * y_model_y1_dim_0 (y_model_y1_dim_0) int64 56B 0 1 2 3 4 5 6 * x_prime_y2_dim_0 (x_prime_y2_dim_0) int64 56B 0 1 2 3 4 5 6 * y_prime_y2_dim_0 (y_prime_y2_dim_0) int64 56B 0 1 2 3 4 5 6 * y_model_y2_dim_0 (y_model_y2_dim_0) int64 56B 0 1 2 3 4 5 6 Data variables: (12/13) K (chain, draw) float64 64kB 6.894 6.876 ... 6.852 6.894 S0_y1 (chain, draw) float64 64kB 174.8 194.3 ... 182.4 192.7 S1_y1 (chain, draw) float64 64kB 291.8 295.4 ... 291.0 295.0 S0_y2 (chain, draw) float64 64kB 655.5 663.2 ... 651.4 665.3 S1_y2 (chain, draw) float64 64kB 220.4 222.8 ... 222.1 225.2 ye_mag (chain, draw) float64 64kB 7.133 13.33 ... 10.14 9.751 ... ... x_prime_y1 (chain, draw, x_prime_y1_dim_0) float64 448kB 10.37 ...... y_prime_y1 (chain, draw, y_prime_y1_dim_0) float64 448kB 174.8 ...... y_model_y1 (chain, draw, y_model_y1_dim_0) float64 448kB 175.9 ...... x_prime_y2 (chain, draw, x_prime_y2_dim_0) float64 448kB 10.6 ... ... y_prime_y2 (chain, draw, y_prime_y2_dim_0) float64 448kB 655.4 ...... y_model_y2 (chain, draw, y_model_y2_dim_0) float64 448kB 651.2 ...... Attributes: created_at: 2025-04-18T13:00:45.310832+00:00 arviz_version: 0.21.0 inference_library: pymc inference_library_version: 5.22.0 sampling_time: 13.38464641571045 tuning_steps: 1000
-
<xarray.Dataset> Size: 992kB Dimensions: (chain: 4, draw: 2000) Coordinates: * chain (chain) int64 32B 0 1 2 3 * draw (draw) int64 16kB 0 1 2 3 4 ... 1996 1997 1998 1999 Data variables: (12/17) process_time_diff (chain, draw) float64 64kB 0.001969 ... 0.001504 reached_max_treedepth (chain, draw) bool 8kB False False ... False False lp (chain, draw) float64 64kB -188.8 -187.4 ... -189.2 index_in_trajectory (chain, draw) int64 64kB -7 3 1 -4 -5 ... 1 3 7 -5 -4 perf_counter_diff (chain, draw) float64 64kB 0.001969 ... 0.001504 step_size_bar (chain, draw) float64 64kB 0.7153 0.7153 ... 0.7015 ... ... energy (chain, draw) float64 64kB 190.7 190.1 ... 192.2 perf_counter_start (chain, draw) float64 64kB 772.5 772.5 ... 778.0 largest_eigval (chain, draw) float64 64kB nan nan nan ... nan nan tree_depth (chain, draw) int64 64kB 3 3 2 3 3 3 ... 3 2 3 3 3 3 step_size (chain, draw) float64 64kB 0.5942 0.5942 ... 0.7538 n_steps (chain, draw) float64 64kB 7.0 7.0 3.0 ... 7.0 7.0 Attributes: created_at: 2025-04-18T13:00:45.330028+00:00 arviz_version: 0.21.0 inference_library: pymc inference_library_version: 5.22.0 sampling_time: 13.38464641571045 tuning_steps: 1000
-
<xarray.Dataset> Size: 224B Dimensions: (orthogonal_likelihood_y1_dim_0: 7, orthogonal_likelihood_y2_dim_0: 7) Coordinates: * orthogonal_likelihood_y1_dim_0 (orthogonal_likelihood_y1_dim_0) int64 56B ... * orthogonal_likelihood_y2_dim_0 (orthogonal_likelihood_y2_dim_0) int64 56B ... Data variables: orthogonal_likelihood_y1 (orthogonal_likelihood_y1_dim_0) float64 56B ... orthogonal_likelihood_y2 (orthogonal_likelihood_y2_dim_0) float64 56B ... Attributes: created_at: 2025-04-18T13:00:45.334282+00:00 arviz_version: 0.21.0 inference_library: pymc inference_library_version: 5.22.0
[27]:
# Posterior summary: point estimates, HDIs, and MCMC diagnostics (ESS, r_hat).
posterior_summary = az.summary(rro)
posterior_summary
[27]:
mean | sd | hdi_3% | hdi_97% | mcse_mean | mcse_sd | ess_bulk | ess_tail | r_hat | |
---|---|---|---|---|---|---|---|---|---|
K | 6.877 | 0.026 | 6.828 | 6.927 | 0.000 | 0.000 | 9239.0 | 5956.0 | 1.0 |
S0_y1 | 187.852 | 5.612 | 177.423 | 198.477 | 0.057 | 0.064 | 9650.0 | 5861.0 | 1.0 |
S1_y1 | 292.451 | 5.940 | 281.085 | 303.376 | 0.062 | 0.068 | 9114.0 | 5705.0 | 1.0 |
S0_y2 | 658.289 | 3.982 | 650.826 | 665.765 | 0.041 | 0.047 | 9227.0 | 6135.0 | 1.0 |
S1_y2 | 222.920 | 6.094 | 211.212 | 233.953 | 0.062 | 0.072 | 9685.0 | 5815.0 | 1.0 |
ye_mag | 8.707 | 5.987 | 0.581 | 19.474 | 0.062 | 0.067 | 7893.0 | 6249.0 | 1.0 |
xe_mag | 14.936 | 0.406 | 14.199 | 15.705 | 0.004 | 0.005 | 9746.0 | 5728.0 | 1.0 |
x_prime_y1[0] | 10.369 | 0.000 | 10.369 | 10.369 | 0.000 | NaN | 8000.0 | 8000.0 | NaN |
x_prime_y1[1] | 8.925 | 0.000 | 8.925 | 8.925 | 0.000 | NaN | 8000.0 | 8000.0 | NaN |
x_prime_y1[2] | 7.307 | 0.000 | 7.307 | 7.307 | 0.000 | 0.000 | 8000.0 | 8000.0 | NaN |
x_prime_y1[3] | 7.124 | 0.000 | 7.124 | 7.124 | 0.000 | 0.000 | 8000.0 | 8000.0 | NaN |
x_prime_y1[4] | 4.837 | 0.000 | 4.837 | 4.837 | 0.000 | 0.000 | 8000.0 | 8000.0 | NaN |
x_prime_y1[5] | 5.816 | 0.000 | 5.816 | 5.816 | 0.000 | 0.000 | 8000.0 | 8000.0 | NaN |
x_prime_y1[6] | 6.104 | 0.000 | 6.104 | 6.104 | 0.000 | 0.000 | 8000.0 | 8000.0 | NaN |
y_prime_y1[0] | 187.886 | 5.610 | 177.459 | 198.504 | 0.057 | 0.064 | 9650.0 | 5861.0 | 1.0 |
y_prime_y1[1] | 188.782 | 5.562 | 178.442 | 199.298 | 0.057 | 0.063 | 9633.0 | 5930.0 | 1.0 |
y_prime_y1[2] | 216.211 | 4.572 | 207.516 | 224.666 | 0.048 | 0.051 | 9224.0 | 6087.0 | 1.0 |
y_prime_y1[3] | 225.728 | 4.431 | 217.591 | 234.118 | 0.046 | 0.048 | 9113.0 | 6086.0 | 1.0 |
y_prime_y1[4] | 291.505 | 5.887 | 280.197 | 302.280 | 0.062 | 0.068 | 9113.0 | 5689.0 | 1.0 |
y_prime_y1[5] | 284.089 | 5.510 | 273.748 | 294.310 | 0.058 | 0.063 | 9100.0 | 5876.0 | 1.0 |
y_prime_y1[6] | 277.364 | 5.217 | 267.421 | 286.834 | 0.055 | 0.060 | 9080.0 | 5739.0 | 1.0 |
y_model_y1[0] | 188.837 | 5.559 | 178.500 | 199.355 | 0.057 | 0.063 | 9633.0 | 5895.0 | 1.0 |
y_model_y1[1] | 191.667 | 5.415 | 181.997 | 202.381 | 0.055 | 0.062 | 9596.0 | 5793.0 | 1.0 |
y_model_y1[2] | 201.547 | 4.988 | 192.563 | 211.109 | 0.051 | 0.056 | 9445.0 | 5905.0 | 1.0 |
y_model_y1[3] | 229.901 | 4.400 | 221.687 | 238.082 | 0.046 | 0.048 | 9087.0 | 6136.0 | 1.0 |
y_model_y1[4] | 258.964 | 4.647 | 250.280 | 267.471 | 0.049 | 0.052 | 9050.0 | 5738.0 | 1.0 |
y_model_y1[5] | 280.187 | 5.334 | 269.782 | 289.667 | 0.056 | 0.061 | 9084.0 | 5763.0 | 1.0 |
y_model_y1[6] | 288.233 | 5.713 | 277.057 | 298.507 | 0.060 | 0.066 | 9108.0 | 5716.0 | 1.0 |
x_prime_y2[0] | 10.602 | 0.000 | 10.602 | 10.602 | 0.000 | 0.000 | 8000.0 | 8000.0 | NaN |
x_prime_y2[1] | 8.196 | 0.000 | 8.196 | 8.196 | 0.000 | 0.000 | 8000.0 | 8000.0 | NaN |
x_prime_y2[2] | 7.702 | 0.000 | 7.702 | 7.702 | 0.000 | 0.000 | 8000.0 | 8000.0 | NaN |
x_prime_y2[3] | 7.058 | 0.000 | 7.058 | 7.058 | 0.000 | 0.000 | 8000.0 | 8000.0 | NaN |
x_prime_y2[4] | 6.644 | 0.000 | 6.644 | 6.644 | 0.000 | 0.000 | 8000.0 | 8000.0 | NaN |
x_prime_y2[5] | 5.970 | 0.000 | 5.970 | 5.970 | 0.000 | 0.000 | 8000.0 | 8000.0 | NaN |
x_prime_y2[6] | 5.549 | 0.000 | 5.549 | 5.549 | 0.000 | 0.000 | 8000.0 | 8000.0 | NaN |
y_prime_y2[0] | 658.207 | 3.981 | 650.730 | 665.658 | 0.041 | 0.047 | 9224.0 | 6135.0 | 1.0 |
y_prime_y2[1] | 638.314 | 3.986 | 630.763 | 645.756 | 0.041 | 0.046 | 9259.0 | 5952.0 | 1.0 |
y_prime_y2[2] | 601.478 | 4.661 | 592.865 | 610.557 | 0.048 | 0.052 | 9282.0 | 6297.0 | 1.0 |
y_prime_y2[3] | 485.123 | 7.198 | 471.869 | 498.460 | 0.075 | 0.078 | 9288.0 | 6434.0 | 1.0 |
y_prime_y2[4] | 383.559 | 7.392 | 369.816 | 397.311 | 0.076 | 0.081 | 9434.0 | 6175.0 | 1.0 |
y_prime_y2[5] | 270.927 | 6.005 | 259.476 | 281.764 | 0.061 | 0.070 | 9738.0 | 5891.0 | 1.0 |
y_prime_y2[6] | 242.497 | 5.921 | 231.255 | 253.320 | 0.060 | 0.070 | 9736.0 | 5711.0 | 1.0 |
y_model_y2[0] | 654.188 | 3.953 | 646.723 | 661.561 | 0.041 | 0.046 | 9236.0 | 6213.0 | 1.0 |
y_model_y2[1] | 642.410 | 3.958 | 634.985 | 649.825 | 0.041 | 0.046 | 9251.0 | 5976.0 | 1.0 |
y_model_y2[2] | 601.292 | 4.666 | 592.708 | 610.420 | 0.048 | 0.052 | 9283.0 | 6271.0 | 1.0 |
y_model_y2[3] | 483.279 | 7.220 | 469.963 | 496.637 | 0.075 | 0.078 | 9291.0 | 6487.0 | 1.0 |
y_model_y2[4] | 362.307 | 7.187 | 349.092 | 375.779 | 0.074 | 0.079 | 9478.0 | 6231.0 | 1.0 |
y_model_y2[5] | 273.968 | 6.031 | 262.550 | 284.940 | 0.061 | 0.070 | 9734.0 | 5868.0 | 1.0 |
y_model_y2[6] | 240.474 | 5.929 | 229.438 | 251.583 | 0.060 | 0.070 | 9744.0 | 5799.0 | 1.0 |
[28]:
# Trace plots for all sampled variables (chains overlaid).
az.plot_trace(rro)
[28]:
array([[<Axes: title={'center': 'K'}>, <Axes: title={'center': 'K'}>],
[<Axes: title={'center': 'S0_y1'}>,
<Axes: title={'center': 'S0_y1'}>],
[<Axes: title={'center': 'S1_y1'}>,
<Axes: title={'center': 'S1_y1'}>],
[<Axes: title={'center': 'S0_y2'}>,
<Axes: title={'center': 'S0_y2'}>],
[<Axes: title={'center': 'S1_y2'}>,
<Axes: title={'center': 'S1_y2'}>],
[<Axes: title={'center': 'ye_mag'}>,
<Axes: title={'center': 'ye_mag'}>],
[<Axes: title={'center': 'xe_mag'}>,
<Axes: title={'center': 'xe_mag'}>],
[<Axes: title={'center': 'x_prime_y1'}>,
<Axes: title={'center': 'x_prime_y1'}>],
[<Axes: title={'center': 'y_prime_y1'}>,
<Axes: title={'center': 'y_prime_y1'}>],
[<Axes: title={'center': 'y_model_y1'}>,
<Axes: title={'center': 'y_model_y1'}>],
[<Axes: title={'center': 'x_prime_y2'}>,
<Axes: title={'center': 'x_prime_y2'}>],
[<Axes: title={'center': 'y_prime_y2'}>,
<Axes: title={'center': 'y_prime_y2'}>],
[<Axes: title={'center': 'y_model_y2'}>,
<Axes: title={'center': 'y_model_y2'}>]], dtype=object)

[29]:
# MCMC fit for well H11 (sampling runs on access, per the log output below).
rp = tit.result_mcmc["H11"]
INFO - n_sd[Global] estimated for MCMC fitting: 1.354
INFO - Starting PyMC sampling for key: H11
2025-04-18 13:00:54,906 - clophfit.binding.fitting - INFO - min pH distance: 0.2695113885676778
Initializing NUTS using jitter+adapt_diag...
Multiprocess sampling (4 chains in 4 jobs)
NUTS: [K, S0_y1, S1_y1, S0_y2, S1_y2, x_diff, x_start, ye_mag]
Sampling 4 chains for 1_000 tune and 2_000 draw iterations (4_000 + 8_000 draws total) took 20 seconds.
INFO - MCMC fitting completed for well: H11
Parameters([('K', <Parameter 'K', value=np.float64(6.883) +/- 0.063, bounds=[np.float64(6.768):np.float64(7.005)]>), ('S0_y1', <Parameter 'S0_y1', value=np.float64(384.242) +/- 17.5, bounds=[np.float64(351.708):np.float64(417.627)]>), ('S1_y1', <Parameter 'S1_y1', value=np.float64(405.553) +/- 19, bounds=[np.float64(368.741):np.float64(441.872)]>), ('S0_y2', <Parameter 'S0_y2', value=np.float64(1479.462) +/- 12.8, bounds=[np.float64(1453.714):np.float64(1502.024)]>), ('S1_y2', <Parameter 'S1_y2', value=np.float64(508.057) +/- 23.8, bounds=[np.float64(464.22):np.float64(553.24)]>)])
{'K': <Parameter 'K', value=np.float64(6.883) +/- 0.063, bounds=[np.float64(6.768):np.float64(7.005)]>, 'S0': <Parameter 'S0_y2', value=np.float64(1479.462) +/- 12.8, bounds=[np.float64(1453.714):np.float64(1502.024)]>, 'S1': <Parameter 'S1_y2', value=np.float64(508.057) +/- 23.8, bounds=[np.float64(464.22):np.float64(553.24)]>}
[30]:
# Figure of the MCMC fit for H11.
rp.figure
[30]:

[31]:
# Underlying dataset: per-label x (pH), y, their errors, and the point mask.
rp.dataset
[31]:
{'y1': DataArray(xc=array([8.894, 8.272, 7.67 , 7.043, 6.62 , 6.031, 5.508]), yc=array([364.79609929, 400.93776596, 397.68014184, 363.4822695 ,
436.72021277, 391.79698582, 394.91914894]), x_errc=array([0.038, 0.065, 0.071, 0.07 , 0.079, 0.114, 0.167]), y_errc=array([37.8105439 , 39.62776095, 33.40163612, 31.72400927, 30.36933227,
31.24031571, 36.33514289]), _mask=array([ True, True, True, True, True, True, True])),
'y2': DataArray(xc=array([8.894, 8.272, 7.67 , 7.043, 6.62 , 6.031, 5.508]), yc=array([1498.46801347, 1419.41326531, 1334.94965986, 1076.7202381 ,
864.1377551 , 625.64370748, 541.24761905]), x_errc=array([0.038, 0.065, 0.071, 0.07 , 0.079, 0.114, 0.167]), y_errc=array([20.66548948, 22.52407769, 20.41608608, 21.22118117, 21.00945912,
23.24670688, 26.95347446]), _mask=array([ True, True, True, True, True, True, True]))}
[32]:
# Posterior traces for the error-in-x parameters and K (chains combined).
az.plot_trace(
    rp.mini, var_names=["x_true", "K", "x_diff"], divergences=False, combined=True
)
[32]:
array([[<Axes: title={'center': 'x_true'}>,
<Axes: title={'center': 'x_true'}>],
[<Axes: title={'center': 'K'}>, <Axes: title={'center': 'K'}>],
[<Axes: title={'center': 'x_diff'}>,
<Axes: title={'center': 'x_diff'}>]], dtype=object)

[33]:
# The fitted params container exposes a dict-like keys() view.
type(tit.result_global["H11"].result.params.keys())
[33]:
dict_keys
[34]:
# Check that global fits have been computed for all wells.
tit.result_global.all_computed()
[34]:
True
2.4.1. Choose buffer value to be subtracted between mean values and ODR fitted values.#
[35]:
# Compare the two buffer estimates for label 2: ODR-fitted value vs plain mean.
lb = 2
buf_df = tit.buffer.dataframes_nrm[lb]  # hoist the repeated dict lookup
x = buf_df["fit"]
y = buf_df["mean"]
# Errors scaled down (/10) for plot readability — TODO confirm intended.
x_err = buf_df["fit_err"] / 10
y_err = buf_df["sem"] / 10
# Explicit Axes interface instead of the pyplot state machine.
fig, ax = plt.subplots()
ax.errorbar(
    x,
    y,
    xerr=x_err,
    yerr=y_err,
    fmt="o",
    color="blue",
    ecolor="lightgray",
    elinewidth=2,
    capsize=4,
)
ax.set_xlabel("ODR Fit")
ax.set_ylabel("Buffer wells Mean")
[35]:
Text(0, 0.5, 'Buffer wells Mean')

[36]:
# Built-in buffer diagnostics plot for label 1.
tit.buffer.plot(1).fig
[36]:

[37]:
# Linear buffer fits per label: slope m and intercept q with their errors.
tit.buffer.fit_results_nrm
[37]:
{1: BufferFit(m=np.float64(-7.306402487135711), q=np.float64(1204.3701797583942), m_err=np.float64(19.39191342345837), q_err=np.float64(137.90570601875348)),
2: BufferFit(m=np.float64(-29.420727413380433), q=np.float64(728.3516672997897), m_err=np.float64(10.519439216261752), q_err=np.float64(77.32603244223898))}
[38]:
# Plate temperature across the titration files.
tit.plot_temperature()
[38]:

[39]:
# Background uncertainty per label and titration point.
tit.bg_err
[39]:
{1: array([ 96.7021583 , 101.34977225, 85.42617933, 81.13557359,
77.67092652, 79.89850564, 92.9287542 ]),
2: array([52.85291426, 57.60633679, 52.2150539 , 54.27412064, 53.73263201,
59.45449329, 68.93471729])}
[40]:
# Inspect one of the test wells listed above.
k = "F10" # "G09"
tit.result_global[k].figure
[40]:

[41]:
# Fitted parameter table for the global fit of well F10.
r = tit.result_global[k]
r.result.params
[41]:
name | value | standard error | relative error | initial value | min | max | vary |
---|---|---|---|---|---|---|---|
K | 6.89255571 | 0.35242677 | (5.11%) | 8.3 | 3.00000000 | 11.0000000 | True |
S0_y1 | 558.410690 | 31.5513382 | (5.65%) | 518.0407801418442 | -inf | inf | True |
S1_y1 | 785.620639 | 37.9065017 | (4.83%) | 791.5659574468086 | -inf | inf | True |
S0_y2 | 878.032715 | 15.1027791 | (1.72%) | 885.993265993266 | -inf | inf | True |
S1_y2 | 860.047980 | 19.4517101 | (2.26%) | 922.9619047619049 | -inf | inf | True |
[42]:
# ODR fit for the same well, for comparison.
tit.result_odr[k].figure
[42]:

[43]:
# Refine the global fit with orthogonal distance regression (accounts for
# x errors as well as y errors). Dead commented-out code removed.
ro = fitting.fit_binding_odr(r)
ro.figure
[43]:

[44]:
# Dilution correction factors — private attribute, shown for inspection only.
tit._dil_corr
[44]:
array([1. , 1.02, 1.04, 1.06, 1.08, 1.1 , 1.12])
[45]:
# Turn off normalization and show the resulting parameter set.
tit.params.nrm = False
tit.params
[45]:
TitrationConfig(bg=True, bg_adj=False, dil=True, nrm=False, bg_mth='mean', mcmc=False)
[46]:
# Switch to the 140220 dataset (path relative to the current L1 folder).
os.chdir("../../Tecan/140220/")
tit = Titration.fromlistfile("./list.pH.csv", is_ph=True)
tit.load_scheme("./scheme.txt")
tit.load_additions("additions.pH")
WARNING - OVER value in Label1: H02 of tecanfile pH7.6_200214.xls
WARNING - OVER value in Label1: H02 of tecanfile pH7.1_200214.xls
WARNING - OVER value in Label1: A06 of tecanfile pH6.5_200214.xls
WARNING - OVER value in Label1: H02 of tecanfile pH6.5_200214.xls
WARNING - OVER value in Label1: A06 of tecanfile pH5.8_200214.xls
WARNING - OVER value in Label1: H02 of tecanfile pH5.8_200214.xls
WARNING - OVER value in Label1: H02 of tecanfile pH5.0_200214.xls
[47]:
# Corrected data for well H03 (label 1).
tit.data[1]["H03"]
[47]:
array([ 449.81182796, 654.84274194, 1000.00752688, 1218.41586022,
1298.01774194, 1313.15456989, 977.35053763])
[48]:
# Enable buffer adjustment; bg_mth selects how the buffer value is estimated.
tit.params.bg_adj = True
tit.params.bg_mth = "mean"
tit.params
[48]:
TitrationConfig(bg=True, bg_adj=True, dil=True, nrm=True, bg_mth='mean', mcmc=False)
[49]:
# Compare current K estimates (label 2) against a previously saved fit.
# Dead commented-out merge line removed.
df1 = pd.read_csv("../140220/fit1-1.csv", index_col=0)
# Merge on well index; suffixes: K_x = current fit, K_y = saved reference.
merged_df = (
    tit.results[2].dataframe[["K", "sK"]].merge(df1, left_index=True, right_index=True)
)
sb.jointplot(merged_df, x="K_y", y="K_x", ratio=3, space=0.4)
WARNING - Buffer for 'G03:2' was adjusted by 293.02 SD.
[49]:
<seaborn.axisgrid.JointGrid at 0x7f9103541ee0>

[50]:
# Global fit figure for control well A01.
tit.result_global["A01"].figure
[50]:

[51]:
# Corrected data for A01 (label 1).
tit.data[1]["A01"]
[51]:
array([210.34946237, 324.65887097, 492.19677419, 556.48575269,
640.20483871, 602.59005376, 585.10967742])
If a fit fails in a well, the well key will still be present in the results list of dictionaries.
tit.results[1].compute_all()
conf = prtecan.TecanConfig(Path("jjj"), False, (), "", True, True)
tit.export_data_fit(conf)
[52]:
# H02 saturated (see OVER warnings above): later points are NaN, yet the fit
# is still attempted and the key remains in the results.
print(tit.data[1]["H02"])
tit.results[2]["H02"].figure
[1865.34946237 2636.32983871 nan nan nan
nan nan]
[52]:

[53]:
# Wells fitted for label 2 but missing from label 1 (original line compared
# results[2] with itself — always set() — clearly meant results[1]).
print(tit.results[2].results.keys() - tit.results[1].results.keys())
# Wells recovered by the global fit that are absent from the label-1 fits.
print(tit.result_global.results.keys() - tit.results[1].results.keys())
# Wells in the ODR results but not in label-1 (expected: none).
print(tit.result_odr.results.keys() - tit.results[1].results.keys())
set()
{'A01'}
set()
[54]:
# Disable normalization again.
tit.params.nrm = False
[55]:
# Label-1 fit for well H01 without normalization.
tit.results[1]["H01"].figure
WARNING - Buffer for 'G03:2' was adjusted by 293.02 SD.
[55]:

[56]:
# Global fit for the same well.
tit.result_global["H01"].figure
[56]:

And in the global fit (i.e. fitting 2 labelblocks) datasets with insufficient data points are removed.
[57]:
# Manually assemble a two-label dataset for the saturated well H02 and fit
# both labels globally.
tit.params.nrm = True
well = "H02"
x = np.array(tit.x)
data_arrays = {
    f"y{label}": fitting.DataArray(x, np.array(tit.data[label][well]))
    for label in (1, 2)
}
ds = fitting.Dataset(data_arrays, is_ph=True)
rfit = fitting.fit_binding_glob(ds)
rfit.result.params
WARNING - Buffer for 'G03:2' was adjusted by 293.02 SD.
[57]:
name | value | standard error | relative error | initial value | min | max | vary |
---|---|---|---|---|---|---|---|
K | 7.88991558 | 0.01709793 | (0.22%) | 8.025 | 3.00000000 | 11.0000000 | True |
S0_y1 | 1616.23180 | 39.2873682 | (2.43%) | 1865.3494623655913 | -inf | inf | True |
S1_y1 | 5578.89667 | 207.558664 | (3.72%) | 2636.3298387096775 | -inf | inf | True |
S0_y2 | 3868.15276 | 33.7500018 | (0.87%) | 3621.808035714286 | -inf | inf | True |
S1_y2 | 230.187234 | 16.7493312 | (7.28%) | 218.85000000000002 | -inf | inf | True |
[58]:
# Figure of the manual global fit built in the previous cell.
rfit.figure
[58]:

[59]:
# Refine the global fit with orthogonal distance regression (accounts for x errors).
fitting.fit_binding_odr(rfit).figure
[59]:

[60]:
# First rows of the label-2 fit-results table (K, plateaus, and HDI bounds).
tit.results[2].dataframe.head()
[60]:
K | sK | Khdi03 | Khdi97 | S0_default | sS0_default | S0_defaulthdi03 | S0_defaulthdi97 | S1_default | sS1_default | S1_defaulthdi03 | S1_defaulthdi97 | |
---|---|---|---|---|---|---|---|---|---|---|---|---|
well | ||||||||||||
G10 | 8.030983 | 0.024053 | 3 | 11 | 75.520703 | 0.966819 | -inf | inf | 2.480673 | 0.632868 | -inf | inf |
B02 | 7.076923 | 0.112891 | 3 | 11 | 296.902618 | 9.080260 | -inf | inf | 68.765560 | 14.594769 | -inf | inf |
D02 | 6.981087 | 0.039574 | 3 | 11 | 408.894299 | 4.518109 | -inf | inf | 63.231912 | 7.909789 | -inf | inf |
C07 | 6.915918 | 0.025421 | 3 | 11 | 405.949185 | 2.832968 | -inf | inf | 52.540626 | 5.257776 | -inf | inf |
D10 | 8.004353 | 0.018555 | 3 | 11 | 581.893041 | 5.602669 | -inf | inf | 28.914445 | 3.786778 | -inf | inf |
You can decide how to pre-process data with the `TitrationConfig` parameters (`tit.params`):
[bg] subtract background
[dil] apply correction for dilution (when e.g. during a titration you add titrant without protein)
[nrm] normalize for gain, number of flashes and integration time.
[61]:
# Background subtraction only: no normalization, no buffer adjustment.
tit.params.nrm = False
tit.params.bg = True
tit.params.bg_adj = False
tit.data[1]["E06"]
[61]:
array([ 7069.5 , 10878.555, 16151.98 , 19465.575, 20140.65 , 19205.175,
13086.08 ])
2.4.2. Posterior analysis with emcee#
To explore the posterior of parameters you can use the Minimizer object returned in FitResult.
[62]:
# Seed the legacy global RNG so the emcee sampling is reproducible.
np.random.seed(0)  # noqa: NPY002
# Sample the posterior; is_weighted=False treats the data uncertainty as an
# extra parameter to estimate.
remcee = rfit.mini.emcee(
    burn=50,  # warm-up steps discarded from each walker
    steps=2000,
    workers=8,
    thin=10,  # keep every 10th sample
    nwalkers=30,
    progress=False,
    is_weighted=False,
)
The chain is shorter than 50 times the integrated autocorrelation time for 6 parameter(s). Use this estimate with caution and run a longer chain!
N/50 = 40;
tau: [111.06922308 191.80894426 171.41194937 75.9163616 60.79653061
236.46411446]
[63]:
# Corner plot of the flattened chain and the central 94% interval for K.
f = plotting.plot_emcee(remcee.flatchain)
print(remcee.flatchain.quantile([0.03, 0.97])["K"].to_list())
WARNING:root:Too few points to create valid contours
WARNING:root:Too few points to create valid contours
WARNING:root:Too few points to create valid contours
WARNING:root:Too few points to create valid contours
WARNING:root:Too few points to create valid contours
WARNING:root:Too few points to create valid contours
WARNING:root:Too few points to create valid contours
WARNING:root:Too few points to create valid contours
WARNING:root:Too few points to create valid contours
WARNING:root:Too few points to create valid contours
WARNING:root:Too few points to create valid contours
WARNING:root:Too few points to create valid contours
WARNING:root:Too few points to create valid contours
WARNING:root:Too few points to create valid contours
[7.6501941591783575, 7.977095194607985]

[64]:
# Posterior analysis of K with ArviZ.
samples = remcee.flatchain[["K"]]
# Convert the dictionary of flatchains to an ArviZ InferenceData object
samples_dict = {key: np.array(val) for key, val in samples.items()}
idata = az.from_dict(posterior=samples_dict)
k_samples = idata.posterior["K"].to_numpy()
# 3rd percentile: K exceeds this value with 97% probability
# (the original message wrongly said 99%).
percentile_value = np.percentile(k_samples, 3)
print(f"Value at which the probability of being higher is 97%: {percentile_value}")
az.plot_forest(k_samples)
Value at which the probability of being higher is 99%: 7.6501941591783575
[64]:
array([<Axes: title={'center': '94.0% HDI'}>], dtype=object)

2.4.3. Cl titration analysis#
[65]:
# Load the chloride-titration file list and the plate scheme.
os.chdir("../140220/")
cl_an = prtecan.Titration.fromlistfile("list.cl.csv", is_ph=False)
cl_an.load_scheme("scheme.txt")
cl_an.scheme
WARNING - OVER value in Label1: H02 of tecanfile pH5.0_200214.xls
WARNING:clophfit.prtecan.prtecan: OVER value in Label1: H02 of tecanfile pH5.0_200214.xls
[65]:
PlateScheme(file='scheme.txt', _buffer=['D01', 'E01', 'D12', 'E12'], _discard=[], _ctrl=['F01', 'C12', 'G12', 'H12', 'A12', 'G01', 'H01', 'F12', 'C01', 'A01', 'B12', 'B01'], _names={'G03': {'H12', 'A01', 'B12'}, 'NTT': {'F12', 'F01', 'C12'}, 'S202N': {'C01', 'G12', 'H01'}, 'V224Q': {'A12', 'G01', 'B01'}})
[66]:
# Compute Cl concentrations from the recorded additions (titrant stock = 1000).
cl_an.load_additions("additions.cl")
print(cl_an.x)  # all zeros until the concentrations are computed
cl_an.x = prtecan.calculate_conc(cl_an.additions, 1000)
cl_an.x
[0 0 0 0 0 0 0 0 0]
[66]:
array([ 0. , 17.54385965, 34.48275862, 50.84745763,
66.66666667, 81.96721311, 96.77419355, 138.46153846,
164.17910448])
[67]:
# Global Cl fit for the well selected earlier ("H02"); print fit diagnostics.
fres = cl_an.result_global[well]
print(fres.is_valid(), fres.result.bic, fres.result.redchi)
fres.figure
True 20.492384306723416 2.055343573207545
[67]:

2.4.4. Plotting#
[68]:
# Plate-wide plot of the fitted K values for label 2.
tit.results[2].plot_k(title="2014-12-23")
[68]:

2.4.5. Selection#
[69]:
# Restore full pre-processing and show the titration summary repr.
tit.params.nrm = True
tit.params.dil = True
tit.params.bg_mth = "fit"
tit
[69]:
Titration
files=["pH9.1_200214.xls", ...],
x=[np.float64(9.0633), np.float64(8.35), np.float64(7.7), np.float64(7.08), np.float64(6.44), np.float64(5.83), np.float64(4.99)],
x_err=[np.float64(0.0115), np.float64(0.02), np.float64(0.08), np.float64(0.03), np.float64(0.0872), np.float64(0.1), np.float64(0.0361)],
labels=dict_keys([1, 2]),
params=TitrationConfig(bg=True, bg_adj=False, dil=True, nrm=True, bg_mth='fit', mcmc=False) pH=True additions=[100, 2, 2, 2, 2, 2, 2]
scheme=PlateScheme(file='./scheme.txt', _buffer=['D01', 'E01', 'D12', 'E12'], _discard=[], _ctrl=['F01', 'C12', 'G12', 'H12', 'A12', 'G01', 'H01', 'F12', 'C01', 'A01', 'B12', 'B01'], _names={'G03': {'H12', 'A01', 'B12'}, 'NTT': {'F12', 'F01', 'C12'}, 'S202N': {'C01', 'G12', 'H01'}, 'V224Q': {'A12', 'G01', 'B01'}}))
[70]:
# Annotate each well with its control name; "U" marks unknown samples.
# NOTE(review): this mutates tit.results[1].dataframe in place if the property
# returns a shared object — confirm, or work on a .copy().
df_ctr = tit.results[1].dataframe
for name, wells in tit.scheme.names.items():
    for well in wells:
        df_ctr.loc[well, "ctrl"] = name
df_ctr.loc[df_ctr["ctrl"].isna(), "ctrl"] = "U"
sb.set_style("whitegrid")
# Pairwise views of K and the two plateaus, colored by control group.
g = sb.PairGrid(
    df_ctr,
    x_vars=["K", "S1_default", "S0_default"],
    y_vars=["K", "S1_default", "S0_default"],
    hue="ctrl",
    palette="Set1",
    diag_sharey=False,
)
g.map_lower(plt.scatter)
g.map_upper(sb.kdeplot, fill=True)
g.map_diag(sb.kdeplot)
g.add_legend()
WARNING - Skip fit for well H02 for Label:1.
WARNING:clophfit.prtecan.prtecan:Skip fit for well H02 for Label:1.
[70]:
<seaborn.axisgrid.PairGrid at 0x7f9115297470>

[71]:
# Corner pair-plot of the global-fit parameters, colored by the S1_y1 plateau.
with sb.axes_style("darkgrid"):
    g = sb.pairplot(
        tit.result_global.dataframe[["S1_y2", "S0_y2", "K", "S1_y1", "S0_y1"]],
        hue="S1_y1",
        palette="Reds",
        corner=True,
        diag_kind="kde",
    )

2.4.6. Combining#
[72]:
# Restrict the label-1 results to unknown (non-control) wells, sorted by well.
ctrl_wells = set(tit.scheme.ctrl)
keys_unk = tit.fit_keys - ctrl_wells
res_unk = tit.results[1].dataframe.loc[sorted(keys_unk)]
res_unk["well"] = res_unk.index
[73]:
# Per-well dot plot of K and the two plateaus for the unknown samples.
f = plt.figure(figsize=(24, 14))
# Make the PairGrid
g = sb.PairGrid(
    res_unk,
    x_vars=["K", "S1_default", "S0_default"],
    y_vars="well",
    height=12,
    aspect=0.4,
)
# Draw a dot plot using the stripplot function
g.map(sb.stripplot, size=14, orient="h", palette="Set2", edgecolor="auto")
# Use the same x axis limits on all columns and add better labels
# g.set(xlim=(0, 25), xlabel="Crashes", ylabel="")
# Use semantically meaningful titles for the columns
titles = ["$pK_a$", "B$_{neutral}$", "B$_{anionic}$"]
for ax, title in zip(g.axes.flat, titles, strict=False):
    # Set a different title for each axes
    ax.set(title=title)
    # Make the grid horizontal instead of vertical
    ax.xaxis.grid(False)
    ax.yaxis.grid(True)
sb.despine(left=True, bottom=True)
<Figure size 2400x1400 with 0 Axes>
