-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathprocessing.py
More file actions
229 lines (173 loc) · 11.3 KB
/
processing.py
File metadata and controls
229 lines (173 loc) · 11.3 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
import puncta_tracker as tracker
import numpy as np
from skimage import io
import pandas as pd
import data
import glob
import os
def batch_gather_dots_from_directory(input_directory: str, output_directory: str, blob_threshold: int):
    """Run dot detection on every .tif image in *input_directory*.

    One CSV dot database per image is written to *output_directory*, named
    '<image stem>_threshold_<blob_threshold>_dot_database.csv'.
    Note: directory arguments are expected to end with a path separator,
    consistent with the rest of this module.
    """
    # Bug fix: use endswith() instead of substring containment so that names
    # like 'image.tif.bak' are not picked up as images.
    file_names = [file for file in os.listdir(input_directory) if file.endswith('.tif')]
    for file in file_names:
        dots = gather_dots(input_directory + file, blob_threshold)
        # file[:-4] strips the '.tif' extension to form the output stem.
        dots.to_csv('{}{}_threshold_{}_dot_database.csv'.format(output_directory, file[:-4], str(blob_threshold)))
def gather_dots(data_image_file_path: str, blob_threshold):
    """Detect puncta ("dots") in the image stored at *data_image_file_path*.

    The image is loaded, cast to float64 and passed to the tracker with a
    fixed target radius of 5 and a minimum blob radius of 3. Returns the
    dot database produced by the tracker.
    """
    print('currently finding dots in sample...' + data_image_file_path)
    image = np.asarray(io.imread(data_image_file_path), dtype=np.float64)
    return tracker.find_puncta(image,
                               target_radius=5,
                               blob_min_radius=3,
                               blob_threshold=blob_threshold)
def filter_dots_by_intensity_and_background(data_image_file_path: str, associated_dot_database: pd.DataFrame):
    """Filter *associated_dot_database* against the image's intensity statistics.

    The lower cutoff sits 10% of the way from the image background mean
    towards the dot population mean; the upper cutoff is the dot mean plus
    six standard deviations. Returns the filtered dot database.
    """
    image = io.imread(data_image_file_path)
    intensities = associated_dot_database['mean_intensity']
    background_mean = image.mean()
    dot_mean = intensities.mean()
    lower_cutoff = background_mean + 0.1 * (dot_mean - background_mean)
    upper_cutoff = dot_mean + 6 * intensities.std()
    return data.filter_dots(dot_dataframe=associated_dot_database,
                            max_blob_r=5,
                            mean_threshold=lower_cutoff,
                            max_intensity=upper_cutoff)
def batch_filter_gaussian_fitted_dots(input_directory: str,
                                      output_directory: str,
                                      database_file_unique_keyword=None,
                                      max_gaussian_deviation=1.5,
                                      max_gaussian_height=1000,
                                      max_elliptic_ratio=1.2,
                                      max_mean_height_percentage_difference=0.2):
    """Filter every Gaussian-fitted dot database against its paired image.

    Each '*.tif' in *input_directory* is paired with the first '*.csv' whose
    name contains the image stem (optionally restricted to files containing
    *database_file_unique_keyword*). The database is filtered first by
    intensity/background statistics, then by the Gaussian-fit criteria, and
    written to *output_directory* as '<stem>_filtered_gaussian_fitted_dots.csv'.
    """
    image_files = glob.glob(input_directory + '*.tif')
    dot_database_files = glob.glob(input_directory + '*.csv')
    if database_file_unique_keyword is not None:
        dot_database_files = [file for file in dot_database_files if database_file_unique_keyword in file]
    image_to_dot_pairing = {}
    for image_file in image_files:
        matches = [file for file in dot_database_files if image_file[:-4] in file]
        # Robustness fix: skip (with a warning) instead of raising IndexError
        # when an image has no matching dot database.
        if not matches:
            print('Warning: no dot database found for {}; skipping.'.format(image_file))
            continue
        image_to_dot_pairing[image_file] = matches[0]
    for image_file, dot_database_file in image_to_dot_pairing.items():
        associated_dot_database = pd.read_csv(dot_database_file, index_col=0)
        print('Currently filtering dot database for {} '.format(image_file.split(os.sep)[-1]))
        filtered_database = filter_dots_by_intensity_and_background(data_image_file_path=image_file,
                                                                    associated_dot_database=associated_dot_database)
        # Note that the parameters below are not accessible from outside the function. I might change this in the
        # future, but for now, this works only when the mean is calculated using a 5x5 window and Gaussian fitted with
        # 11x11
        filtered_database = data.filter_dots(
            filtered_database,
            max_gaussian_deviation=max_gaussian_deviation,
            max_gaussian_height=max_gaussian_height,
            max_elliptic_ratio=max_elliptic_ratio,
            max_mean_height_percentage_difference=max_mean_height_percentage_difference
        )
        # Bug fix: the original called reset_index() without assigning the
        # result (a no-op, since reset_index returns a new frame). drop=True
        # keeps the stale index out of the saved CSV.
        filtered_database = filtered_database.reset_index(drop=True)
        output_file_name = image_file.split(os.sep)[-1][:-4]
        filtered_database.to_csv('{}{}_filtered_gaussian_fitted_dots.csv'.format(output_directory, output_file_name))
def gaussian_fit_dots(data_image_file_path: str, associated_dot_database: pd.DataFrame, gaussian_fit_diameter: int):
    """Fit a Gaussian of *gaussian_fit_diameter* to each dot in the database.

    Loads the image as float64 and returns the dot database with the fit
    parameters appended by the data module.
    """
    image = np.asarray(io.imread(data_image_file_path), dtype=np.float64)
    return data.add_gaussian_fit_params_to_dot_database(associated_dot_database, image, gaussian_fit_diameter)
def batch_gaussian_fit_dots(input_directory: str, output_directory: str, gaussian_fit_diameter=11):
    """Gaussian-fit every dot database in *input_directory* against its image.

    Pairs each '*.tif' with the first '*.csv' whose name contains the image
    stem, runs gaussian_fit_dots on the pair, and writes
    '<image stem>_gaussian_fitted_dots.csv' into *output_directory*.
    """
    image_files = glob.glob(input_directory + '*.tif')
    dot_database_files = glob.glob(input_directory + '*.csv')
    image_to_dot_pairing_map = {}
    # map dot database to images
    for image_file in image_files:
        matches = [file for file in dot_database_files if image_file[:-4] in file]
        # Robustness fix: the original indexed [0] unconditionally and raised
        # IndexError for images with no matching database; skip them instead.
        if not matches:
            print('Warning: no dot database found for {}; skipping.'.format(image_file))
            continue
        image_to_dot_pairing_map[image_file] = matches[0]
    for image_file, dot_database_file in image_to_dot_pairing_map.items():
        associated_dot_database = pd.read_csv(dot_database_file, index_col=0)
        print('Currently processing sample {}'.format(image_file.split(os.sep)[-1]))
        fitted_gaussian_database = gaussian_fit_dots(image_file, associated_dot_database, gaussian_fit_diameter)
        output_file_name = image_file.split(os.sep)[-1][:-4]
        fitted_gaussian_database.to_csv('{}{}_gaussian_fitted_dots.csv'.format(output_directory, output_file_name))
def batch_tracing(input_directory: str,
                  output_directory: str,
                  database_file_unique_keyword=None,
                  max_frame_gap=0,
                  max_spatial_jump=0,
                  frame_time_interval=1):
    """Link dots into traces for every dot database in *input_directory*.

    Two CSVs are written per input: '<stem>_traces.csv' with the compiled
    trace data and '<stem>_mapping.csv' with the dot-ID -> trace-ID map.
    """
    database_paths = glob.glob(input_directory + '*.csv')
    if database_file_unique_keyword is not None:
        database_paths = [path for path in database_paths if database_file_unique_keyword in path]
    for database_path in database_paths:
        dots = pd.read_csv(database_path, index_col=0)
        print('Tracing {}'.format(database_path.split(os.sep)[-1]))
        assignment = tracker.simple_tracker(dots, max_frame_gap, max_spatial_jump)
        compiled_traces = data.compile_trace_data(dots_database=dots,
                                                  dot_trace_mapping=assignment,
                                                  frame_time_interval=frame_time_interval)
        mapping_table = pd.DataFrame(list(assignment.items()), columns=['dot_ID', 'trace_ID'])
        stem = database_path.split(os.sep)[-1][:-4]
        compiled_traces.to_csv('{}{}_traces.csv'.format(output_directory, stem))
        mapping_table.to_csv('{}{}_mapping.csv'.format(output_directory, stem))
def batch_filter_traces(input_directory: str,
                        output_directory: str,
                        database_file_unique_keyword=None,
                        min_spatial_difference=0,
                        gap_threshold_for_eliminate_co_localized_traces=0,
                        min_x_coor=10,
                        max_x_coor=790,
                        min_y_coor=10,
                        max_y_coor=790):
    """Filter every trace database in *input_directory*, saving '<stem>_cropped.csv'.

    A spatial filter (remove concurrent/overlapping traces) and a temporal
    filter (remove traces with long gaps) are each applied only when their
    threshold parameter is positive; a trace must survive every active
    filter. Finally, traces are cropped to the given x/y bounds.
    """
    trace_database_file_names = glob.glob(input_directory + '*.csv')
    if database_file_unique_keyword is not None:
        trace_database_file_names = [file for file in trace_database_file_names if database_file_unique_keyword in file]
    for trace_database_file_name in trace_database_file_names:
        trace_data = pd.read_csv(trace_database_file_name, index_col=0)
        print('Filtering trace data {}'.format(trace_database_file_name.split(os.sep)[-1]))
        spatial_filtered = None
        temporal_filtered = None
        if min_spatial_difference > 0:
            spatial_filtered = data.remove_concurrent_and_overlapping_traces(trace_data, min_spatial_difference)
        if gap_threshold_for_eliminate_co_localized_traces > 0:
            temporal_filtered = data.remove_traces_with_long_gaps(trace_data, max_spatial_difference=5,
                                                                  gap_threshold=gap_threshold_for_eliminate_co_localized_traces)
        # Bug fix: the original unconditionally inner-merged the two results,
        # so an inactive filter contributed an empty, column-less DataFrame and
        # the merge raised MergeError (no common columns) or emptied the data.
        # Merge only when both filters ran; otherwise keep whichever applied.
        if spatial_filtered is not None and temporal_filtered is not None:
            trace_data = spatial_filtered.merge(temporal_filtered, how='inner')
        elif spatial_filtered is not None:
            trace_data = spatial_filtered
        elif temporal_filtered is not None:
            trace_data = temporal_filtered
        trace_data = data.filter_trace_by_start_end_and_xy(trace_data, min_x_coor, max_x_coor, min_y_coor, max_y_coor)
        output_file_name = trace_database_file_name.split(os.sep)[-1][:-4]
        trace_data.to_csv('{}{}_cropped.csv'.format(output_directory, output_file_name))
def batch_tally_trace_dwell_times(input_directory: str,
                                  output_directory: str,
                                  input_file_unique_keyword=None,
                                  output_file_keyword='tally'):
    """Tally dwell-time occurrences for each trace database in *input_directory*.

    Writes one '<stem>_<output_file_keyword>.csv' per input file into
    *output_directory*.
    """
    database_paths = glob.glob(input_directory + '*.csv')
    if input_file_unique_keyword is not None:
        database_paths = [path for path in database_paths if input_file_unique_keyword in path]
    for database_path in database_paths:
        traces = pd.read_csv(database_path, index_col=0)
        tally = data.count_instances(traces, 'dwell_time')
        stem = database_path.split(os.sep)[-1][:-4]
        tally.to_csv('{}{}_{}.csv'.format(output_directory, stem, output_file_keyword))
def adjust_dwell_time_by_frame_intervals(input_directory: str,
                                         output_directory: str,
                                         frame_interval,
                                         input_file_unique_keyword=None):
    """Convert frame-count dwell times into time units for every trace CSV.

    For each CSV in *input_directory* (optionally restricted to names
    containing *input_file_unique_keyword*), sets the 'dwell_time' column to
    'dwell_by_frame' * frame_interval and writes the result to
    *output_directory*. When input and output directories are the same, a
    '_processed' suffix is appended so the source files are not overwritten.
    """
    csv_paths = glob.glob(input_directory + '*.csv')
    if input_file_unique_keyword is not None:
        csv_paths = [path for path in csv_paths if input_file_unique_keyword in path]
    suffix = '_processed' if input_directory == output_directory else ''
    for csv_path in csv_paths:
        frame_table = pd.read_csv(csv_path, index_col=0)
        frame_table['dwell_time'] = frame_table['dwell_by_frame'] * frame_interval
        stem = csv_path.split(os.sep)[-1][:-4]
        frame_table.to_csv('{}{}{}.csv'.format(output_directory, stem, suffix))
def batch_filter_trace_sampling_space(input_directory: str,
                                      output_directory: str,
                                      dwell_time_limit=None,
                                      input_file_unique_keyword=None,
                                      output_file_unique_keyword=None):
    """Filter the sampling space of every trace database in *input_directory*.

    Each CSV (optionally restricted to names containing
    *input_file_unique_keyword*) is passed through
    data.filter_traces_sample_space with *dwell_time_limit* and written to
    *output_directory* as '<stem><output_file_unique_keyword>.csv'.
    """
    trace_database_file_names = glob.glob(input_directory + '*.csv')
    if input_file_unique_keyword is not None:
        trace_database_file_names = [file for file in trace_database_file_names if input_file_unique_keyword in file]
    if output_file_unique_keyword is None:
        # '_processed' avoids overwriting the inputs when the directories
        # coincide. Bug fix: when they differ, default to '' — the original
        # left the keyword as None and formatted the literal string 'None'
        # into the output filename.
        output_file_unique_keyword = '_processed' if input_directory == output_directory else ''
    for trace_database_file_name in trace_database_file_names:
        trace_data = pd.read_csv(trace_database_file_name, index_col=0)
        filtered_data = data.filter_traces_sample_space(trace_data, dwell_time_limit)
        output_file_core_name = trace_database_file_name.split(os.sep)[-1][:-4]
        filtered_data.to_csv('{}{}{}.csv'.format(output_directory, output_file_core_name, output_file_unique_keyword))