AtomBio committed on
Commit
25f05fc
1 Parent(s): 3c6a467

Created api_prediction.py

Files changed (1)
  1. api_prediction.py +305 -0
api_prediction.py ADDED
@@ -0,0 +1,305 @@
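+ # api_prediction.py: training and inference pipeline for the AptaBLE model, which scores
+ # aptamer/protein pairs. Aptamers are tokenized with a nucleotide-transformer encoder and
+ # proteins with ESM-2; training logs go to TensorBoard and checkpoints are written via
+ # Accelerate. Launch training with: accelerate launch api_prediction.py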
+ import torch
+ from torch.utils.tensorboard import SummaryWriter
+ from torch.utils.data import DataLoader
+ import numpy as np
+ from sklearn.metrics import *
+ from omegaconf import OmegaConf
+ import os
+ import random
+
+ from mcts import MCTS
+ import esm
+ from encoders import AptaBLE
+ from utils import get_scores, API_Dataset, get_nt_esm_dataset, rna2vec
+ from accelerate import Accelerator
+ import glob
+ import requests
+
+ from transformers import AutoTokenizer, AutoModelForMaskedLM
+
+
+
+ # accelerator = Accelerator(kwargs_handlers=[DistributedDataParallelKwargs(find_unused_parameters=True)]) # NOTE: Buggy | Disables unused parameter issue
+ accelerator = Accelerator()
+
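+ # A single module-level Accelerator handles device placement and (optional) distributed
+ # training; the model, optimizer, scheduler and dataloaders are wrapped with
+ # accelerator.prepare() inside AptaBLE_Pipeline.train().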
+ class AptaBLE_Pipeline():
+     """In-house API prediction score pipeline."""
+     def __init__(self, lr, dropout, weight_decay, epochs, model_type, model_version, model_save_path, accelerate_save_path, tensorboard_logdir, *args, **kwargs):
+         self.device = accelerator.device
+         self.lr = lr
+         self.weight_decay = weight_decay
+         self.epochs = epochs
+         self.model_type = model_type
+         self.model_version = model_version
+         self.model_save_path = model_save_path
+         self.accelerate_save_path = accelerate_save_path
+         self.tensorboard_logdir = tensorboard_logdir
+         esm_prot_encoder, self.esm_alphabet = esm.pretrained.esm2_t33_650M_UR50D()  # ESM-2 Encoder
+
+         # Freeze ESM-2
+         for name, param in esm_prot_encoder.named_parameters():
+             param.requires_grad = False
+         for name, param in esm_prot_encoder.named_parameters():
+             if "layers.30" in name or "layers.31" in name or "layers.32" in name:
+                 param.requires_grad = True
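+         # Only the last three ESM-2 transformer blocks (layers 30-32) remain trainable;
+         # all other protein-encoder parameters stay frozen.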
+
+         self.batch_converter = self.esm_alphabet.get_batch_converter(truncation_seq_length=1678)
+
+         # self.nt_tokenizer = AutoTokenizer.from_pretrained("InstaDeepAI/nucleotide-transformer-2.5b-1000g")
+         # nt_encoder = AutoModelForMaskedLM.from_pretrained("InstaDeepAI/nucleotide-transformer-2.5b-1000g")
+         self.nt_tokenizer = AutoTokenizer.from_pretrained("InstaDeepAI/nucleotide-transformer-v2-50m-multi-species", trust_remote_code=True)
+         nt_encoder = AutoModelForMaskedLM.from_pretrained("InstaDeepAI/nucleotide-transformer-v2-50m-multi-species", trust_remote_code=True)
+
+         self.model = AptaBLE(
+             apta_encoder=nt_encoder,
+             prot_encoder=esm_prot_encoder,
+             dropout=dropout,
+         ).to(self.device)
+
+         self.criterion = torch.nn.BCELoss().to(self.device)
+
+
+     def train(self):
+         print('Training the model!')
+
+         # Initialize writer instance
+         writer = SummaryWriter(log_dir=f"log/{self.model_type}/{self.model_version}")
+
+         # Initialize early stopping
+         self.early_stopper = EarlyStopper(3, 3)
+         self.optimizer = torch.optim.AdamW(self.model.parameters(), lr=self.lr, weight_decay=self.weight_decay)
+         self.scheduler = torch.optim.lr_scheduler.MultiStepLR(self.optimizer, [4, 7, 10], 0.1)
+
+
+         # Configure pytorch objects for distributed environment (i.e. sharded dataloader, multiple copies of model, etc.)
+         self.model, self.optimizer, self.train_loader, self.test_loader, self.bench_loader, self.scheduler = accelerator.prepare(self.model, self.optimizer, self.train_loader, self.test_loader, self.bench_loader, self.scheduler)
+         best_loss = 100
+
+         for epoch in range(1, self.epochs+1):
+             self.model.train()
+             loss_train, _, _ = self.batch_step(self.train_loader, train_mode=True)
+             self.model.eval()
+             self.scheduler.step()
+             with torch.no_grad():
+                 loss_test, pred_test, target_test = self.batch_step(self.test_loader, train_mode=False)
+             test_scores = get_scores(target_test, pred_test)
+             print("\tTrain Loss: {: .6f}\tTest Loss: {: .6f}\tTest ACC: {:.6f}\tTest AUC: {:.6f}\tTest MCC: {:.6f}\tTest PR_AUC: {:.6f}\tF1: {:.6f}\n".format(loss_train, loss_test, test_scores['acc'], test_scores['roc_auc'], test_scores['mcc'], test_scores['pr_auc'], test_scores['f1']))
+             # stop_early = self.early_stopper.early_stop(loss_test)
+             # Early stop - model has not improved on eval set.
+             # if stop_early:
+             #     break
+             # Only do checkpointing after near-convergence
+             if epoch > 2:
+                 with torch.no_grad():
+                     loss_bench, pred_bench, target_bench = self.batch_step(self.bench_loader, train_mode=False)
+                 bench_scores = get_scores(target_bench, pred_bench)
+                 print("\tBench Loss: {: .6f}\tBench ACC: {:.6f}\tBench AUC: {:.6f}\tBench MCC: {:.6f}\tBench PR_AUC: {:.6f}\tBench F1: {:.6f}\n".format(loss_bench, bench_scores['acc'], bench_scores['roc_auc'], bench_scores['mcc'], bench_scores['pr_auc'], bench_scores['f1']))
+                 writer.add_scalar("Loss/bench", loss_bench, epoch)
+                 for k, v in bench_scores.items():
+                     if isinstance(v, float):
+                         writer.add_scalar(f'{k}/bench', bench_scores[k], epoch)
+                 # Checkpoint based off of benchmark criteria
+                 # If model has improved and early stopping patience counter was just reset:
+                 if bench_scores['mcc'] > 0.5 and test_scores['mcc'] > 0.5 and loss_bench < 0.9 and accelerator.is_main_process:
+                     best_loss = loss_test
+                     # Remove all other files
+                     # for f in glob.glob(f'{self.model_save_path}/model*.pt'):
+                     #     os.remove(f)
+                     accelerator.save_state(self.accelerate_save_path)
+                     model = accelerator.unwrap_model(self.model)
+                     torch.save(model.state_dict(), f'{self.model_save_path}/model_epoch={epoch}.pt')
+                     print(f'Model saved at {self.model_save_path}')
+                     print(f'Accelerate statistics saved at {self.accelerate_save_path}!')  # Access via accelerator.load_state("./output")
+
+             # logging
+             writer.add_scalar("Loss/train", loss_train, epoch)
+             writer.add_scalar("Loss/test", loss_test, epoch)
+             for k, v in test_scores.items():
+                 if isinstance(v, float):
+                     writer.add_scalar(f'{k}/test', test_scores[k], epoch)
+
+         print("Training finished | access tensorboard via 'tensorboard --logdir=runs'.")
+         writer.flush()
+         writer.close()
+
+     def batch_step(self, loader, train_mode=True):
+         loss_total = 0
+         pred = np.array([])
+         target = np.array([])
+         for batch_idx, (apta, esm_prot, y, apta_attn, prot_attn) in enumerate(loader):
+             if train_mode:
+                 self.optimizer.zero_grad()
+
+             y_pred = self.predict(apta, esm_prot, apta_attn, prot_attn)
+             y_true = torch.tensor(y, dtype=torch.float32).to(self.device)  # not needed since accelerator modifies dataloader to automatically map input objects to correct dev
+             loss = self.criterion(torch.flatten(y_pred), y_true)
+
+             if train_mode:
+                 accelerator.backward(loss)  # Accelerate backward() method scales gradients and uses appropriate backward method as configured across devices
+                 self.optimizer.step()
+
+             loss_total += loss.item()
+
+             pred = np.append(pred, torch.flatten(y_pred).clone().detach().cpu().numpy())
+             target = np.append(target, torch.flatten(y_true).clone().detach().cpu().numpy())
+             mode = 'train' if train_mode else 'eval'
+             print(mode + "[{}/{}({:.0f}%)]".format(batch_idx, len(loader), 100. * batch_idx / len(loader)), end="\r", flush=True)
+         loss_total /= len(loader)
+         return loss_total, pred, target
+
+     def predict(self, apta, esm_prot, apta_attn, prot_attn):
+         y_pred, _, _, _ = self.model(apta, esm_prot, apta_attn, prot_attn)
+         return y_pred
+
+     def inference(self, apta, prot, labels):
+         """Perform inference on a batch of aptamer/protein pairs."""
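+         # Illustrative call (sequences and labels below are placeholders, not repo data):
+         #   scores = pipeline.inference(apta=["ATCGGCTAGCTTAGGACGT"], prot=["MKTAYIAKQRQISFVKSHFSRQ"], labels=[0])
+         # Returns a numpy array with one predicted interaction score per aptamer/protein pair.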
+         self.model.eval()
+
+         max_length = 275  # nt_tokenizer.model_max_length
+
+         inputs = [(i, j) for i, j in zip(labels, prot)]
+         _, _, prot_tokens = self.batch_converter(inputs)
+         apta_toks = self.nt_tokenizer.batch_encode_plus(apta, return_tensors='pt', padding='max_length', max_length=max_length)['input_ids']
+         apta_attention_mask = apta_toks != self.nt_tokenizer.pad_token_id
+
+         # truncating
+         prot_tokenized = prot_tokens[:, :1680]
+         # padding
+         prot_ex = torch.ones((prot_tokenized.shape[0], 1680), dtype=torch.int64)*self.esm_alphabet.padding_idx
+         prot_ex[:, :prot_tokenized.shape[1]] = prot_tokenized
+         prot_attention_mask = prot_ex != self.esm_alphabet.padding_idx
+
+         loader = DataLoader(API_Dataset(apta_toks, prot_ex, labels, apta_attention_mask, prot_attention_mask), batch_size=1, shuffle=False)
+
+         self.model, loader = accelerator.prepare(self.model, loader)
+         with torch.no_grad():
+             _, pred, _ = self.batch_step(loader, train_mode=False)
+
+         return pred
+
+     def recommend(self, target, n_aptamers, depth, iteration, verbose=True):
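+         # Uses MCTS to propose n_aptamers candidate aptamer sequences against the target protein
+         # sequence and scores each candidate with the trained model.
+         # Illustrative call (argument values are placeholders):
+         #   pipeline.recommend(target="MKTAYIAKQRQISFVKSHFSRQ", n_aptamers=5, depth=10, iteration=100)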
+
+         candidates = []
+         _, _, prot_tokens = self.batch_converter([(1, target)])
+         prot_tokenized = torch.tensor(prot_tokens, dtype=torch.int64)
+         # adjusting for max protein sequence length during model training
+         encoded_targetprotein = torch.ones((prot_tokenized.shape[0], 1678), dtype=torch.int64)*self.esm_alphabet.padding_idx
+         encoded_targetprotein[:, :prot_tokenized.shape[1]] = prot_tokenized
+         encoded_targetprotein = encoded_targetprotein.to(self.device)
+
+         mcts = MCTS(encoded_targetprotein, depth=depth, iteration=iteration, states=8, target_protein=target, device=self.device, esm_alphabet=self.esm_alphabet)
+
+         for _ in range(n_aptamers):
+             mcts.make_candidate(self.model)
+             candidates.append(mcts.get_candidate())
+
+             self.model.eval()
+             with torch.no_grad():
+                 sim_seq = np.array([mcts.get_candidate()])
+                 print('first candidate: ', sim_seq)
+                 # apta = torch.tensor(rna2vec(sim_seq), dtype=torch.int64).to(self.device)
+                 apta = self.nt_tokenizer.batch_encode_plus(sim_seq, return_tensors='pt', padding='max_length', max_length=275)['input_ids']
+                 apta_attn = apta != self.nt_tokenizer.pad_token_id
+                 prot_attn = encoded_targetprotein != self.esm_alphabet.padding_idx
+                 score, _, _, _ = self.model(apta.to(self.device), encoded_targetprotein.to(self.device), apta_attn.to(self.device), prot_attn.to(self.device))
+
+             if verbose:
+                 candidate = mcts.get_candidate()
+                 print("candidate:\t", candidate, "\tscore:\t", score)
+                 print("*"*80)
+             mcts.reset()
+
+     def set_data_for_training(self, filepath, batch_size):
+         # ds_train, ds_test, ds_bench = get_nt_esm_dataset(filepath, self.nt_tokenizer, self.batch_converter, self.esm_alphabet)
+         ds_train, ds_test, ds_bench = get_nt_esm_dataset(filepath, self.nt_tokenizer, self.batch_converter, self.esm_alphabet)
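+         # Each split is indexed as [0] aptamer token ids, [1] protein token ids, [2] labels,
+         # [3] aptamer attention mask, [4] protein attention mask -- the same order unpacked
+         # as (apta, esm_prot, y, apta_attn, prot_attn) in batch_step().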
+
+         self.train_loader = DataLoader(API_Dataset(ds_train[0], ds_train[1], ds_train[2], ds_train[3], ds_train[4]), batch_size=batch_size, shuffle=True)
+         self.test_loader = DataLoader(API_Dataset(ds_test[0], ds_test[1], ds_test[2], ds_test[3], ds_test[4]), batch_size=batch_size, shuffle=False)
+         self.bench_loader = DataLoader(API_Dataset(ds_bench[0], ds_bench[1], ds_bench[2], ds_bench[3], ds_bench[4]), batch_size=batch_size, shuffle=False)
+
+ class EarlyStopper:
+     def __init__(self, patience=1, min_delta=0):
+         self.patience = patience
+         self.min_delta = min_delta
+         self.counter = 0
+         self.min_validation_loss = float('inf')
+
+     def early_stop(self, validation_loss):
+         if validation_loss < self.min_validation_loss:
+             self.min_validation_loss = validation_loss
+             self.counter = 0
+         elif validation_loss > (self.min_validation_loss + self.min_delta):
+             self.counter += 1
+             if self.counter >= self.patience:
+                 return True
+         return False
+
+ def seed_torch(seed=5471):
+     random.seed(seed)
+     os.environ['PYTHONHASHSEED'] = str(seed)
+     np.random.seed(seed)
+     torch.manual_seed(seed)
+     torch.cuda.manual_seed(seed)
+     torch.cuda.manual_seed_all(seed)  # if you are using multi-GPU.
+     torch.backends.cudnn.benchmark = False
+     torch.backends.cudnn.deterministic = True
+
+ def main():
+     conf = OmegaConf.load('config.yaml')
+     hyperparameters = conf.hyperparameters
+     logging = conf.logging
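+     # config.yaml is expected to define the keys read below; the values here are illustrative only:
+     #   hyperparameters:
+     #     lr: 1e-4
+     #     weight_decay: 0.01
+     #     dropout: 0.1
+     #     batch_size: 16
+     #     epochs: 10
+     #     seed: 5471
+     #   logging:
+     #     model_type: AptaBLE
+     #     model_version: v1
+     #     model_save_path: ./models
+     #     accelerate_save_path: ./output
+     #     tensorboard_logdir: ./runs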
+
+     lr = hyperparameters['lr']
+     wd = hyperparameters['weight_decay']
+     dropout = hyperparameters['dropout']
+     batch_size = hyperparameters['batch_size']
+     epochs = hyperparameters['epochs']
+     model_type = logging['model_type']
+     model_version = logging['model_version']
+     model_save_path = logging['model_save_path']
+     accelerate_save_path = logging['accelerate_save_path']
+     tensorboard_logdir = logging['tensorboard_logdir']
+     seed = hyperparameters['seed']
+
+     if not os.path.exists(model_save_path):
+         os.makedirs(model_save_path)
+
+     seed_torch(seed=seed)
+
+     pipeline = AptaBLE_Pipeline(
+         lr=lr,
+         weight_decay=wd,
+         epochs=epochs,
+         model_type=model_type,
+         model_version=model_version,
+         model_save_path=model_save_path,
+         accelerate_save_path=accelerate_save_path,
+         tensorboard_logdir=tensorboard_logdir,
+         d_model=128,
+         d_ff=512,
+         n_layers=6,
+         n_heads=8,
+         dropout=dropout,
+         load_best_pt=True,  # already loads the pretrained model using the datasets included in repo -- no need to run the bottom two cells
+         device='cuda',
+         seed=seed)
+
+     datapath = "./data/ABW_real_dna_aptamers_HC_v6.pkl"
+     # datapath = './data/ABW_real_dna_aptamers_HC_neg_scrambles_neg_homology.pkl'
+
+     pipeline.set_data_for_training(datapath, batch_size=batch_size)
+     pipeline.train()
+
+     endpoint = 'https://slack.atombioworks.com/hooks/t3y99qu6pi81frhwrhef1849wh'
+     msg = {"text": "Model has finished training."}
+     _ = requests.post(endpoint,
+                       json=msg,
+                       headers={"Content-Type": "application/json"},
+                       )
+
+     return
+
+ if __name__ == "__main__":
+     # launch training w/ the following: "accelerate launch api_prediction.py [args]"
+     main()