lgq12697 committed on
Commit
5c709db
1 Parent(s): b656b18

Add Plant DNAGemma model for promoter strength prediction in protoplasts

README.md CHANGED
@@ -1,3 +1,63 @@
- ---
- license: cc-by-nc-sa-4.0
- ---
+ ---
+ license: cc-by-nc-sa-4.0
+ widget:
+ - text: AGTCCAGTGGACGACCAGCCACGGCTCCGGTCTGTAGAACCATCGCGGAAACGGCTCGCAAAACTCTAAACAGCGCAAACGATGCGCGCGCCGAAGCAACCCGGCTCTACTTATAAAAACGTCCAACGGTGAGCACCGAGCAGCTACTACTCGTACTCCCCCCACCGATC
+ tags:
+ - DNA
+ - biology
+ - genomics
+ ---
+ # Plant foundation DNA large language models
+ 
+ The plant DNA large language models (LLMs) comprise a series of foundation models based on different model architectures, pre-trained on various plant reference genomes.
+ All models have a comparable size of 90 MB to 150 MB, use a BPE tokenizer for tokenization, and share a vocabulary of 8,000 tokens.
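+ 
+ For example, the DNA-specific BPE tokenizer can be inspected directly (a minimal sketch; the exact token splits depend on the learned vocabulary):
+ ```python
+ from transformers import AutoTokenizer
+ 
+ tokenizer = AutoTokenizer.from_pretrained(
+     'zhangtaolab/plant-dnagemma-promoter_strength_protoplast', trust_remote_code=True)
+ # a short DNA fragment is segmented into multi-base BPE pieces
+ print(tokenizer.tokenize('AGTCCAGTGGACGACCAGCCACGGCTCC'))
+ print(len(tokenizer))  # vocabulary size (8,000 BPE tokens plus special tokens)
+ ```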
+ 
+ 
+ **Developed by:** zhangtaolab
+ 
+ ### Model Sources
+ 
+ - **Repository:** [Plant DNA LLMs](https://github.com/zhangtaolab/plant_DNA_LLMs)
+ - **Manuscript:** [Versatile applications of foundation DNA large language models in plant genomes]()
+ 
+ ### Architecture
+ 
+ The model is based on the Google Gemma architecture, with the tokenizer modified specifically for DNA sequences.
+ 
+ This model is fine-tuned to predict promoter strength in the maize protoplast system.
+ 
+ 
+ ### How to use
+ 
+ Install the runtime library first:
+ ```bash
+ pip install transformers
+ ```
+ 
+ Here is a simple example of inference:
+ ```python
+ from transformers import AutoModelForSequenceClassification, AutoTokenizer, pipeline
+ 
+ model_name = 'plant-dnagemma-promoter_strength_protoplast'
+ # load the fine-tuned model and its DNA tokenizer from the Hugging Face Hub
+ model = AutoModelForSequenceClassification.from_pretrained(f'zhangtaolab/{model_name}', trust_remote_code=True)
+ tokenizer = AutoTokenizer.from_pretrained(f'zhangtaolab/{model_name}', trust_remote_code=True)
+ 
+ # inference; function_to_apply="none" keeps the raw regression output
+ sequences = ['TACTCTAATCGTATCAGCTGCACTTGCGTACAGGCTACCGGCGTCCTCAGCCACGTAAGAAAAGGCCCAATAAAGGCCCAACTACAACCAGCGGATATATATACTGGAGCCTGGCGAGATCACCCTAACCCCTCACACTCCCATCCAGCCGCCACCAGGTGCAGAGTGTT',
+              'ATTTCAAAACTAGTTTTCTATAAACGAAAACTTATATTTATTCCGCTTGTTCCGTTTGATCTGCTGATTCGACACCGTTTTAACGTATTTTAAGTAAGTATCAGAAATATTAATGTGAAGATAAAAGAAAATAGAGTAAATGTAAAGGAAAATGCATAAGATTTTGTTGA']
+ pipe = pipeline('text-classification', model=model, tokenizer=tokenizer,
+                 trust_remote_code=True, function_to_apply="none")
+ results = pipe(sequences)
+ print(results)
+ ```
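+ 
+ With `function_to_apply="none"`, the pipeline returns the raw output of the regression head, so each entry in `results` is a dict whose `score` field is the predicted promoter strength for the corresponding sequence.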
+ 
+ 
+ ### Training data
+ 
+ We use GemmaForSequenceClassification to fine-tune the model.
+ The detailed training procedure can be found in our manuscript.
+ 
+ 
+ #### Hardware
+ 
+ The model was trained on an NVIDIA GTX 1080 Ti GPU (11 GB).
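+ 
+ For reference, below is a minimal fine-tuning sketch consistent with the model configuration (the base checkpoint name, toy data, and hyperparameters are illustrative assumptions, not the exact recipe from the manuscript):
+ ```python
+ from datasets import Dataset
+ from transformers import (AutoTokenizer, GemmaForSequenceClassification,
+                           Trainer, TrainingArguments)
+ 
+ base = 'zhangtaolab/plant-dnagemma'  # assumed name of the pre-trained base model
+ tokenizer = AutoTokenizer.from_pretrained(base, trust_remote_code=True)
+ # single-label regression head, matching problem_type "regression" in config.json
+ model = GemmaForSequenceClassification.from_pretrained(
+     base, num_labels=1, problem_type='regression', trust_remote_code=True)
+ 
+ # toy stand-in for the real (sequence, strength) training pairs
+ data = Dataset.from_dict({'text': ['ACGT' * 40], 'label': [1.23]})
+ data = data.map(lambda x: tokenizer(x['text'], truncation=True, max_length=512))
+ 
+ trainer = Trainer(model=model, tokenizer=tokenizer, train_dataset=data,
+                   args=TrainingArguments(output_dir='out', num_train_epochs=1,
+                                          per_device_train_batch_size=2))
+ trainer.train()
+ ```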
config.json ADDED
@@ -0,0 +1,36 @@
+ {
+   "_name_or_path": "Plant_DNAGemma_promoter_strength_protoplast",
+   "architectures": [
+     "GemmaForSequenceClassification"
+   ],
+   "attention_bias": false,
+   "attention_dropout": 0.0,
+   "bos_token_id": 2,
+   "eos_token_id": 1,
+   "head_dim": 256,
+   "hidden_act": "gelu_pytorch_tanh",
+   "hidden_activation": "gelu_pytorch_tanh",
+   "hidden_size": 768,
+   "id2label": {
+     "0": "Promoter strength in maize protoplasts"
+   },
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "label2id": {
+     "Promoter strength in maize protoplasts": 0
+   },
+   "max_position_embeddings": 1024,
+   "model_type": "gemma",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 12,
+   "num_key_value_heads": 1,
+   "pad_token_id": 0,
+   "problem_type": "regression",
+   "rms_norm_eps": 1e-06,
+   "rope_scaling": null,
+   "rope_theta": 10000.0,
+   "torch_dtype": "float32",
+   "transformers_version": "4.39.1",
+   "use_cache": true,
+   "vocab_size": 8002
+ }
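
The single-entry `id2label` together with `"problem_type": "regression"` means the classification head outputs one continuous value per sequence. A quick way to confirm this (a sketch assuming the hub repository name from the card above):

```python
from transformers import AutoConfig

cfg = AutoConfig.from_pretrained(
    'zhangtaolab/plant-dnagemma-promoter_strength_protoplast', trust_remote_code=True)
print(cfg.problem_type, cfg.num_labels)  # expected: regression 1
```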
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ed4a762b65b0972f43dd3da9531cc4ac07bc56c736c182bbec6e50acd9c2a861
+ size 609779736
special_tokens_map.json ADDED
@@ -0,0 +1,30 @@
+ {
+   "bos_token": {
+     "content": "<bos>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "<eos>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "<pad>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "unk_token": {
+     "content": "<unk>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,49 @@
+ {
+   "add_bos_token": true,
+   "add_eos_token": false,
+   "added_tokens_decoder": {
+     "0": {
+       "content": "<pad>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "1": {
+       "content": "<eos>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "2": {
+       "content": "<bos>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "3": {
+       "content": "<unk>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "bos_token": "<bos>",
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "<eos>",
+   "legacy": null,
+   "model_max_length": 512,
+   "pad_token": "<pad>",
+   "sp_model_kwargs": {},
+   "spaces_between_special_tokens": false,
+   "tokenizer_class": "GemmaTokenizer",
+   "unk_token": "<unk>",
+   "use_default_system_prompt": false
+ }