Fill-Mask
Transformers
PyTorch
Joblib
Safetensors
DNA
biology
genomics
custom_code
Inference Endpoints
hdallatorre committed on
Commit
ac4ca3c
1 Parent(s): 6fdd844

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +5 -0
README.md CHANGED
@@ -43,6 +43,11 @@ import torch
43
  tokenizer = AutoTokenizer.from_pretrained("InstaDeepAI/nucleotide-transformer-v2-500m-multi-species", trust_remote_code=True)
44
  model = AutoModelForMaskedLM.from_pretrained("InstaDeepAI/nucleotide-transformer-v2-500m-multi-species", trust_remote_code=True)
45
 
 
 
 
 
 
46
  # Create a dummy dna sequence and tokenize it
47
  sequences = ["ATTCCGATTCCGATTCCG", "ATTTCTCTCTCTCTCTGAGATCGATCGATCGAT"]
48
  tokens_ids = tokenizer.batch_encode_plus(sequences, return_tensors="pt", padding="max_length", max_length = max_length)["input_ids"]
 
43
  tokenizer = AutoTokenizer.from_pretrained("InstaDeepAI/nucleotide-transformer-v2-500m-multi-species", trust_remote_code=True)
44
  model = AutoModelForMaskedLM.from_pretrained("InstaDeepAI/nucleotide-transformer-v2-500m-multi-species", trust_remote_code=True)
45
 
46
+ # Choose the length to which the input sequences are padded. By default, the
47
+ # model max length is chosen, but feel free to decrease it as the time taken to
48
+ # obtain the embeddings increases significantly with it.
49
+ max_length = tokenizer.model_max_length
50
+
51
  # Create a dummy dna sequence and tokenize it
52
  sequences = ["ATTCCGATTCCGATTCCG", "ATTTCTCTCTCTCTCTGAGATCGATCGATCGAT"]
53
  tokens_ids = tokenizer.batch_encode_plus(sequences, return_tensors="pt", padding="max_length", max_length = max_length)["input_ids"]