Text Generation
Transformers
Safetensors
llama
text-generation-inference
Inference Endpoints
mfromm committed on
Commit
6666131
·
verified ·
1 Parent(s): 2bdb949

Update gptx_tokenizer.py

Browse files
Files changed (1) hide show
  1. gptx_tokenizer.py +2 -1
gptx_tokenizer.py CHANGED
@@ -62,7 +62,7 @@ class HFGPTXTokenizer(PreTrainedTokenizer):
62
  f"<placeholder_tok_{i}>" for i in range(256)
63
  ]
64
 
65
- def find_tokenizer_config(self, config_path: Path, repo_id: str = None) -> Optional[Path]:
66
  if not os.path.isfile(config_path):
67
  config_path = try_to_load_from_cache(repo_id=repo_id, filename=Path(config_path).name)
68
  if not config_path:
@@ -141,6 +141,7 @@ def find_tokenizer_config(self, config_path: Path, repo_id: str = None) -> Optio
141
  return tokenizer_config_file_or_name
142
  except Exception as e:
143
  raise OSError(f"Failed to download tokenizer model: {str(e)}")
 
144
  def __init__(
145
  self,
146
  model_path: Optional[str] = None,
 
62
  f"<placeholder_tok_{i}>" for i in range(256)
63
  ]
64
 
65
+ def find_tokenizer_config(self, config_path: Path, repo_id: str = None) -> Optional[Path]:
66
  if not os.path.isfile(config_path):
67
  config_path = try_to_load_from_cache(repo_id=repo_id, filename=Path(config_path).name)
68
  if not config_path:
 
141
  return tokenizer_config_file_or_name
142
  except Exception as e:
143
  raise OSError(f"Failed to download tokenizer model: {str(e)}")
144
+
145
  def __init__(
146
  self,
147
  model_path: Optional[str] = None,